15 ML k-means

Author: peimin | Published 2016-06-09 22:19

    k-means is a clustering method: given a number of clusters k, it assigns each point to the nearest of k centroids, recomputes each centroid as the mean of its assigned points, and repeats until the assignments stop changing. Because the random initial centroids can pull it toward a local minimum of the total squared error, the result is not always the best partition.
    Bisecting k-means is used to overcome this local-minimum problem: it starts with a single cluster and repeatedly splits (with 2-means) the cluster whose split yields the lowest overall SSE, until k clusters remain.
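    The quantity both algorithms try to minimize is the sum of squared errors (SSE): each point's squared distance to the centroid it is assigned to (the per-point value the code below stores in clusterAssment[:,1]). In standard notation, for m points x_i and k centroids \mu_j:

        SSE = \sum_{i=1}^{m} \min_{1 \le j \le k} \lVert x_i - \mu_j \rVert^2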

    from numpy import *
    
    def loadDataSet(fileName):      # parse a tab-delimited file of floats
        dataMat = []                # each row becomes a list of floats
        fr = open(fileName)
        for line in fr.readlines():
            curLine = line.strip().split('\t')
            fltLine = list(map(float, curLine))  # materialize the row; map() is lazy in Python 3
            dataMat.append(fltLine)
        return dataMat
    
    def distEclud(vecA, vecB):
        return sqrt(sum(power(vecA - vecB, 2))) #la.norm(vecA-vecB)
    
    def randCent(dataSet, k):
        n = shape(dataSet)[1]
        centroids = mat(zeros((k,n)))#create centroid mat
        for j in range(n):#create random cluster centers, within bounds of each dimension
            minJ = min(dataSet[:,j]) 
            rangeJ = float(max(dataSet[:,j]) - minJ)
            centroids[:,j] = mat(minJ + rangeJ * random.rand(k,1))
        return centroids
    
    datMat = mat(loadDataSet('testSet.txt'))
    randC = randCent(datMat, 2)
    print('randC', randC)
    print('dist between first two points', distEclud(datMat[0], datMat[1]))
    
    def kMeans(dataSet, k, distMeas=distEclud, createCent=randCent):
        m = shape(dataSet)[0] # m -> row
        clusterAssment = mat(zeros((m,2)))
        #create mat to assign data points 
        #to a centroid, also holds SE of each point
    
        # 1. initialize k random centroids
        centroids = createCent(dataSet, k)
    
        clusterChanged = True
        while clusterChanged:
            clusterChanged = False
            for i in range(m):#for each data point assign it to the closest centroid
                minDist = inf
                minIndex = -1

                # 2. find the nearest of the k centroids
                for j in range(k):
                    distJI = distMeas(centroids[j,:],dataSet[i,:])
                    if distJI < minDist:
                        minDist = distJI
                        minIndex = j
    
                # 3. if this point's assignment changed, another pass is needed
                if clusterAssment[i,0] != minIndex:
                    clusterChanged = True
    
                clusterAssment[i,:] = minIndex, minDist**2
    
            print(centroids)

            # 4. update each centroid to the mean of the points assigned to it
            for cent in range(k):#recalculate centroids
                ptsInClust = dataSet[nonzero(clusterAssment[:,0].A == cent)[0]]
                #get all the point in this cluster
    
                centroids[cent,:] = mean(ptsInClust, axis=0) #assign centroid to mean 
        return centroids, clusterAssment
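
    # A minimal usage sketch of kMeans (k=4 here is just an illustrative choice;
    # the per-iteration centroid printout above shows the convergence):
    myCentroids, clustAssing = kMeans(datMat, 4)
    print('final centroids', myCentroids)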
    
    # bisecting k-means
    def biKmeans(dataSet, k, distMeas=distEclud):
        m = shape(dataSet)[0]
        clusterAssment = mat(zeros((m,2)))
        centroid0 = mean(dataSet, axis=0).tolist()[0]
        centList = [centroid0]  # start with a single cluster: the mean of the whole data set
    
        for j in range(m):  # initial SSE: squared distance of each point to the single centroid
            clusterAssment[j,1] = distMeas(mat(centroid0), dataSet[j,:])**2
    
        while (len(centList) < k):
            lowestSSE = inf
    
            for i in range(len(centList)):
                ptsInCurrCluster = dataSet[nonzero(clusterAssment[:,0].A==i)[0],:]#get the data points currently in cluster i
                centroidMat, splitClustAss = kMeans(ptsInCurrCluster, 2, distMeas)
                
                sseSplit = sum(splitClustAss[:,1])  # compare the SSE to the current minimum
                sseNotSplit = sum(clusterAssment[nonzero(clusterAssment[:,0].A!=i)[0],1])
                print "sseSplit, and notSplit: ",sseSplit,sseNotSplit
                
                if (sseSplit + sseNotSplit) < lowestSSE:
                    bestCentToSplit = i
                    bestNewCents = centroidMat
                    bestClustAss = splitClustAss.copy()
                    lowestSSE    = sseSplit + sseNotSplit
    
            bestClustAss[nonzero(bestClustAss[:,0].A == 1)[0],0] = len(centList)  # relabel the "1" half as a brand-new cluster index
            bestClustAss[nonzero(bestClustAss[:,0].A == 0)[0],0] = bestCentToSplit  # the "0" half keeps the index of the split cluster
            
            print('the bestCentToSplit is:', bestCentToSplit)
            print('the len of bestClustAss is:', len(bestClustAss))
            
            centList[bestCentToSplit] = bestNewCents[0,:].tolist()[0]#replace a centroid with two best centroids 
            centList.append(bestNewCents[1,:].tolist()[0])
            clusterAssment[nonzero(clusterAssment[:,0].A == bestCentToSplit)[0],:]= bestClustAss#reassign new clusters, and SSE
        return mat(centList), clusterAssment
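
    # A minimal usage sketch of biKmeans (k=3 is just an illustrative choice for
    # the demo matrix loaded above):
    bikCentroids, bikAssments = biKmeans(datMat, 3)
    print('bisecting k-means centroids', bikCentroids)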
    
