Example: using the k-nearest neighbors algorithm on a dating site
(1) Collect data: a text file is provided.
(2) Prepare data: parse the text file with Python.
(3) Analyze data: draw a two-dimensional scatter plot with Matplotlib.
(4) Train the algorithm: this step does not apply to k-nearest neighbors.
(5) Test the algorithm: use part of the data provided by Helen as test samples.
The difference between test samples and the rest of the data is that test samples are already labeled: if the predicted class differs from the actual class, the prediction is counted as an error.
(6) Use the algorithm: build a simple command-line program into which Helen can enter a few feature values to predict whether the other person is a type she would like.
Complete code
from numpy import *
import operator

# Create a small toy dataset
def createDataSet():
    group = array([[1.0, 1.1], [1.0, 1.0], [0, 0], [0, 0.1]])
    labels = ['A', 'A', 'B', 'B']
    return group, labels
# k-NN classifier
def classify0(inX, dataSet, labels, k):
    dataSetSize = dataSet.shape[0]
    # Distance calculation: Euclidean distance from inX to every row of dataSet
    diffMat = tile(inX, (dataSetSize, 1)) - dataSet
    sqDiffMat = diffMat**2
    sqDistances = sqDiffMat.sum(axis=1)
    distances = sqDistances**0.5
    sortedDistIndicies = distances.argsort()
    classCount = {}
    # Vote with the labels of the k nearest points
    for i in range(k):
        voteIlabel = labels[sortedDistIndicies[i]]
        classCount[voteIlabel] = classCount.get(voteIlabel, 0) + 1
    # Sort the votes in descending order and return the majority label
    sortedClassCount = sorted(classCount.items(),
                              key=operator.itemgetter(1), reverse=True)
    return sortedClassCount[0][0]
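# A quick sanity check (added as a sketch, not part of the original notes): classify
# the point [0, 0] against the toy data from createDataSet. Its three nearest
# neighbors are [0, 0], [0, 0.1] and [1.0, 1.0], so the majority vote gives 'B'.
# group, labels = createDataSet()
# print(classify0([0, 0], group, labels, 3))   # prints 'B'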
# Parse a text file into a NumPy feature matrix and a list of class labels
def file2matrix(filename):
    fr = open(filename)
    arrayOlines = fr.readlines()           # readlines() reads every line (up to EOF) and returns them as a list
    fr.close()
    numberOfLines = len(arrayOlines)       # number of lines in the file
    returnMat = zeros((numberOfLines, 3))  # NumPy matrix to return, one row per record
    classLabelVector = []
    index = 0
    # Parse each line into the feature matrix and the label list
    for line in arrayOlines:
        line = line.strip()                # strip() removes leading/trailing whitespace, including the newline
        listFromLine = line.split('\t')    # split() cuts the line on tab characters
        returnMat[index, :] = listFromLine[0:3]         # the first three fields are the feature values
        classLabelVector.append(int(listFromLine[-1]))  # the last field is the class label
        index += 1
    return returnMat, classLabelVector
datingDataMat, datingLabels = file2matrix('D:/Python Project/.vscode/Machine-learning-practical-notes/KNN/datingTestSet2.txt')
# print(datingDataMat)
# [[ 4.09200000e+04 8.32697600e+00 9.53952000e-01]
# [ 1.44880000e+04 7.15346900e+00 1.67390400e+00]
# [ 2.60520000e+04 1.44187100e+00 8.05124000e-01]
# ...,
# [ 2.65750000e+04 1.06501020e+01 8.66627000e-01]
# [ 4.81110000e+04 9.13452800e+00 7.28045000e-01]
# [ 4.37570000e+04 7.88260100e+00 1.33244600e+00]]
# print(datingLabels[0:20])
# [3, 2, 1, 1, 1, 1, 3, 3, 1, 3, 1, 1, 2, 1, 1, 1, 1, 1, 2, 3]
# Visualize the data with Matplotlib
import matplotlib
import matplotlib.pyplot as plt
fig = plt.figure()
ax = fig.add_subplot(111)
# ax.scatter(datingDataMat[:,1], datingDataMat[:,2])  # single-color scatter plot of the (x, y) points
ax.scatter(datingDataMat[:,1], datingDataMat[:,2], 15.0*array(datingLabels), 15.0*array(datingLabels))  # marker size and color scaled by class label
# plt.show()
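# Optional (a small addition, not in the original notes): label the axes so the
# scatter plot is easier to read. Column 1 is the percentage of time spent playing
# video games and column 2 is the liters of ice cream consumed per week, matching
# the prompts used in classifyPerson below.
# ax.set_xlabel('percentage of time spent playing video games')
# ax.set_ylabel('liters of ice cream consumed per week')
# plt.show()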
# Normalize the feature values to the range [0, 1]:
# newValue = (oldValue - min) / (max - min)
def autoNorm(dataSet):
    minVals = dataSet.min(0)  # column-wise minima; the argument 0 makes min() work down each column rather than across each row
    # print(minVals)
    # [ 0. 0. 0.001156]
    maxVals = dataSet.max(0)  # column-wise maxima
    # print(maxVals)
    # [ 9.12730000e+04 2.09193490e+01 1.69551700e+00]
    ranges = maxVals - minVals
    # print(ranges)
    # [ 9.12730000e+04 2.09193490e+01 1.69436100e+00]
    normDataSet = zeros(shape(dataSet))  # zero matrix with the same shape as dataSet
    m = dataSet.shape[0]                 # number of records (1000 here)
    # print(m)
    # 1000
    normDataSet = dataSet - tile(minVals, (m, 1))     # oldValue - min
    normDataSet = normDataSet / tile(ranges, (m, 1))  # element-wise division by ranges = max - min
    return normDataSet, ranges, minVals
# normMat, ranges, minVals = autoNorm(datingDataMat)
# print(normMat)
# [[ 0.44832535 0.39805139 0.56233353]
# [ 0.15873259 0.34195467 0.98724416]
# [ 0.28542943 0.06892523 0.47449629]
# ...,
# [ 0.29115949 0.50910294 0.51079493]
# [ 0.52711097 0.43665451 0.4290048 ]
# [ 0.47940793 0.3768091 0.78571804]]
# print(ranges)
# [ 9.12730000e+04 2.09193490e+01 1.69436100e+00]
# print(minVals)
# [ 0. 0. 0.001156]
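# A quick check (a sketch, not in the original notes): after autoNorm every feature
# column should span exactly [0, 1], because each column minimum maps to 0 and each
# column maximum maps to 1.
# normMat, ranges, minVals = autoNorm(datingDataMat)
# print(normMat.min(0), normMat.max(0))   # expect [0. 0. 0.] and [1. 1. 1.]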
# Test the classifier against the dating-site data
def datingClassTest():
    # fraction of the data held out for testing
    hoRatio = 0.10
    # load the data from the file
    datingDataMat, datingLabels = file2matrix('D:/Python Project/.vscode/Machine-learning-practical-notes/KNN/datingTestSet2.txt')
    # normalize the feature values
    normMat, ranges, minVals = autoNorm(datingDataMat)
    # number of rows in the normalized matrix (m = 1000)
    m = normMat.shape[0]
    # number of test vectors (100)
    numTestVecs = int(m*hoRatio)
    # error counter
    errorCount = 0.0
    for i in range(numTestVecs):
        classifierResult = classify0(normMat[i,:], normMat[numTestVecs:m,:], datingLabels[numTestVecs:m], 3)
        print("the classifier came back with: %d, the real answer is: %d" % (classifierResult, datingLabels[i]))
        # count a mismatch as an error
        if classifierResult != datingLabels[i]:
            errorCount += 1.0
    # print the overall error rate
    print("the total error rate is: %f" % (errorCount/float(numTestVecs)))

# Run the classifier test against the dating-site data
# datingClassTest()
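# Note on the split (added for clarity): with hoRatio = 0.10 and 1000 records in
# datingTestSet2.txt, the first numTestVecs = 100 rows serve as the test set and
# rows 100-999 are passed to classify0 as the reference set. This assumes the file
# stores its records in random order, so a simple holdout split is reasonable.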
# Dating-site prediction function
def classifyPerson():
    resultList = ['not at all', 'in small doses', 'in large doses']
    percentTats = float(input("percentage of time spent playing video games?"))
    miles = float(input("frequent flier miles earned per year?"))
    iceCream = float(input("liters of ice cream consumed per week?"))
    datingDataMat, datingLabels = file2matrix('D:/Python Project/.vscode/Machine-learning-practical-notes/KNN/datingTestSet2.txt')
    normMat, ranges, minVals = autoNorm(datingDataMat)
    inArr = array([miles, percentTats, iceCream])
    # normalize the input with the same ranges and minima, then classify it
    classifierResult = classify0((inArr - minVals)/ranges, normMat, datingLabels, 3)
    print("\nyou will probably like this person:", resultList[classifierResult - 1])

classifyPerson()
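# An example interaction (illustrative only, added as a sketch; the printed label
# depends entirely on the contents of datingTestSet2.txt):
# percentage of time spent playing video games? 10
# frequent flier miles earned per year? 10000
# liters of ice cream consumed per week? 0.5
# you will probably like this person: <one of the three strings in resultList>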