[Machine Learning in Action] k-Nearest Neighbors (kNN)

The k-Nearest Neighbors Algorithm

from numpy import *
import operator # the operator module, used later to sort by dict value

def createDataSet():
    group = array([[1.0,1.1],[1.0,1.0],[0,0],[0,0.1]])
    labels = ['A','A','B','B']
    return group, labels
group, labels = createDataSet()
group
array([[ 1. ,  1.1],
       [ 1. ,  1. ],
       [ 0. ,  0. ],
       [ 0. ,  0.1]])
labels
['A', 'A', 'B', 'B']
# inX: the input vector to classify
# dataSet: the training sample set
# labels: the label vector
# k: the number of nearest neighbors to use in the vote
def classify0(inX, dataSet, labels, k):
    dataSetSize = dataSet.shape[0]
    # Euclidean distance computation
    diffMat = tile(inX, (dataSetSize,1)) - dataSet  # numpy.tile(A, reps) repeats A according to the reps tuple
    sqDiffMat = diffMat**2
    sqDistances = sqDiffMat.sum(axis=1)
    distances = sqDistances**0.5
    #print('distances:',distances)
    sortedDistIndicies = distances.argsort() # indices that would sort the distances in ascending order
    #print('sortedDistIndicies:',sortedDistIndicies)
    # determine the classes of the k elements with the smallest distances
    classCount = {}
    for i in range(k):
        voteIlabel = labels[sortedDistIndicies[i]]
        classCount[voteIlabel] = classCount.get(voteIlabel,0) + 1
    #print('classCount:',classCount)
    # sort the vote counts by value, descending
    sortedClassCount = sorted(classCount.items(), key = operator.itemgetter(1), reverse=True)
    #print('sortedClassCount:',sortedClassCount)
    return sortedClassCount[0][0]
classify0([0,0],group,labels,3)
'B'
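
For reference, the same classifier can be written more compactly with NumPy broadcasting and collections.Counter; a minimal sketch (classify0_vec is a hypothetical name, not from the book):

from collections import Counter
import numpy as np

def classify0_vec(inX, dataSet, labels, k):
    # broadcasting subtracts inX from every row, replacing tile()
    distances = np.sqrt(((dataSet - inX) ** 2).sum(axis=1))
    kNearest = distances.argsort()[:k]            # indices of the k closest samples
    votes = Counter(labels[i] for i in kNearest)  # majority vote among the neighbors
    return votes.most_common(1)[0][0]

classify0_vec([0, 0], group, labels, 3)  # -> 'B', same as classify0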

Part 1: Using kNN to Improve Matches on a Dating Site

1. Prepare the data: parsing data from a text file

def file2matrix(filename):
    fr = open(filename)
    arrayOLines = fr.readlines()
    numberOfLines = len(arrayOLines)
    returnMat = zeros((numberOfLines, 3)) # create a NumPy matrix with one row per line and 3 feature columns
    classLabelVector = []
    # parse the file into the feature matrix and the label list
    index = 0
    for line in arrayOLines:
        line = line.strip()
        listFromLine = line.split('\t')
        returnMat[index,:] = listFromLine[:3]
        classLabelVector.append(int(listFromLine[-1])) # without int() the label would be kept as a string
        index += 1
    return returnMat, classLabelVector
datingDataMat, datingLabels = file2matrix('data/datingTestSet2.txt')
datingDataMat
array([[  4.09200000e+04,   8.32697600e+00,   9.53952000e-01],
       [  1.44880000e+04,   7.15346900e+00,   1.67390400e+00],
       [  2.60520000e+04,   1.44187100e+00,   8.05124000e-01],
       ..., 
       [  2.65750000e+04,   1.06501020e+01,   8.66627000e-01],
       [  4.81110000e+04,   9.13452800e+00,   7.28045000e-01],
       [  4.37570000e+04,   7.88260100e+00,   1.33244600e+00]])
datingLabels[0:20]
[3, 2, 1, 1, 1, 1, 3, 3, 1, 3, 1, 1, 2, 1, 1, 1, 1, 1, 2, 3]
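
Since every column of datingTestSet2.txt is numeric (the last column is the integer label 1-3), the same parsing can be done with numpy.loadtxt; a hedged alternative sketch (file2matrix_np is a hypothetical name):

import numpy as np

def file2matrix_np(filename):
    raw = np.loadtxt(filename, delimiter='\t')   # parse the tab-separated numeric table
    return raw[:, :3], raw[:, -1].astype(int).tolist()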

2. Analyze the data: creating scatter plots with Matplotlib

import matplotlib.pyplot as plt
%matplotlib inline
fig = plt.figure()
ax = fig.add_subplot(111)
ax.scatter(datingDataMat[:,1], datingDataMat[:,2], 15.0*array(datingLabels), 15.0*array(datingLabels))  # 3rd/4th args: marker size and color scaled by class label
plt.xlabel('Gaming time %')
plt.ylabel('Ice cream L/week')
plt.show()

fig = plt.figure()
ax = fig.add_subplot(111)
ax.scatter(datingDataMat[:,0], datingDataMat[:,1], 15.0*array(datingLabels), 15.0*array(datingLabels))
plt.xlabel('Frequent flyer miles/year')
plt.ylabel('Gaming time %')
plt.show()
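
Passing 15.0*array(datingLabels) as both the size and color arguments encodes the class, but gives no legend. A sketch that plots one class at a time so Matplotlib can label them (the class names are assumed from the three "like" levels used later in classifyPerson):

labelArr = array(datingLabels)
fig, ax = plt.subplots()
for cls, name in [(1, 'not at all'), (2, 'in small doses'), (3, 'in large doses')]:
    mask = labelArr == cls   # boolean index selecting one class
    ax.scatter(datingDataMat[mask, 0], datingDataMat[mask, 1], s=15, label=name)
ax.set_xlabel('Frequent flyer miles/year')
ax.set_ylabel('Gaming time %')
ax.legend()
plt.show()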

3. Prepare the data: normalizing numeric values

def autoNorm(dataSet):
    minVals = dataSet.min(0) # 0 is the axis argument (column-wise minimum), not an index
    maxVals = dataSet.max(0)
    ranges = maxVals - minVals
    m = dataSet.shape[0]
    normDataSet = (dataSet - tile(minVals, (m,1))) / tile(ranges, (m,1))  # newValue = (oldValue - min) / (max - min)
    return normDataSet, ranges, minVals
normMat, ranges, minVals = autoNorm(datingDataMat)
normMat
array([[ 0.44832535,  0.39805139,  0.56233353],
       [ 0.15873259,  0.34195467,  0.98724416],
       [ 0.28542943,  0.06892523,  0.47449629],
       ..., 
       [ 0.29115949,  0.50910294,  0.51079493],
       [ 0.52711097,  0.43665451,  0.4290048 ],
       [ 0.47940793,  0.3768091 ,  0.78571804]])
ranges
array([  9.12730000e+04,   2.09193490e+01,   1.69436100e+00])
minVals
array([ 0.      ,  0.      ,  0.001156])
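
Because NumPy broadcasting aligns a 1-D array against every row of a 2-D array, the tile() calls in autoNorm are optional; more importantly, the same ranges and minVals computed on the training data must be reused for any new sample. A minimal sketch with an illustrative input (the values are made up):

inArr = array([40920.0, 8.3, 0.95])     # illustrative new sample: miles, gaming %, ice cream
normInArr = (inArr - minVals) / ranges  # broadcasting does the row-wise alignment that tile() made explicit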

4. Test the algorithm: verifying the classifier as a complete program

def datingClassTest():
    hoRatio = 0.10 # hold out 10% of the data as the test set
    datingDataMat, datingLabels = file2matrix('data/datingTestSet2.txt')
    normMat, ranges, minVals = autoNorm(datingDataMat)
    m = normMat.shape[0]
    numTestVecs = int(m*hoRatio)
    errorCount = 0.0
    for i in range(numTestVecs):
        classifierResult = classify0(normMat[i,:], normMat[numTestVecs:m,:], datingLabels[numTestVecs:m], 4)
        #print('the classifier came back with: %d, the real answer is: %d' % (classifierResult, datingLabels[i]))
        if (classifierResult != datingLabels[i]): errorCount += 1.0
    print('the total error rate is: %f' % (errorCount/float(numTestVecs)))
datingClassTest()
the total error rate is: 0.040000
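
The 4% error rate depends on the choice of k (and on hoRatio). A quick sketch to compare a few values of k under the same hold-out split (datingTestK is a hypothetical helper, not from the book):

def datingTestK(k, hoRatio=0.10):
    datingDataMat, datingLabels = file2matrix('data/datingTestSet2.txt')
    normMat, ranges, minVals = autoNorm(datingDataMat)
    numTestVecs = int(normMat.shape[0] * hoRatio)   # first 10% held out as the test set
    errorCount = sum(classify0(normMat[i,:], normMat[numTestVecs:,:], datingLabels[numTestVecs:], k) != datingLabels[i]
                     for i in range(numTestVecs))
    return errorCount / float(numTestVecs)

for k in (1, 3, 4, 7, 15):
    print('k=%2d  error rate=%.3f' % (k, datingTestK(k)))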

5. Use the algorithm: building a complete, usable system

def classifyPerson():
    resultList = ['not at all','in small doses','in large doses']
    percentTats = float(input('percentage of time spent playing video games'))
    ffMiles = float(input('frequent flier miles earned per year'))
    iceCream = float(input('liters of ice cream consumed per year?'))
    datingDataMat, datingLabels = file2matrix('data/datingTestSet2.txt')
    normMat, ranges, minVals = autoNorm(datingDataMat)
    inArr = array([ffMiles, percentTats, iceCream])
    classifierResult = classify0((inArr-minVals)/ranges , normMat, datingLabels, 4)
    print('You will probably like this person:', resultList[classifierResult - 1])
classifyPerson()
percentage of time spent playing video games10
frequent flier miles earned per year1000
liters of ice cream consumed per year?0.1
You will probably like this person: in small doses

Part 2: A Handwriting Recognition System

1. Prepare the data: converting images into test vectors

def img2vector(filename):
    returnVect = zeros((1,1024)) # each 32x32 text image flattens to a 1x1024 vector
    fr = open(filename)
    for i in range(32):
        lineStr = fr.readline()
        for j in range(32):
            returnVect[0, 32*i+j] = int(lineStr[j])
    return returnVect
testVector = img2vector('data/testDigits/0_0.txt')
testVector[0,0:31]
array([ 0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,
        1.,  1.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,
        0.,  0.,  0.,  0.,  0.])
testVector[0,32:63]
array([ 0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  1.,
        1.,  1.,  1.,  1.,  1.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,
        0.,  0.,  0.,  0.,  0.])
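
img2vector reads the 32x32 text image character by character; an equivalent, more compact sketch using a list comprehension (img2vector_np is a hypothetical name, not from the book):

def img2vector_np(filename):
    with open(filename) as fr:
        # each of the 32 lines is a string of 32 '0'/'1' characters
        rows = [[int(ch) for ch in fr.readline().strip()] for _ in range(32)]
    return array(rows, dtype=float).reshape(1, 1024)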

2. Test the algorithm: recognizing handwritten digits with kNN

import os
os.listdir('data/trainingDigits')[0:5] # list the directory contents
['0_0.txt', '0_1.txt', '0_10.txt', '0_100.txt', '0_101.txt']
((os.listdir('data/trainingDigits')[5]).split('.')[0]).split('_')[0]  # parse the class digit from the filename
'0'
def handwritingClassTest():
    hwLabels = []
    trainingFileList = os.listdir('data/trainingDigits')
    m = len(trainingFileList)
    trainingMat = zeros((m,1024))
    for i in range(m):
        fileNameStr = trainingFileList[i]
        fileStr = fileNameStr.split('.')[0]
        classNumStr = int(fileStr.split('_')[0])
        hwLabels.append(classNumStr)
        trainingMat[i,:] = img2vector('data/trainingDigits/%s' % fileNameStr)
    testFileList = os.listdir('data/testDigits')
    errorCount = 0.0
    mTest = len(testFileList)
    for i in range(mTest):
        fileNameStr = testFileList[i]
        fileStr = fileNameStr.split('.')[0]
        classNumStr = int(fileStr.split('_')[0])
        vectorUnderTest = img2vector('data/testDigits/%s' % fileNameStr)
        classifierResult = classify0(vectorUnderTest, trainingMat, hwLabels, 3)
        #print('the classifier came back with: %d, the real answer is: %d' % (classifierResult, classNumStr))
        if (classifierResult != classNumStr): errorCount += 1.0
    print('\nthe total number of errors is: %d' % errorCount)
    print('\nthe total error rate is: %f' % (errorCount/float(mTest)))
handwritingClassTest()
the total number of errors is: 10

the total error rate is: 0.010571

The algorithm is not computationally efficient:
Every test vector requires 2,000 distance computations against the training set, each involving 1,024 floating-point dimensions, and this is repeated roughly 900 times (once per test vector).
In addition, about 2 MB of storage must be set aside for the vectors.
kd-trees are an optimized variant of the k-nearest neighbors algorithm that can save much of this computational cost.
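
For reference, a kd-tree-backed classifier is available in scikit-learn; a minimal sketch, assuming scikit-learn is installed and that trainingMat/hwLabels are available as built inside handwritingClassTest above (note that in 1,024 dimensions a kd-tree degrades toward brute-force search, so the speedup is largest on low-dimensional data such as the three dating features):

from sklearn.neighbors import KNeighborsClassifier

testFiles = os.listdir('data/testDigits')
testMat = zeros((len(testFiles), 1024))
testLabels = []
for i, fileNameStr in enumerate(testFiles):
    testLabels.append(int(fileNameStr.split('.')[0].split('_')[0]))
    testMat[i,:] = img2vector('data/testDigits/%s' % fileNameStr)

knn = KNeighborsClassifier(n_neighbors=3, algorithm='kd_tree')  # builds the tree index once in fit()
knn.fit(trainingMat, hwLabels)  # assumes trainingMat/hwLabels exposed from handwritingClassTest
print('error rate: %f' % (1.0 - knn.score(testMat, testLabels)))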

Summary

Characteristics of the k-Nearest Neighbors algorithm

  1. One of the simplest and most effective algorithms for classifying data
  2. Requires training samples that are close to the actual data
  3. Must keep the entire dataset, so the dataset cannot be too large
  4. Must compute a distance to every training sample, which is very time-consuming
  5. Gives no information about the underlying structure of the data, so there is no way to know what an average or typical instance looks like.