[Machine Learning in Action] Decision Trees with the ID3 Algorithm

Decision Trees: Splitting the Dataset with the ID3 Algorithm

1. Information Gain
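Shannon entropy measures how mixed a dataset's class labels are: H = -Σ p_k · log2(p_k), where p_k is the fraction of samples belonging to class k. The more classes there are, and the more evenly they are mixed, the higher the entropy.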

from math import log

# Compute the Shannon entropy of a dataset (the class label is the last column)
def calcShannonEnt(dataSet):
    numEntries = len(dataSet)
    labelCounts = {}
    for featVec in dataSet:
        currentLabel = featVec[-1]  # class label sits in the last column
        if currentLabel not in labelCounts:
            labelCounts[currentLabel] = 0
        labelCounts[currentLabel] += 1
    shannonEnt = 0.0
    for key in labelCounts:
        prob = float(labelCounts[key])/numEntries
        shannonEnt -= prob * log(prob,2)  # H = -sum(p * log2(p))
    return shannonEnt
# Toy dataset: two binary features and a yes/no class label
def createDataSet():
    dataSet = [[1,1,'yes'],
              [1,1,'yes'],
              [1,0,'no'],
              [0,1,'no'],
              [0,1,'no']]
    labels = ['no surfacing', 'flippers']
    return dataSet, labels
myDat, labels = createDataSet()
myDat
[[1, 1, 'yes'], [1, 1, 'yes'], [1, 0, 'no'], [0, 1, 'no'], [0, 1, 'no']]
calcShannonEnt(myDat)
0.9709505944546686
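This matches the formula: with two 'yes' and three 'no' labels, H = -(2/5)·log2(2/5) - (3/5)·log2(3/5) ≈ 0.971.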
myDat[0][-1]='maybe'
myDat
[[1, 1, 'maybe'], [1, 1, 'yes'], [1, 0, 'no'], [0, 1, 'no'], [0, 1, 'no']]
calcShannonEnt(myDat)
1.3709505944546687
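Replacing one label with a third class 'maybe' makes the data less pure, so the entropy rises from 0.971 to 1.371.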

2. Splitting the Dataset
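The idea of ID3: try splitting the dataset on each feature in turn, and keep the split that produces the largest information gain, i.e. the largest drop in entropy.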

# Split the dataset on a given feature: return every row where featVec[axis] == value,
# with the axis-th feature removed
def splitDataSet(dataSet, axis, value):
    retDataSet = []
    for featVec in dataSet:
        if featVec[axis] == value:
            reducedFeatVec = featVec[:axis]
            reducedFeatVec.extend(featVec[axis+1:]) # cut out the feature used for the split
            retDataSet.append(reducedFeatVec)
    return retDataSet
myDat, labels = createDataSet()
myDat
[[1, 1, 'yes'], [1, 1, 'yes'], [1, 0, 'no'], [0, 1, 'no'], [0, 1, 'no']]
splitDataSet(myDat,0,1)
[[1, 'yes'], [1, 'yes'], [0, 'no']]
splitDataSet(myDat,0,0)
[[1, 'no'], [1, 'no']]
# Choose the feature with the largest information gain
def chooseBestFeatureToSplit(dataSet):
    numFeatures = len(dataSet[0]) - 1  # the last column is the class label
    baseEntropy = calcShannonEnt(dataSet)
    bestInfoGain = 0.0
    bestFeature = -1
    for i in range(numFeatures):
        featList = [example[i] for example in dataSet]
        uniqueVals = set(featList)  # all values this feature takes
        newEntropy = 0.0
        for value in uniqueVals:
            subDataSet = splitDataSet(dataSet, i, value)
            prob = len(subDataSet)/float(len(dataSet))
            newEntropy += prob * calcShannonEnt(subDataSet)  # weighted entropy after the split
        infoGain = baseEntropy - newEntropy
        if (infoGain > bestInfoGain):
            bestInfoGain = infoGain
            bestFeature = i
    return bestFeature
myDat, labels = createDataSet()
chooseBestFeatureToSplit(myDat)
0
myDat
[[1, 1, 'yes'], [1, 1, 'yes'], [1, 0, 'no'], [0, 1, 'no'], [0, 1, 'no']]
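The function returns 0 because the first feature yields the larger gain. Splitting on feature 0 gives subsets {yes, yes, no} and {no, no}, so the remaining entropy is 3/5 · H(2/3, 1/3) + 2/5 · 0 ≈ 0.551 and the gain is 0.971 - 0.551 ≈ 0.420. Splitting on feature 1 leaves entropy 4/5 · 1.0 = 0.8, a gain of only about 0.171.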

3. Building the Tree Recursively
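With the two building blocks above, the tree is built recursively: choose the best feature, split the dataset on each of its values, and recurse on every subset until the labels are pure or the features are exhausted.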

import operator

# If all features have been used but the class labels are still mixed,
# label the leaf node by majority vote
def majorityCnt(classList):
    classCount = {}
    for vote in classList:
        if vote not in classCount:
            classCount[vote] = 0
        classCount[vote] += 1
    sortedClassCount = sorted(classCount.items(), key=operator.itemgetter(1), reverse=True)
    return sortedClassCount[0][0]
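For example, majorityCnt(['yes', 'yes', 'no']) returns 'yes'.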
# Build the tree recursively; the tree is stored as nested dictionaries
def createTree(dataSet, labels):
    classList = [example[-1] for example in dataSet]
    if classList.count(classList[0]) == len(classList): # stop when all class labels are identical
        return classList[0]
    if len(dataSet[0]) == 1: # all features used up: return the majority vote
        return majorityCnt(classList)
    bestFeat = chooseBestFeatureToSplit(dataSet)
    bestFeatLabel = labels[bestFeat]
    myTree = {bestFeatLabel:{}}
    del(labels[bestFeat])
    featValues = [example[bestFeat] for example in dataSet]
    uniqueVals = set(featValues) # all values the chosen feature takes
    for value in uniqueVals:
        subLabels = labels[:] # !! copy the labels: lists are passed by reference, and the recursion must not mutate the caller's list
        myTree[bestFeatLabel][value] = createTree(splitDataSet(dataSet, bestFeat, value), subLabels)
    return myTree
myDat, labels = createDataSet()
myTree = createTree(myDat, labels)
myTree
{'no surfacing': {0: 'no', 1: {'flippers': {0: 'no', 1: 'yes'}}}}
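Read the nested dictionary from the outside in: if 'no surfacing' is 0 the answer is 'no'; if it is 1, the 'flippers' value decides between 'no' and 'yes'.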

4. Plotting the Tree with Annotations

# Draw tree nodes with Matplotlib text annotations
import matplotlib.pyplot as plt
%matplotlib inline

# Box and arrow styles for decision nodes and leaf nodes
decisionNode = dict(boxstyle='sawtooth', fc='0.8')
leafNode = dict(boxstyle='round4', fc='0.8')
arrow_args = dict(arrowstyle='<-')

# Draw an annotated node with an arrow from its parent
def plotNode(nodeTxt, centerPt, parentPt, nodeType):
    # centerPt is the text-box position (xytext); parentPt is where the arrow starts
    createPlot.ax1.annotate(nodeTxt, xy=parentPt, xycoords='axes fraction', xytext=centerPt, textcoords='axes fraction',\
                           va='center', ha='center', bbox=nodeType, arrowprops=arrow_args)
def createPlot():
    fig = plt.figure(1, facecolor='white')
    fig.clf() # clear the figure
    createPlot.ax1 = plt.subplot(111, frameon=False)
    plotNode('decisionNode', (0.5,0.1), (0.1,0.5), decisionNode)
    plotNode('leafNode', (0.8,0.1), (0.3,0.8), leafNode)
    plt.show()
createPlot()

# Count the leaf nodes and the tree depth, used to size the x axis and y axis
def getNumLeafs(myTree):
    numLeafs = 0
    firstStr = list(myTree.keys())[0]
    secondDict = myTree[firstStr]
    for key in secondDict.keys():
        if type(secondDict[key]).__name__ == 'dict':
            numLeafs += getNumLeafs(secondDict[key])
        else: numLeafs += 1
    return numLeafs
def getTreeDepth(myTree):
    maxDepth = 0
    firstStr = list(myTree.keys())[0]
    secondDict = myTree[firstStr]
    for key in secondDict.keys():
        if type(secondDict[key]).__name__ == 'dict':
            thisDepth = 1 + getTreeDepth(secondDict[key])
        else: thisDepth = 1
        if thisDepth > maxDepth: maxDepth = thisDepth
    return maxDepth
# Two pre-stored trees for testing the plotting code
def retrieveTree(i):
    listOfTrees = [{'no surfacing': {0: 'no', 1: {'flippers': {0: 'no', 1: 'yes'}}}},\
                  {'no surfacing': {0: 'no', 1: {'flippers': {0: {'head':{0:'no', 1: 'yes'}}, 1:'no'}}}}]
    return listOfTrees[i]
    return listOfTrees[i]
myTree = retrieveTree(0)
list(myTree.keys())[0]
'no surfacing'
getNumLeafs(myTree)
3
getTreeDepth(myTree)
2
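Both values match the stored tree: it has three leaves ('no', 'no', 'yes') and two levels of decision nodes.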
# PlotTree
# Fill in text midway between a parent node and a child node
def plotMidText(cntrPt, parentPt, txtString):
    xMid = (parentPt[0] - cntrPt[0])/2.0 + cntrPt[0]
    yMid = (parentPt[1] - cntrPt[1])/2.0 + cntrPt[1]
    createPlot.ax1.text(xMid, yMid, txtString)
def plotTree(myTree, parentPt, nodeTxt):
    numLeafs = getNumLeafs(myTree)
    depth = getTreeDepth(myTree)
    firstStr = list(myTree.keys())[0]
    cntrPt = (plotTree.xOff + (1.0 + float(numLeafs))/2.0/plotTree.totalW, plotTree.yOff)
    plotMidText(cntrPt, parentPt, nodeTxt) # label the branch with the feature value
    plotNode(firstStr, cntrPt, parentPt, decisionNode) # draw the decision node with an arrow
    secondDict = myTree[firstStr]
    plotTree.yOff = plotTree.yOff - 1.0/plotTree.totalD # move down one level
    for key in secondDict.keys():
        if type(secondDict[key]).__name__ == 'dict':
            plotTree(secondDict[key], cntrPt, str(key))
        else:
            plotTree.xOff = plotTree.xOff + 1.0/plotTree.totalW # shift x to the next leaf slot
            plotNode(secondDict[key], (plotTree.xOff, plotTree.yOff), cntrPt, leafNode)
            plotMidText((plotTree.xOff, plotTree.yOff), cntrPt, str(key))
    plotTree.yOff = plotTree.yOff + 1.0/plotTree.totalD # !! move back up once this branch is drawn
# Rewritten createPlot that drives plotTree
def createPlot(inTree):
    fig = plt.figure(1, facecolor='white')
    fig.clf()
    axprops = dict(xticks=[], yticks=[])
    createPlot.ax1 = plt.subplot(111, frameon=False, **axprops)
    plotTree.totalW = float(getNumLeafs(inTree)) # store the tree's width and depth as function attributes
    plotTree.totalD = float(getTreeDepth(inTree))
    plotTree.xOff = -0.5/plotTree.totalW
    plotTree.yOff = 1.0
    plotTree(inTree, (0.5,1.0), '')
    plt.show()
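The layout logic: the unit square is divided into totalW equal slots, one per leaf. plotTree.xOff starts half a slot left of the axes (-0.5/totalW), each leaf shifts it right by one slot (1.0/totalW) so the first leaf lands at the center of the first slot, and a decision node is centered above the leaves it owns. plotTree.yOff steps down by 1.0/totalD per level and back up when a branch is finished.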
myTree
{'no surfacing': {0: 'no', 1: {'flippers': {0: 'no', 1: 'yes'}}}}
createPlot(myTree)

myTree['no surfacing'][3] = 'maybe'
createPlot(myTree)
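Because totalW and totalD are recomputed from the tree inside createPlot, the added third branch simply widens the layout.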

5. Classifying with the Decision Tree

# Classification function: walk the tree using the feature labels
def classify(inputTree, featLabels, testVec):
    firstStr = list(inputTree.keys())[0]
    secondDict = inputTree[firstStr]
    featIndex = featLabels.index(firstStr) # index() turns the label string into a feature index for lookup
    for key in secondDict.keys():
        if testVec[featIndex] == key:
            if type(secondDict[key]).__name__ == 'dict':
                classLabel = classify(secondDict[key], featLabels, testVec)
            else: classLabel = secondDict[key]
    return classLabel
myDat, labels = createDataSet()
labels
['no surfacing', 'flippers']
myTree = retrieveTree(0)
myTree
{'no surfacing': {0: 'no', 1: {'flippers': {0: 'no', 1: 'yes'}}}}
classify(myTree, labels, [1,1])
'yes'
classify(myTree, labels, [1,0])
'no'
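Tracing [1,0]: 'no surfacing' is at index 0 and equals 1, so we descend into the 'flippers' subtree; 'flippers' is at index 1 and equals 0, which is a leaf labeled 'no'.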
# Store and reload the decision tree with the pickle module
def storeTree(inputTree, filename):
    import pickle
    with open(filename,'wb') as fw:  # pickle needs binary mode
        pickle.dump(inputTree, fw)
def grabTree(filename):
    import pickle
    with open(filename,'rb') as fr:
        return pickle.load(fr)
storeTree(myTree, 'classifierStorage.txt')
grabTree('classifierStorage.txt')
{'no surfacing': {0: 'no', 1: {'flippers': {0: 'no', 1: 'yes'}}}}
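Note that pickle writes binary data (hence the 'wb'/'rb' modes), so despite the .txt extension, classifierStorage.txt is not a readable text file.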

Example: Predicting Contact Lens Types with a Decision Tree

fr = open('data/lenses.txt')
lenses = [inst.strip().split('\t') for inst in fr.readlines()]
lensesLabels = ['age', 'prescript', 'astigmatic', 'tearRate']
lensesTree = createTree(lenses, lensesLabels)
print(lensesTree)
{'tearRate': {'normal': {'astigmatic': {'yes': {'prescript': {'hyper': {'age': {'presbyopic': 'no lenses', 'young': 'hard', 'pre': 'no lenses'}}, 'myope': 'hard'}}, 'no': {'age': {'presbyopic': {'prescript': {'hyper': 'soft', 'myope': 'no lenses'}}, 'young': 'soft', 'pre': 'soft'}}}}, 'reduced': 'no lenses'}}
createPlot(lensesTree)

When a tree has too many branches that match the training data, overfitting becomes a problem; if a leaf node contributes only a small amount of information, that node can be pruned away.
A drawback of the ID3 algorithm: it cannot handle numeric (continuous) data directly.
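
A common workaround is to bin numeric features into categories before calling createTree. A minimal sketch, not from the original post (the discretize helper and its threshold argument are hypothetical):

# Hypothetical helper, not part of the book's code: turn a numeric
# column into two categorical bins so the ID3 routines above can split on it
def discretize(dataSet, axis, threshold):
    binned = []
    for featVec in dataSet:
        newVec = featVec[:]  # copy the row so the original dataset stays intact
        newVec[axis] = '<=%g' % threshold if featVec[axis] <= threshold else '>%g' % threshold
        binned.append(newVec)
    return binned

After every numeric column is binned this way, createTree applies unchanged; C4.5, ID3's successor, automates this step by choosing split thresholds that maximize the gain ratio.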
