Machine Learning -- Lab 4 Code Walkthrough

import math
from math import log
import operator
import csv


# Data loading
# Option 1: dataset hard-coded in the script (watermelon 2.0 with
# integer-encoded features and 'yes'/'no' labels)
def loaddata():
    dataSet = [[0, 0, 0, 0, 0, 0, 'yes'],
               [1, 0, 1, 0, 0, 0, 'yes'],
               [1, 0, 0, 0, 0, 0, 'yes'],
               [0, 0, 1, 0, 0, 0, 'yes'],
               [2, 0, 0, 0, 0, 0, 'yes'],
               [0, 1, 0, 0, 1, 1, 'yes'],
               [1, 1, 0, 1, 1, 1, 'yes'],
               [1, 1, 0, 0, 1, 0, 'yes'],
               [1, 1, 1, 1, 1, 0, 'no'],
               [0, 2, 2, 0, 2, 1, 'no'],
               [2, 2, 2, 2, 2, 0, 'no'],
               [2, 0, 0, 2, 2, 1, 'no'],
               [0, 1, 0, 1, 0, 0, 'no'],
               [2, 1, 1, 1, 0, 0, 'no'],
               [1, 1, 0, 0, 1, 1, 'no'],
               [2, 0, 0, 2, 2, 0, 'no'],
               [0, 0, 1, 1, 1, 0, 'no']]
    feature_name = ['a1', 'a2', 'a3', 'a4', 'a5', 'a6']
    return dataSet, feature_name


# Option 2: load the same data from a CSV file
# def loaddata_new():
#     # path to the data file
#     csv_path = 'watermelon2.csv'
#     with open(csv_path, 'r', encoding='utf-8-sig') as fp:
#         dataSet = [row for row in csv.reader(fp)]  # csv.reader yields each row as a list of strings
#     feature_name = ['a1', 'a2', 'a3', 'a4', 'a5', 'a6']
#     return dataSet, feature_name
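# Note on Option 2: csv.reader returns every field as a string, while Option 1
# uses ints, so a conversion step (my addition, assuming the CSV mirrors the
# table above) would be needed to make the two loaders interchangeable:
# dataSet = [[int(x) for x in row[:-1]] + [row[-1]] for row in dataSet]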

# (2) Compute the entropy of a dataset: Ent(D) = -sum_k p_k * log2(p_k)


def entropy(dataSet):
    m = len(dataSet)
    # count how many samples belong to each class label
    labelCounts = {}
    for featVec in dataSet:
        currentLabel = featVec[-1]
        if currentLabel not in labelCounts.keys():
            labelCounts[currentLabel] = 0
        labelCounts[currentLabel] += 1
    # Ent(D) = -sum_k p_k * log2(p_k); this works for any number of classes,
    # and absent classes never appear in labelCounts, so log2 is only ever
    # called on a positive probability
    e = 0.0
    for key in labelCounts:
        p = labelCounts[key] / m
        e -= p * math.log2(p)
    return e
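

# Quick sanity check (my addition, not part of the lab scaffold): the dataset
# above has 8 'yes' and 9 'no' rows, so its entropy should be
# -(8/17)*log2(8/17) - (9/17)*log2(9/17), which is about 0.998.
myDat_check, _ = loaddata()
print('entropy of the full dataset:', entropy(myDat_check))  # ~0.998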


# (3) Split the dataset: keep the rows where feature `axis` equals `value`,
# and drop that feature column from the kept rows
def splitDataSet(dataSet, axis, value):
    retDataSet = []
    for featVec in dataSet:
        if featVec[axis] == value:
            # copy the row without column `axis`
            reduceFeatVec = featVec[:axis]
            reduceFeatVec.extend(featVec[axis + 1:])
            retDataSet.append(reduceFeatVec)
    return retDataSet
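

# Small demo (my addition, not in the lab scaffold): keep the rows whose
# feature 0 equals 1, and drop that column from the surviving rows.
print(splitDataSet([[1, 0, 'yes'], [2, 1, 'no'], [1, 1, 'no']], 0, 1))
# -> [[0, 'yes'], [1, 'no']]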


# Entropy helper used on the subsets below; it computes the same
# Ent(D) = -sum_k p_k * log2(p_k) as entropy() above.
def calcEnt(dataSet):
    countDataSet = len(dataSet)
    labelCounts = {}
    for featVec in dataSet:
        currentLabel = featVec[-1]
        if currentLabel not in labelCounts.keys():
            labelCounts[currentLabel] = 0
        labelCounts[currentLabel] += 1
    Ent = 0.0
    for key in labelCounts:
        prob = float(labelCounts[key]) / countDataSet
        Ent -= prob * log(prob, 2)
    return Ent


# (4) Choose the best splitting feature (largest information gain)
def chooseBestFeature(dataSet):
    n = len(dataSet[0]) - 1  # number of features; the last column is the 'yes'/'no' label
    # entropy of the whole dataset, Ent(D)
    baseEntropy = entropy(dataSet)
    bestInfoGain = 0.0  # largest information gain seen so far
    bestFeature = -1  # index of the best splitting feature
    # iterate over every feature
    for i in range(n):
        # collect all values taken by feature i
        featList = [example[i] for example in dataSet]
        uniqueVals = set(featList)
        # conditional entropy: weighted sum of subset entropies over the values of feature i
        newEntropy = 0.0
        for value in uniqueVals:
            # split the dataset on feature i == value
            subDataSet = splitDataSet(dataSet, i, value)
            prob = len(subDataSet) / float(len(dataSet))
            newEntropy += prob * calcEnt(subDataSet)
        # information gain: Gain(D, i) = Ent(D) - sum_v |D_v|/|D| * Ent(D_v)
        infoGain = baseEntropy - newEntropy
        # keep the feature with the largest gain so far
        if infoGain > bestInfoGain:
            bestInfoGain = infoGain
            bestFeature = i
    return bestFeature


myDat, feature_name = loaddata()
# On this dataset the largest gain belongs to feature index 3 (a4); its gain
# of about 0.381 matches the watermelon 2.0 texture split in the textbook.
print('best first split:', chooseBestFeature(myDat))  # prints 3
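
# Hand-checking that gain (my addition, not in the lab scaffold):
# Gain(D, a4) = Ent(D) - sum over the values v of a4 of |D_v|/|D| * Ent(D_v)
gain = entropy(myDat)
for v in set(row[3] for row in myDat):
    subset = splitDataSet(myDat, 3, v)
    gain -= len(subset) / len(myDat) * calcEnt(subset)
print('Gain(D, a4) =', round(gain, 3))  # ~0.381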


# (5) Majority vote over class labels
def classVote(classList):
    # count the occurrences of each label
    classCount = {}
    for vote in classList:
        if vote not in classCount.keys():
            classCount[vote] = 0
        classCount[vote] += 1
    # sort labels by count, descending, and return the most frequent one
    sortedClassCount = sorted(classCount.items(), key=operator.itemgetter(1), reverse=True)
    return sortedClassCount[0][0]
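

# Quick demo (my addition): the most frequent label wins the vote.
print(classVote(['yes', 'no', 'no']))  # -> no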


# (6) Recursively grow a decision tree (ID3)
def trainTree(dataSet, feature_name):
    classList = [example[-1] for example in dataSet]
    # stop if every sample in this node has the same class
    if classList.count(classList[0]) == len(classList):
        return classList[0]
    # stop if no features are left (only the label column remains)
    if len(dataSet[0]) == 1:
        return classVote(classList)
    # pick the feature with the largest information gain
    bestFeat = chooseBestFeature(dataSet)
    bestFeatName = feature_name[bestFeat]
    myTree = {bestFeatName: {}}
    featValues = [example[bestFeat] for example in dataSet]
    # the chosen feature is consumed here; note this mutates the caller's list
    del feature_name[bestFeat]
    uniqueVals = set(featValues)
    # grow one branch per observed value of the chosen feature
    for value in uniqueVals:
        sub_feature_name = feature_name[:]  # each branch recurses on its own copy
        # recurse on the subset of rows where bestFeat == value
        myTree[bestFeatName][value] = trainTree(splitDataSet(dataSet, bestFeat, value), sub_feature_name)
    return myTree


myDat, feature_name = loaddata()
myTree = trainTree(myDat, feature_name)
print(myTree)
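

# A minimal lookup sketch (my addition; the lab itself stops at printing the
# tree). It shows how the nested dict is read: the outer key names the feature
# tested at a node, the inner keys are that feature's observed values, and the
# leaves are plain 'yes'/'no' labels. The full, unmutated feature list is
# passed in, since trainTree's `del` shortened the one used for training.
def classify(tree, full_feature_names, sample):
    if not isinstance(tree, dict):  # leaf node: a class label
        return tree
    featName = next(iter(tree))  # feature tested at this node
    branches = tree[featName]
    value = sample[full_feature_names.index(featName)]
    if value not in branches:  # no branch was grown for this value
        return None
    return classify(branches[value], full_feature_names, sample)


print(classify(myTree, ['a1', 'a2', 'a3', 'a4', 'a5', 'a6'], [0, 0, 0, 0, 0, 0]))
# this is the first training row, so it should print its label, 'yes'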