Python数据分析易错知识点归纳(六):机器学习
六、机器学习
分类和聚类的区别
- 分类是有监督学习,聚类是无监督学习
- 分类算法用于预测新样本,聚类用于理解已知数据
标准化/归一化
type_se_num = type_se[type_se != 'object']  # categorical (object-dtype) columns are excluded from scaling
type_se_num.drop('target', inplace=True)  # the target column is excluded from scaling
# Standardization (z-score)
# Method 1: standardize with a hand-written function
def normalize(se):
    """Return the z-score standardization of a Series.

    Uses pandas' std() (ddof=1, consistent with describe()), NOT np.std()
    (which defaults to ddof=0).
    """
    return (se - se.mean()) / se.std()
for col_name in type_se_num.index:
    data[col_name] = normalize(data[col_name])
# To standardize every column at once, use apply directly
# (BUG FIX: the original read `lambda: x:`, which is a SyntaxError)
data = data.apply(lambda x: (x - x.mean()) / x.std())
# Method 2: standardize with scikit-learn
from sklearn.preprocessing import StandardScaler
data_norm = StandardScaler().fit_transform(data[type_se_num.index])
df_norm = pd.DataFrame(data_norm, columns=type_se_num.index)
for col_name in type_se_num.index:
    data[col_name] = df_norm[col_name]
# Min-max normalization to [0, 1]
# (BUG FIX: the original used `lambda: x:` and was missing the closing parenthesis)
data = data.apply(lambda x: (x - x.min()) / (x.max() - x.min()))
降维
from sklearn.decomposition import PCA
def matrixDimensionalityReduction(data, contributionRate=.95):
    """Project `data` onto the smallest number of principal components whose
    cumulative explained-variance ratio reaches `contributionRate`.

    Parameters
    ----------
    data : array-like / DataFrame of shape (n_samples, n_features)
    contributionRate : float, default 0.95
        Target cumulative explained-variance ratio in (0, 1].

    Returns
    -------
    pd.DataFrame holding the projected data, one column per kept component.
    """
    pca = PCA()
    pca.fit(data)
    cum_ratio = np.cumsum(pca.explained_variance_ratio_)
    # Smallest count such that cum_ratio[count - 1] >= contributionRate.
    # BUG FIX: the original loop left count == len(cum_ratio) - 1 when the
    # threshold was never reached; clamp to the full component count instead.
    count = int(np.searchsorted(cum_ratio, contributionRate) + 1)
    count = min(count, len(cum_ratio))
    # Refit keeping only the first `count` components, then project the data.
    pca = PCA(count)
    pca.fit(data)
    low_d = pca.transform(data)
    return pd.DataFrame(low_d)
X = data.drop('target', axis=1)  # feature matrix: everything except the label column
y = data['target']  # label column
X = matrixDimensionalityReduction(X)  # PCA-reduce the features (keeps 95% variance by default)
拆分测试集和训练集
# Method 1: select the wanted feature columns directly
X = data[[ 'A1', 'A2', 'A3', 'A4', 'A5', 'A6', 'A7', 'A8', 'A9', 'A10', \
'A11', 'A12', 'A13', 'A14', 'A15', 'A17', 'A18', 'A16_0', 'A16_1', 'A16_2']]
y = data['price']
# Method 2: drop the unwanted columns
X = data.drop(['编号', '房屋朝向', 'Label'], axis=1)  # remember to drop the label column too
y = data['Label']
from sklearn.model_selection import train_test_split
# 70/30 split; random_state fixes the seed so the split is reproducible
train_X, test_X, train_y, test_y = train_test_split(X, y, test_size=0.3, random_state=6)
分类模型
# Logistic regression
from sklearn.linear_model import LogisticRegression
model=LogisticRegression()
model.fit(train_X,train_y)
# .score() on a classifier returns the mean accuracy on the given test set
print('逻辑回归正确率=',model.score(test_X,test_y))
# Random forest
from sklearn.ensemble import RandomForestClassifier
model=RandomForestClassifier()
model.fit(train_X,train_y)
print('随机森林正确率=',model.score(test_X,test_y))
# Support vector machines
from sklearn.svm import SVC,LinearSVC
model=SVC()
model.fit(train_X,train_y)
print('支持向量机SVC正确率=',model.score(test_X,test_y))
model=LinearSVC()
model.fit(train_X,train_y)
print('支持向量机LinearSVC正确率=',model.score(test_X,test_y))
# Gradient-boosted decision trees
from sklearn.ensemble import GradientBoostingClassifier
model=GradientBoostingClassifier()
model.fit(train_X,train_y)
print('梯度提升决策树正确率=',model.score(test_X,test_y))
# K-nearest neighbors
from sklearn.neighbors import KNeighborsClassifier
model=KNeighborsClassifier()
model.fit(train_X,train_y)
print('K近邻算法正确率=',model.score(test_X,test_y))
# Gaussian naive Bayes
from sklearn.naive_bayes import GaussianNB
model=GaussianNB()
model.fit(train_X,train_y)
print('朴素贝叶斯算法正确率=',model.score(test_X,test_y))
回归模型
# Linear regression
from sklearn.linear_model import LinearRegression
model=LinearRegression()
model.fit(train_X,train_y)
# NOTE: for regressors, .score() returns R² (coefficient of determination), not accuracy
print('线性回归正确率=',model.score(test_X,test_y))
# Random forest regressor
from sklearn.ensemble import RandomForestRegressor
model=RandomForestRegressor()
model.fit(train_X,train_y)
print('随机森林正确率=',model.score(test_X,test_y))
# Decision tree regressor
from sklearn.tree import DecisionTreeRegressor
model=DecisionTreeRegressor()
model.fit(train_X,train_y)
print('决策树正确率=',model.score(test_X,test_y))
模型结果评估
from sklearn.metrics import mean_squared_error, r2_score
predict_y = model.predict(test_X)  # `model` is the best algorithm chosen above
print('MSE Score (Test): %f' % mean_squared_error(test_y, predict_y))  # mean squared error
print('R2 Score (Test): %f' % r2_score(test_y, predict_y))  # R² = 1 - (residual sum of squares / total sum of squares)
网格搜索
# Random forest baseline
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_squared_error, r2_score
model=RandomForestRegressor()
model.fit(train_X,train_y)
print('随机森林正确率=',model.score(test_X,test_y))
# Fine-tune a promising model with grid search (worthwhile when the score is already ~80%)
from sklearn.model_selection import GridSearchCV
# Two sub-grids: one with bootstrapping (the default), one without
param_grid=[{"n_estimators":[3,10,30],
'max_features':[2,4,6,8]},
{'bootstrap':[False],
'n_estimators':[3,10,20],
'max_features':[2,3,4,5]}]
# 5-fold cross-validation; scoring is negated MSE (GridSearchCV maximizes its score)
grid_search=GridSearchCV(model, param_grid, cv=5, scoring='neg_mean_squared_error')
grid_search.fit(train_X,train_y)
print('最佳超参数:')
print(grid_search.best_params_)
print('最佳随机森林估算器:')
print(grid_search.best_estimator_)
print('全部估算过程:')
cvres = grid_search.cv_results_
# mean_test_score is negative MSE, so sqrt(-score) is the RMSE of each candidate
for mean_score, params in zip(cvres["mean_test_score"], cvres["params"]):
    print(np.sqrt(-mean_score), params)
# Continue predicting with the best estimator found
model = grid_search.best_estimator_
predict_y = model.predict(test_X)  # `model` is the best algorithm chosen above
test_mse = mean_squared_error(test_y, predict_y)
test_r2 = r2_score(test_y, predict_y)
print('均方误差MSE: %f' % test_mse)
print('R2系数: %f' % test_r2)
聚类
k = 4  # number of clusters
iteration = 500  # maximum number of k-means iterations
from sklearn.cluster import KMeans
# BUG FIX: the `n_jobs` argument was deprecated in scikit-learn 0.23 and removed
# in 1.0 — KMeans no longer accepts it. `n_init=10` pins the historical default
# (newer versions default to n_init='auto').
model = KMeans(n_clusters=k, max_iter=iteration, n_init=10)
model.fit(data_normalize)  # run the clustering
# Compact summary of the result:
#r1 = pd.Series(model.labels_).value_counts()  # sample count per cluster
#r2 = pd.DataFrame(model.cluster_centers_)  # cluster centers
#r = pd.concat([r2, r1], axis = 1)  # horizontal concat (axis=0 would stack vertically)
#r.columns = list(data_normalize.columns) + [u'类别数目']  # rename the header
#print(r)
## Detailed output: each original sample together with its assigned cluster
r = pd.concat([data_gr, pd.Series(model.labels_, index = data_gr.index)], axis = 1)
r.columns = list(data_gr.columns) + [u'聚类类别']  # rename the header
#r.to_excel(outputfile)  # save the result
def density_plot(df, index):
    """Plot a per-column kernel-density estimate for one cluster.

    Parameters:
        df: DataFrame holding the samples that belong to a single cluster.
        index: the cluster label, shown in the figure title.

    Returns the matplotlib.pyplot module so the caller can chain .savefig().
    """
    import matplotlib.pyplot as plt
    plt.rcParams['font.sans-serif'] = ['SimHei']  # render Chinese labels correctly
    plt.rcParams['axes.unicode_minus'] = False  # render the minus sign correctly
    # subplots=True -> one KDE axis per column; p is the array of axes
    p = df.plot(kind='kde', linewidth = 2, subplots = True, sharex = False)
    plt.title('客户群=%d, 聚类数量=%d' % (index, len(df)), x=0.5, y=3.5)
    [p[i].set_ylabel(u'密度') for i in range(len(df.columns))]
    plt.legend()
    return plt
pic_output = '类别_'  # filename prefix for the density-plot PNGs
for i in range(k):
    # one figure per cluster: plot that cluster's samples and save as PNG
    density_plot(data_gr[r[u'聚类类别']==i], i).savefig(u'%s%s.png' %(pic_output, i))
机器学习分析步骤
-
提出问题
-
理解数据
- 采集数据
- 导入数据
- 查看数据集信息
- describe()描述统计信息
- info()查看缺失数据
-
数据清洗
- 数据预处理
- 缺失值填充fillna
- 删除缺失值dropna
- 特征工程
- 特征提取(one-hot独热编码get_dummies()、map函数)(需对所有类别型字段进行独热编码处理!!!)
- 特征选择(相关系数corr())
- 数据预处理
-
构建模型
- 训练数据和测试数据拆分
- 机器学习算法选取
-
模型评估(score)
-
方案实施(撰写分析报告)
God will send the rain when you are ready. You need to prepare your field to receive it.