『Kaggle』Sklearn中几种分类器的调用&词袋建立

几种分类器的基本调用方法

本节的目的是基本地使用这些工具,达到熟悉sklearn的流程而已,既不会涉及超参数的选择原理(后面会进行介绍),也不会介绍数学原理(应该不会涉及了,打公式超麻烦,而且近期也没有系统的学习机器学习数学原理的计划,下学期可能会重拾cs229,当然如果在上课展示或者实验室任务中用到的特定方法还是很可能用博客记录一下的,笑)。 

Logistic & SGDC

'''Logistic & SGDC'''

'''数据预处理'''
'''Logistic Regression & SGD classifier on the Wisconsin breast-cancer data.'''

# --- Data preprocessing ---
import numpy as np
import pandas as pd

column_names = ['Sample code number', 'Clump Trickness', 'Uniformity of Cell Size', 'Uniformity of Cell Shape',
                'Marginal Adhesion', 'Single Epithelial Cell Size', 'Bare Nuclei', 'Bland Chromatin',
                'Normal Nucleoli', 'Mitoses', 'Class']
# Raw string: same path value as before, but no risk of backslash escapes in a Windows path.
data = pd.read_csv(r'C:\Projects\python3_5\Keggle\breast-cancer-wisconsin.csv', names=column_names)
# print(data.shape)  # (699, 11)
data = data.replace(to_replace='?', value=np.nan)  # missing values are encoded as '?'; map them to NaN
data = data.dropna(how='any')                      # drop every row containing a missing value
print(data.shape, '\n', '-----' * 15)  # fixed: '\r' only returns the carriage; '\n' starts a new line


# --- Train/test split ---
from sklearn.model_selection import train_test_split

# Columns 1..9 are the features, column 10 ('Class') is the label.
X_train, X_test, y_train, y_test = train_test_split(data[column_names[1:10]], data[column_names[10]],
                                                    test_size=0.25, random_state=33)
print(y_train.value_counts())
print(y_test.value_counts())


# --- Standardize features and fit the classifiers ---
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LogisticRegression, SGDClassifier

# Scale each feature to zero mean / unit variance.
ss = StandardScaler()
X_train = ss.fit_transform(X_train)
# Fixed: use transform() so the test set is scaled with the statistics learned on the
# training set; fit_transform() here would refit on the test data (leakage / mismatch).
X_test = ss.transform(X_test)

lr = LogisticRegression()
lr.fit(X_train, y_train)
lr_y_predict = lr.predict(X_test)

sgdc = SGDClassifier()
sgdc.fit(X_train, y_train)
sgdc_y_predict = sgdc.predict(X_test)


# --- Model evaluation ---
from sklearn.metrics import classification_report

print('LR准确率:', lr.score(X_test, y_test))
print(classification_report(y_test, lr_y_predict, target_names=['Benign', 'Malignant']))
print('SGDC准确率:', sgdc.score(X_test, y_test))
print(classification_report(y_test, sgdc_y_predict, target_names=['Benign', 'Malignant']))

# SGDClassifier's accuracy fluctuates a lot between runs; LogisticRegression is stable
# and usually more accurate here.
# recall: of the samples that are actually positive, the fraction predicted positive.
# precision: of the samples predicted positive, the fraction that are actually positive.
# (The original comments had these two definitions swapped.)

 

SVM

'''SVM'''

'''Linear SVM on the handwritten-digits dataset.'''

# --- Load data ---
from sklearn.datasets import load_digits

digits = load_digits()
print(digits.data.shape)


# --- Train/test split ---
from sklearn.model_selection import train_test_split

X_train, X_test, y_train, y_test = train_test_split(digits.data, digits.target, test_size=0.25, random_state=33)
print(y_train.shape, '\n', y_test.shape)  # fixed: '\r' only returns the carriage; '\n' starts a new line
# print(y_test.value_counts()) fails here: value_counts() is a pandas method,
# but y_test is a NumPy array.


# --- Standardize features and classify ---
from sklearn.preprocessing import StandardScaler
from sklearn.svm import LinearSVC  # SVM classifier with a linear kernel

ss = StandardScaler()
X_train = ss.fit_transform(X_train)
# Fixed: transform() reuses the training-set statistics; fit_transform() here would
# refit the scaler on the test data (leakage / inconsistent scaling).
X_test = ss.transform(X_test)

lsvc = LinearSVC()
lsvc.fit(X_train, y_train)
y_predict = lsvc.predict(X_test)


# --- Model evaluation ---
from sklearn.metrics import classification_report

print('准确率:', lsvc.score(X_test, y_test))
# target_names must be strings, so convert the integer digit labels.
print(classification_report(y_test, y_predict, target_names=digits.target_names.astype(str)))

  

 朴素贝叶斯

'''朴素贝叶斯'''

'''Multinomial naive Bayes on the 20-newsgroups text corpus.'''

# --- Load the dataset (downloaded on first use) ---
from sklearn.datasets import fetch_20newsgroups

news = fetch_20newsgroups(subset='all')
print(len(news.data))
print(news.data[0])


# --- Split into training and test sets ---
from sklearn.model_selection import train_test_split

X_train, X_test, y_train, y_test = train_test_split(news.data, news.target, test_size=0.25, random_state=33)


# --- Bag-of-words vectorization of the raw text ---
from sklearn.feature_extraction.text import CountVectorizer

vec = CountVectorizer()
X_train = vec.fit_transform(X_train)  # learn the vocabulary from the training split only
X_test = vec.transform(X_test)        # encode the test split with that same vocabulary
# print('-----' * 15)
# print(X_train[0])


# --- Fit the naive Bayes classifier ---
from sklearn.naive_bayes import MultinomialNB

nb_model = MultinomialNB()
nb_model.fit(X_train, y_train)
predicted = nb_model.predict(X_test)


# --- Model evaluation ---
from sklearn.metrics import classification_report

print('准确率:', nb_model.score(X_test, y_test))
print(classification_report(y_test, predicted, target_names=news.target_names))

  

K近邻

'''K近邻'''

'''K-nearest-neighbors classifier on the iris dataset.'''

# --- Load the dataset ---
from sklearn.datasets import load_iris

iris = load_iris()
print(iris.data.shape)
print(iris.DESCR)

# --- Train/test split ---
from sklearn.model_selection import train_test_split

X_train, X_test, y_train, y_test = train_test_split(iris.data, iris.target, test_size=0.25, random_state=33)

# --- Preprocessing: standardize features ---
from sklearn.preprocessing import StandardScaler

ss = StandardScaler()
X_train = ss.fit_transform(X_train)
# Fixed: transform() applies the training-set mean/variance to the test set;
# fit_transform() here would refit on the test data (leakage / inconsistent scaling).
X_test = ss.transform(X_test)

# --- K-nearest-neighbors classification ---
from sklearn.neighbors import KNeighborsClassifier

knc = KNeighborsClassifier()
knc.fit(X_train, y_train)
y_predict = knc.predict(X_test)

# --- Model evaluation ---
from sklearn.metrics import classification_report

print(knc.score(X_test, y_test))
print(classification_report(y_test, y_predict, target_names=iris.target_names))

sklearn.feature_extraction.text.CountVectorizer

建立词袋的方法:可以通过 binary 参数(True/False)控制是用 0/1 标记单词是否出现,还是记录每个单词的出现次数。

print(vec.get_feature_names())
输出的是一个list,元素是很多字符串,表示特征

print(X_train[0])
输出的是一个稀疏矩阵的第0行(第一个文件),形式如下,

  (0, 57011) 1
  (0, 96571) 1
  (0, 11905) 1
  : :
  (0, 88624) 1
  (0, 54291) 1
  (0, 137926) 2

 为了直观理解,我们这样,

 

print(X_train[0][0, 57011])
会输出1,所以这真的是个矩阵(废话),而且稀疏矩阵提取元素是有问题的,
print(X_train[0][57011])
会报错,
print(X_train[0, 57011])
就没问题,
X_train.toarray()
可以转化为np数组

 

posted @ 2017-09-08 17:12  叠加态的猫  阅读(1111)  评论(0编辑  收藏  举报