Notes: <Hands-on ML with Sklearn & TF> Chapter 3
Chapter 3 - Classification
MNIST¶
- MNIST is a dataset of 70,000 small images of handwritten digits
- the "Hello World" of Machine Learning
# fetch MNIST
from sklearn.datasets import fetch_mldata
mnist = fetch_mldata('MNIST original')
# The download kept failing, so download mnist-original.mat manually into ~/scikit_learn_data/mldata/.
# (mirror found via mldata.org / Google)
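Since fetch_mldata was deprecated and later removed from scikit-learn, here is a minimal sketch of the modern replacement, assuming scikit-learn >= 0.22 for the as_frame parameter:
from sklearn.datasets import fetch_openml
import numpy as np
mnist = fetch_openml('mnist_784', version=1, as_frame=False)  # as_frame=False keeps plain NumPy arrays
X, y = mnist["data"], mnist["target"].astype(np.uint8)  # OpenML labels arrive as strings, cast to ints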
- a DESCR key describing the dataset
- a data key containing an array with one row per instance and one column per feature
- a target key containing an array with the labels
X, y = mnist["data"], mnist["target"]
print(X.shape, y.shape)  # 784 = 28x28 pixels, each valued 0 (white) to 255 (black)
# display one digit
%matplotlib inline
import matplotlib
import matplotlib.pyplot as plt
some_digit = X[12345]
some_digit_image = some_digit.reshape(28,28)
plt.imshow(some_digit_image, cmap=matplotlib.cm.binary, interpolation="nearest")
plt.axis('off')
plt.show()
# EXTRA
import numpy as np
def plot_digits(instances, images_per_row=10, **options):
    size = 28
    images_per_row = min(len(instances), images_per_row)
    images = [instance.reshape(size, size) for instance in instances]  # reshape each flat vector into a 28x28 pixel array
    n_rows = (len(instances) - 1) // images_per_row + 1
    row_images = []
    n_empty = n_rows * images_per_row - len(instances)
    images.append(np.zeros((size, size * n_empty)))  # pad the last row with blanks
    for row in range(n_rows):
        rimages = images[row * images_per_row : (row + 1) * images_per_row]
        row_images.append(np.concatenate(rimages, axis=1))  # concatenate each row of images horizontally
    image = np.concatenate(row_images, axis=0)
    plt.imshow(image, cmap=matplotlib.cm.binary, **options)
    plt.axis("off")
plt.figure(figsize=(9,9))
example_images = np.r_[X[:12000:600], X[13000:30600:600], X[30600:60000:590]]  # pick 100 example digits spread across the dataset
plot_digits(example_images, images_per_row=10)
plt.show()
y[12345]
- shuffle the training set so that:
- cross-validation folds are similar (each contains a mix of every digit)
- some algorithms are sensitive to the order of the training instances and perform poorly when they see many similar instances in a row
import numpy as np
X_train, X_test, y_train, y_test = X[:60000], X[60000:], y[:60000], y[60000:]
shuffle_index = np.random.permutation(60000)
X_train, y_train = X_train[shuffle_index], y_train[shuffle_index]
Train a Binary Classifier¶
- Deciding whether an image is a particular digit is a binary classification problem, e.g. "5 or not-5".
- Stochastic Gradient Descent (SGD) classifier
- handles training instances independently, one at a time
y_train_5 = (y_train == 5)
y_test_5 = (y_test == 5)
from sklearn.linear_model import SGDClassifier
sgd_clf = SGDClassifier(random_state = 42)
sgd_clf.fit(X_train, y_train_5)
sgd_clf.predict([X[36000]])
Performance Measures¶
Measuring Accuracy Using Cross-Validation
- implement cross-validation by hand when you need more control
from sklearn.model_selection import StratifiedKFold
from sklearn.base import clone
# StratifiedKFold performs stratified sampling: each fold preserves the class ratios
skfolds = StratifiedKFold(n_splits=3, shuffle=True, random_state=42)  # newer sklearn requires shuffle=True when random_state is set
for train_index, test_index in skfolds.split(X_train, y_train_5):
    clone_clf = clone(sgd_clf)
    X_train_folds = X_train[train_index]
    y_train_folds = y_train_5[train_index]
    X_test_fold = X_train[test_index]
    y_test_fold = y_train_5[test_index]
    clone_clf.fit(X_train_folds, y_train_folds)
    y_pred = clone_clf.predict(X_test_fold)
    n_correct = sum(y_pred == y_test_fold)
    print(n_correct / len(y_pred))  # ratio of correct predictions
# use cross_val_score
from sklearn.model_selection import cross_val_score
cross_val_score(sgd_clf, X_train, y_train_5, cv=3, scoring='accuracy')
This accuracy is not as impressive as it looks: because only about 10% of the images are 5s, a classifier that always predicts "not-5" still scores around 90%.
from sklearn.base import BaseEstimator
class Never5Classifier(BaseEstimator):
    def fit(self, X, y=None):
        pass
    def predict(self, X):
        return np.zeros((len(X), 1), dtype=bool)
never_5_clf = Never5Classifier()
cross_val_score(never_5_clf, X_train, y_train_5, cv=3, scoring='accuracy')
Confusion Matrix
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import cross_val_predict
y_train_pred = cross_val_predict(sgd_clf, X_train, y_train_5, cv=3)
confusion_matrix(y_train_5, y_train_pred)
y_train_perfect_predictions = y_train_5  # pretend we reached perfection
confusion_matrix(y_train_5, y_train_perfect_predictions)
Precision and Recall
from sklearn.metrics import precision_score, recall_score
print(precision_score(y_train_5, y_train_pred))
print(recall_score(y_train_5, y_train_pred))
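To tie these scores back to the confusion matrix above, a minimal sketch (precision = TP / (TP + FP), recall = TP / (TP + FN)):
tn, fp, fn, tp = confusion_matrix(y_train_5, y_train_pred).ravel()
print(tp / (tp + fp))  # precision: accuracy of the positive predictions
print(tp / (tp + fn))  # recall: fraction of actual positives that were detected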
# F1 is the harmonic mean of precision and recall
from sklearn.metrics import f1_score
f1_score(y_train_5, y_train_pred)
F1 favors classifiers with similar precision and recall, but that is not always what you want. Sometimes you prefer high precision at the cost of low recall ("better to wrongly flag a hundred than let one slip through"), e.g. rating videos as safe for kids; in other cases the opposite holds, e.g. catching thieves, where high recall matters more than precision.
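For reference, a quick sketch of the harmonic mean that f1_score computes:
p = precision_score(y_train_5, y_train_pred)
r = recall_score(y_train_5, y_train_pred)
2 * p * r / (p + r)  # harmonic mean: low if either precision or recall is low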
Precision / Recall Tradeoff
- lowering the threshold increases recall and reduces precision
- sklearn does not let you set the threshold directly, but it gives you access to the decision scores it uses to make predictions
some_digit_index = 36000
some_digit = X[some_digit_index]
y_scores = sgd_clf.decision_function([some_digit])
y_scores
threshold = 0
y_some_digit_pred = (y_scores > threshold)
y_some_digit_pred
threshold = 200000
y_some_digit_pred = (y_scores > threshold)
y_some_digit_pred
decide which threshold to use
# get the decision scores of all training instances via cross-validation
y_scores = cross_val_predict(sgd_clf, X_train, y_train_5, cv=3, method="decision_function")
from sklearn.metrics import precision_recall_curve
# compute precision and recall for every possible threshold
precisions, recalls, thresholds = precision_recall_curve(y_train_5, y_scores)
# plot them
def plot_precision_recall_vs_threshold(precisions, recalls, thresholds):
    plt.plot(thresholds, precisions[:-1], "b--", label="Precision")  # precisions/recalls have one extra element, hence [:-1]
    plt.plot(thresholds, recalls[:-1], "g-", label="Recall")
    plt.xlabel("Threshold")
    plt.legend(loc="upper left")
    plt.ylim([0, 1])
plot_precision_recall_vs_threshold(precisions, recalls, thresholds)
plt.show()
y_train_pred_90 = (y_scores > 250000)
precision_score(y_train_5, y_train_pred_90)
recall_score(y_train_5, y_train_pred_90)
Just set a high enough threshold to create a classifier with virtually any precision you want, as sketched below.
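Rather than eyeballing the plot for a value like 250000, a sketch of picking the lowest threshold that guarantees at least 90% precision, using the precisions/thresholds arrays computed above:
idx = np.argmax(precisions >= 0.90)  # argmax returns the first True, i.e. the first threshold reaching 90% precision
threshold_90_precision = thresholds[idx]
y_train_pred_90 = (y_scores >= threshold_90_precision)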
ROC (receiver operating characteristic) curve: plots the true positive rate against the false positive rate
from sklearn.metrics import roc_curve
fpr, tpr, thresholds = roc_curve(y_train_5, y_scores)
def plot_roc_curve(fpr, tpr, label=None):
    plt.plot(fpr, tpr, linewidth=2, label=label)
    plt.plot([0, 1], [0, 1], 'k--')  # the diagonal is a purely random classifier
    plt.axis([0, 1, 0, 1])
    plt.xlabel('False Positive Rate')
    plt.ylabel('True Positive Rate')
plot_roc_curve(fpr, tpr)
plt.show()
# compute the area under the curve (AUC)
from sklearn.metrics import roc_auc_score
roc_auc_score(y_train_5, y_scores)
- prefer the PR curve when the positive class is rare or when you care more about the false positives than the false negatives (see the sketch below)
- otherwise use the ROC curve (and ROC AUC)
- sklearn classifiers provide decision_function() or predict_proba() (which returns an array with one row per instance and one column per class, each entry holding the probability that the instance belongs to that class)
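For comparison, the PR curve itself can be drawn straight from the precisions/recalls arrays already computed; a minimal sketch:
plt.plot(recalls, precisions, "b-", linewidth=2)
plt.xlabel("Recall")
plt.ylabel("Precision")
plt.axis([0, 1, 0, 1])
plt.show()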
from sklearn.ensemble import RandomForestClassifier
forest_clf = RandomForestClassifier(random_state = 42)
y_probas_forest = cross_val_predict(forest_clf, X_train, y_train_5, cv=3, method="predict_proba")
# use the probability of the positive class as the score
y_scores_forest = y_probas_forest[:, 1]
fprs_forest, tprs_forest, thresholds_forest = roc_curve(y_train_5, y_scores_forest)
plt.plot(fpr, tpr, 'b:', label="SGD")
plot_roc_curve(fprs_forest, tprs_forest, "Random Forest")
plt.legend(loc='lower right')
plt.show()
roc_auc_score(y_train_5, y_scores_forest)
- how to train a binary classifier
- choose the right metric for the task
- evaluate your classifiers using cross-validation
- select the precision/recall tradeoff that fits your needs, and compare various models using ROC curves and ROC AUC scores
Multiclass Classification¶
- some algorithms support multiclass classification natively
- others can emulate it with a strategy built on multiple binary classifiers
- one-versus-all (OvA): train one binary classifier per class; to classify an image, run all of them and pick the class with the highest score
- one-versus-one (OvO): train one binary classifier per pair of classes (1 vs 2, 1 vs 3, ..., 8 vs 9, ...), N(N-1)/2 classifiers in total
- OvO suits algorithms that scale poorly with the size of the training set, e.g. SVM, since each classifier trains on only a subset; for most others OvA is preferred
- when you use a binary classifier for a multiclass task, sklearn automatically applies OvA or OvO as appropriate
# try SGDClassifier (sklearn detects the multiclass task and runs OvA under the hood, training 10 binary classifiers)
sgd_clf.fit(X_train, y_train)
sgd_clf.predict([some_digit])
some_digit_scores = sgd_clf.decision_function([some_digit])
some_digit_scores
np.argmax(some_digit_scores)
sgd_clf.classes_
sgd_clf.classes_[5]  # conveniently, the class at index 5 happens to be the digit 5 itself
# force sklearn to use OvO or OvA: use OneVsOneClassifier or OneVsRestClassifier
from sklearn.multiclass import OneVsOneClassifier
ovo_clf = OneVsOneClassifier(SGDClassifier(random_state=42))
ovo_clf.fit(X_train, y_train)
ovo_clf.predict([some_digit])
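A quick sanity check that OvO really trained N(N-1)/2 binary classifiers (estimators_ holds the fitted pairwise classifiers):
len(ovo_clf.estimators_)  # 10 classes -> 10 * 9 / 2 = 45 pairwise classifiers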
forest_clf.fit(X_train, y_train)
forest_clf.predict([some_digit])
forest_clf.predict_proba([some_digit])
cross_val_score(sgd_clf, X_train, y_train, cv=3, scoring='accuracy')
# simply scaling the inputs with StandardScaler already improves accuracy
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
X_train_scaled = scaler.fit_transform(X_train.astype(np.float64))
cross_val_score(sgd_clf, X_train_scaled, y_train, cv=3, scoring='accuracy')
Error Analysis¶
- look at the confusion matrix
- focus the plot on the errors:
- divide each value in the confusion matrix by the number of images in the corresponding class
- fill the diagonal with zeros to keep only the errors
#1-1
y_train_pred = cross_val_predict(sgd_clf, X_train_scaled, y_train, cv=3)
conf_mx = confusion_matrix(y_train, y_train_pred)
conf_mx
plt.matshow(conf_mx, cmap=plt.cm.gray)
plt.show()
Most images lie on the main diagonal, which means they were classified correctly. The 5s look slightly darker, meaning either there are fewer images of 5s in the dataset or the classifier does not perform as well on 5s.
#2-1
row_sums = conf_mx.sum(axis=1, keepdims=True)
norm_conf_mx = conf_mx/row_sums
#2-2
np.fill_diagonal(norm_conf_mx, 0)
plt.matshow(norm_conf_mx, cmap=plt.cm.gray)
plt.show()
- rows represent actual classes, columns represent predicted classes
- effort should go into improving 8s and 9s and fixing the 3/5 confusion
- e.g. engineer a new feature such as the number of closed loops in a digit
cl_a, cl_b = 3, 5
X_aa = X_train[(y_train == cl_a) & (y_train_pred == cl_a)]
X_ab = X_train[(y_train == cl_a) & (y_train_pred == cl_b)]
X_ba = X_train[(y_train == cl_b) & (y_train_pred == cl_a)]
X_bb = X_train[(y_train == cl_b) & (y_train_pred == cl_b)]
plt.figure(figsize=(8,8))
plt.subplot(221)
plot_digits(X_aa[:25], images_per_row=5)
plt.subplot(222)
plot_digits(X_ab[:25], images_per_row=5)
plt.subplot(223)
plot_digits(X_ba[:25], images_per_row=5)
plt.subplot(224)
plot_digits(X_bb[:25], images_per_row=5)
plt.show()
Look at how the top stroke joins the bottom arc: when the joining line leans to the left the digit reads as a 5, when it leans to the right it reads as a 3.
Multilabel Classification¶
from sklearn.neighbors import KNeighborsClassifier #support multilabel classification
y_train_large = (y_train >= 7)
y_train_odd = (y_train % 2 == 1)
y_multilabel = np.c_[y_train_large, y_train_odd]
knn_clf = KNeighborsClassifier()
knn_clf.fit(X_train, y_multilabel)
knn_clf.predict([some_digit])
# evaluate with the F1 score, averaged across all labels
from sklearn.metrics import f1_score
y_train_knn_pred = cross_val_predict(knn_clf, X_train, y_multilabel, cv=3)
f1_score(y_multilabel, y_train_knn_pred, average='macro')
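If the labels are not equally frequent, one option is to weight each label by its support; a sketch using the same predictions:
f1_score(y_multilabel, y_train_knn_pred, average='weighted')  # each label weighted by its number of positive instances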
Multioutput Classification¶
import numpy.random as rnd
noise = rnd.randint(0, 100, (len(X_train), 784))
X_train_mod = X_train + noise  # noisy input images
y_train_mod = X_train  # target = the original clean image
noise = rnd.randint(0, 100, (len(X_test), 784))
X_test_mod = X_test + noise
y_test_mod = X_test
def plot_digit(data):
    image = data.reshape(28, 28)
    plt.imshow(image, cmap=matplotlib.cm.binary, interpolation="nearest")
    plt.axis("off")
some_index = 5500
plt.subplot(121); plot_digit(X_test_mod[some_index])
plt.subplot(122); plot_digit(y_test_mod[some_index])
plt.show()
knn_clf.fit(X_train_mod, y_train_mod)
clean_digit = knn_clf.predict([X_test_mod[some_index]])
plot_digit(clean_digit)