Machine Learning: Handwritten Digit Recognition (Small Dataset)
1. The Handwritten Digits Dataset
- from sklearn.datasets import load_digits
- digits = load_digits()
```python
from sklearn.datasets import load_digits

digits = load_digits()
```
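For orientation, here is a minimal inspection sketch (not part of the original code): `load_digits` contains 1797 grayscale images of size 8×8 with labels 0-9.

```python
import matplotlib.pyplot as plt
from sklearn.datasets import load_digits

digits = load_digits()
print(digits.data.shape)    # (1797, 64)  -- each image flattened to 64 features
print(digits.images.shape)  # (1797, 8, 8) -- the same data as 8x8 images
print(digits.target[:10])   # labels are the digits 0-9

# Display the first sample with its label
plt.imshow(digits.images[0], cmap='gray_r')
plt.title(f'label: {digits.target[0]}')
plt.show()
```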
2. Image Data Preprocessing
- x: normalize with MinMaxScaler()
- y: one-hot encode with OneHotEncoder() or to_categorical (an alternative sketch using to_categorical follows the preprocessing code below)
- split into training and test sets
- reshape into the tensor structure the CNN expects
```python
import numpy as np
from sklearn.datasets import load_digits
from sklearn.preprocessing import MinMaxScaler, OneHotEncoder
from sklearn.model_selection import train_test_split

digits = load_digits()

# Data preparation
X_data = digits.data.astype(np.float32)
Y_data = digits.target.astype(np.float32).reshape(-1, 1)  # reshape Y_data into a single column

scale = MinMaxScaler()  # normalization
X_data = scale.fit_transform(X_data)
print('MinMaxScaler_trans_X_data:')
print(X_data)

Y = OneHotEncoder().fit_transform(Y_data).todense()  # one-hot encoding
print('one-hot_Y:')
print(Y)

# Reshape into image format (samples, 8, 8, 1)
X = X_data.reshape(-1, 8, 8, 1)

# Split into training and test sets
X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=0.2, random_state=0, stratify=Y)
print(X_train.shape, X_test.shape, y_train.shape, y_test.shape)
```
The printed output (screenshots omitted) shows, in order: the normalized X_data, the one-hot encoded Y, the reshaped image tensor, and the shapes of the train/test split.
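As noted in the preprocessing list, `tf.keras.utils.to_categorical` is an alternative to OneHotEncoder. A minimal sketch (not from the original post):

```python
import numpy as np
from sklearn.datasets import load_digits
from tensorflow.keras.utils import to_categorical

digits = load_digits()
# to_categorical takes integer class labels and returns a dense one-hot matrix
Y = to_categorical(digits.target, num_classes=10)
print(Y.shape)  # (1797, 10)
print(Y[:3])    # one-hot rows for the first three labels
```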
3. Designing the Convolutional Neural Network
4. Model Training
```python
# Import the required model class and layers
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPool2D

# Build the model
model = Sequential()
ks = (3, 3)  # kernel size

# First convolutional layer
# Only the first layer needs an explicit input shape; the framework infers the rest
model.add(Conv2D(filters=16, kernel_size=ks, padding='same',
                 input_shape=X_train.shape[1:], activation='relu'))
# Pooling layer
model.add(MaxPool2D(pool_size=(2, 2)))
# Dropout to reduce overfitting
model.add(Dropout(0.25))

# Second convolutional layer
model.add(Conv2D(filters=32, kernel_size=ks, padding='same', activation='relu'))
model.add(MaxPool2D(pool_size=(2, 2)))
model.add(Dropout(0.25))

# Third convolutional layer
model.add(Conv2D(filters=64, kernel_size=ks, padding='same', activation='relu'))
# Fourth convolutional layer
model.add(Conv2D(filters=128, kernel_size=ks, padding='same', activation='relu'))
model.add(MaxPool2D(pool_size=(2, 2)))
model.add(Dropout(0.25))

model.add(Flatten())                        # flatten layer
model.add(Dense(128, activation='relu'))    # fully connected layer
model.add(Dropout(0.25))
model.add(Dense(10, activation='softmax'))  # output layer with softmax

model.summary()

model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
train_history = model.fit(x=X_train, y=y_train, validation_split=0.2,
                          batch_size=300, epochs=10, verbose=2)
score = model.evaluate(X_test, y_test)
print(score)
```
Per-epoch training output (screenshot omitted).
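As a quick sanity check on the architecture above (a sketch added here, not part of the original post): padding='same' keeps the spatial size of each convolution, so only the three pooling layers shrink the 8×8 input.

```python
# With padding='same' and stride 1, Conv2D preserves the spatial size;
# each 2x2 MaxPool2D halves it: 8x8 -> 4x4 -> 2x2 -> 1x1.
size = 8
for pool in range(1, 4):  # the model above contains three MaxPool2D layers
    size //= 2
    print(f'after pooling layer {pool}: {size}x{size}')
# The last convolution has 128 filters, so Flatten() produces 1 * 1 * 128 = 128 features.
```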
5. Model Evaluation
- model.evaluate()
- cross tabulation and confusion matrix
- pandas.crosstab
- seaborn.heatmap
```python
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns

results = model.evaluate(X_test, y_test)
print('evaluation results:', results)

# Predicted classes (predict_classes was removed in newer Keras; argmax over predict gives the same labels)
y_pred = np.argmax(model.predict(X_test), axis=1)
print('predictions:', y_pred[:10])

# True labels: collapse the one-hot test matrix back to class indices
y_true = np.asarray(y_test).argmax(axis=1)

# Cross tabulation comparing predictions with the true labels
c = pd.crosstab(y_true, y_pred, rownames=['True'], colnames=['Predict'])
print(c)

# Confusion matrix as a heat map
sns.heatmap(c, annot=True, cmap='RdGy', linewidths=0.2, linecolor='grey')
plt.show()
```
Cross tabulation of predictions vs. true labels, and the corresponding heat map (screenshots omitted).
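For comparison with the crosstab/heatmap approach above, scikit-learn's own metrics give the same confusion matrix plus per-class precision and recall. This is an optional sketch, not part of the original post; it reuses the `y_true` and `y_pred` arrays computed above.

```python
from sklearn.metrics import confusion_matrix, classification_report

print(confusion_matrix(y_true, y_pred))
print(classification_report(y_true, y_pred, digits=3))
```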
The complete code is as follows:
```python
# -*- coding:utf-8 -*-
# Class: Software Engineering, Class 1 of 2017
# Developer: 爱飞的大白鲨
# Date: 2020/6/8 10:20
# File: 手写数据集及预处理.py

import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
from sklearn.datasets import load_digits
from sklearn.preprocessing import MinMaxScaler, OneHotEncoder
from sklearn.model_selection import train_test_split
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPool2D

# ---- Data preparation ----
digits = load_digits()
X_data = digits.data.astype(np.float32)
Y_data = digits.target.astype(np.float32).reshape(-1, 1)  # reshape Y_data into a single column

scale = MinMaxScaler()  # normalization
X_data = scale.fit_transform(X_data)
print('MinMaxScaler_trans_X_data:')
print(X_data)

Y = OneHotEncoder().fit_transform(Y_data).todense()  # one-hot encoding
print('one-hot_Y:')
print(Y)

X = X_data.reshape(-1, 8, 8, 1)  # reshape into image format (samples, 8, 8, 1)

X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=0.2, random_state=0, stratify=Y)
print(X_train.shape, X_test.shape, y_train.shape, y_test.shape)

# ---- Build the model ----
model = Sequential()
ks = (3, 3)  # kernel size

# First convolutional layer (only the first layer needs an explicit input shape)
model.add(Conv2D(filters=16, kernel_size=ks, padding='same',
                 input_shape=X_train.shape[1:], activation='relu'))
model.add(MaxPool2D(pool_size=(2, 2)))  # pooling layer
model.add(Dropout(0.25))                # reduce overfitting

# Second convolutional layer
model.add(Conv2D(filters=32, kernel_size=ks, padding='same', activation='relu'))
model.add(MaxPool2D(pool_size=(2, 2)))
model.add(Dropout(0.25))

# Third and fourth convolutional layers
model.add(Conv2D(filters=64, kernel_size=ks, padding='same', activation='relu'))
model.add(Conv2D(filters=128, kernel_size=ks, padding='same', activation='relu'))
model.add(MaxPool2D(pool_size=(2, 2)))
model.add(Dropout(0.25))

model.add(Flatten())                        # flatten layer
model.add(Dense(128, activation='relu'))    # fully connected layer
model.add(Dropout(0.25))
model.add(Dense(10, activation='softmax'))  # output layer with softmax
model.summary()

# ---- Train and evaluate ----
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
train_history = model.fit(x=X_train, y=y_train, validation_split=0.2,
                          batch_size=300, epochs=10, verbose=2)
score = model.evaluate(X_test, y_test)
print(score)
print(train_history.history)  # per-epoch metrics

# ---- Plot the training history ----
def show_train_history(history, train, validation):
    plt.plot(history.history[train])
    plt.plot(history.history[validation])
    plt.ylabel(train)
    plt.xlabel('epoch')
    plt.legend(['train', 'validation'], loc='upper left')

p = plt.figure(figsize=(15, 15))
p.add_subplot(2, 1, 1)
show_train_history(train_history, 'accuracy', 'val_accuracy')
plt.title('Accuracy')
p.add_subplot(2, 1, 2)
show_train_history(train_history, 'loss', 'val_loss')
plt.title('Loss')
plt.show()

# ---- Evaluation: cross tabulation and confusion matrix ----
results = model.evaluate(X_test, y_test)
print('evaluation results:', results)

# predict_classes was removed in newer Keras; argmax over predict gives the same labels
y_pred = np.argmax(model.predict(X_test), axis=1)
print('predictions:', y_pred[:10])

# True labels: collapse the one-hot test matrix back to class indices
y_true = np.asarray(y_test).argmax(axis=1)

# Cross tabulation comparing predictions with the true labels
c = pd.crosstab(y_true, y_pred, rownames=['True'], colnames=['Predict'])
print(c)

# Heat map of the confusion matrix
sns.heatmap(c, annot=True, cmap='RdGy', linewidths=0.2, linecolor='grey')
plt.show()
```
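As a possible follow-up (not in the original post), a minimal sketch of using the trained model to classify a single image; it reuses the `X_test`, `y_true`, and `model` names from the listing above.

```python
# Take one image from the test set and run it through the trained model.
sample = X_test[0:1]           # shape (1, 8, 8, 1), already normalized
probs = model.predict(sample)  # softmax probabilities over the 10 classes
print('predicted digit:', np.argmax(probs, axis=1)[0])
print('true digit:     ', y_true[0])
```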