15 Handwritten Digit Recognition - Small Dataset

1. The handwritten digits dataset

  • from sklearn.datasets import load_digits
  • digits = load_digits()

 

2. Image data preprocessing

  • x: normalization with MinMaxScaler()
  • y: one-hot encoding with OneHotEncoder() or to_categorical (a to_categorical sketch follows the train/test split in the code below)
  • train/test split
  • tensor shape (reshape to image format)

 

3. Design the convolutional neural network structure

  • Draw the model structure diagram and explain the design rationale (see the plot_model sketch after model.summary() in the code below).

 

4. Model training

5. Model evaluation

  • model.evaluate()
  • crosstab and confusion matrix
  • pandas.crosstab
  • seaborn.heatmap
from sklearn.datasets import load_digits
import numpy as np

# 1. The handwritten digits dataset
digits = load_digits()
x_data = digits.data.astype(np.float32)
y_data = digits.target.astype(np.float32).reshape(-1, 1)
# 2. Image data preprocessing
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import OneHotEncoder
from sklearn.model_selection import train_test_split

scaler = MinMaxScaler()
x_data = scaler.fit_transform(x_data)
print(x_data)
x = x_data.reshape(-1, 8, 8, 1)  # reshape to image format: 8x8 pixels, 1 channel
y = OneHotEncoder().fit_transform(y_data).toarray()  # one-hot encode the labels as a dense array
# Train/test split
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=0, stratify=y)
print(x_train.shape, x_test.shape, y_train.shape, y_test.shape)
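# (Sketch) The to_categorical alternative mentioned in step 2 of the outline.
# Assumption: illustration only; the rest of this post keeps the OneHotEncoder result in y,
# and y_cat is a hypothetical name used here for comparison.
from tensorflow.keras.utils import to_categorical
y_cat = to_categorical(y_data.ravel(), num_classes=10)  # shape (n_samples, 10)
print(y_cat.shape)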

# 3. Design the convolutional neural network structure
# Draw the model structure diagram
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPool2D

model = Sequential()
ks = [3, 3]  # kernel size
# First convolutional layer
model.add(Conv2D(filters=16, kernel_size=ks, padding='same', input_shape=x_train.shape[1:], activation='relu'))
# Pooling layer
model.add(MaxPool2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
# Second convolutional layer
model.add(Conv2D(filters=32, kernel_size=ks, padding='same', activation='relu'))
# Pooling layer
model.add(MaxPool2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
# Third convolutional layer
model.add(Conv2D(filters=64, kernel_size=ks, padding='same', activation='relu'))
# Fourth convolutional layer
model.add(Conv2D(filters=128, kernel_size=ks, padding='same', activation='relu'))
# Pooling layer
model.add(MaxPool2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
# Flatten layer
model.add(Flatten())
# Fully connected layer
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.25))
# Output layer: softmax over the 10 digit classes
model.add(Dense(10, activation='softmax'))
model.summary()
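# (Sketch) Draw the model structure diagram called for in step 3 of the outline.
# Assumption: the optional pydot and graphviz packages are installed; the output file name is arbitrary.
from tensorflow.keras.utils import plot_model
plot_model(model, to_file='cnn_digits.png', show_shapes=True)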

# 4. Model training
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
train_history = model.fit(x=x_train, y=y_train, validation_split=0.2, batch_size=300, epochs=10, verbose=2)

# Define a helper to visualize the training history
import matplotlib.pyplot as plt


def show_train_history(train_history, train, validation):
    plt.plot(train_history.history[train])
    plt.plot(train_history.history[validation])
    plt.title('Train History')
    plt.ylabel(train)
    plt.xlabel('epoch')
    plt.legend(['train', 'validation'], loc='upper left')
    plt.show()

# Accuracy
show_train_history(train_history, 'accuracy', 'val_accuracy')
# Loss
show_train_history(train_history, 'loss', 'val_loss')

# 5. Model evaluation
import pandas as pd

score = model.evaluate(x_test, y_test)[1]
print('Model accuracy =', score)
# Predicted labels (predict_classes was removed in newer Keras, so take argmax of the probabilities)
y_pre = np.argmax(model.predict(x_test), axis=1)
print('Predicted y values =', y_pre[:10])

# Crosstab and confusion matrix
y_true = np.argmax(y_test, axis=1)  # decode the one-hot test labels back to digits
print(y_true.shape)
# Crosstab: compare the predictions with the true labels
print(pd.crosstab(y_true, y_pre, rownames=['true'], colnames=['predict']))

# Confusion matrix as a heatmap
import seaborn as sns

df = pd.crosstab(y_true, y_pre, rownames=['Labels'], colnames=['predict'])
print(df)
sns.heatmap(df, annot=True, cmap="Reds", linewidths=0.2, linecolor='g')
plt.show()
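# (Sketch) An equivalent check of the confusion matrix with scikit-learn's metrics,
# assuming the y_true and y_pre arrays computed above.
from sklearn.metrics import confusion_matrix, classification_report
print(confusion_matrix(y_true, y_pre))
print(classification_report(y_true, y_pre))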

 

 

 

[Figure: accuracy training history plot]

[Figure: loss training history plot]

posted @ 2020-06-09 22:00  土块