15 手写数字识别-小数据集

1.手写数字数据集

  • from sklearn.datasets import load_digits
  • digits = load_digits()

 

导入数据集

from sklearn.datasets import load_digits
digits = load_digits()

 

2.图片数据预处理

  • x:归一化MinMaxScaler()
  • y:独热编码OneHotEncoder()或to_categorical
  • 训练集测试集划分
  • 张量结构

 

# --- Normalisation: min-max scale every pixel feature into [0, 1]. ---
scaler = MinMaxScaler()
X_data = scaler.fit_transform(digits.data.astype(np.float32))
print("归一化处理X:")
print(X_data)

# --- Labels: one-hot encode digits 0-9 into 10-dim indicator rows. ---
X = X_data.reshape(-1, 8, 8, 1)  # NHWC tensor: (samples, 8, 8, 1 channel)
Y_data = digits.target.astype(np.float32).reshape(-1, 1)
Y = OneHotEncoder().fit_transform(Y_data).todense()  # dense np.matrix of one-hot rows
print("独热编码处理Y:")
print(Y)

# --- Hold out 20% as a test set, stratified on the class labels. ---
X_train, X_test, Y_train, Y_test = train_test_split(
    X, Y, test_size=0.2, random_state=0, stratify=Y)
print('X_train.shape, X_test.shape, y_train.shape, y_test.shape:')
print(X_train.shape, X_test.shape, Y_train.shape, Y_test.shape)

 

 

3.设计卷积神经网络结构

  • 绘制模型结构图,并说明设计依据。
# Build the CNN. All convolutions share the same 5x5 kernel and 'same'
# padding; each pooling stage halves the 8x8 feature map (8 -> 4 -> 2 -> 1)
# and is followed by Dropout to reduce overfitting.
ks = (5, 5)
input_shape = X_train.shape[1:]
model = Sequential([
    # Block 1: conv -> pool -> dropout
    Conv2D(16, kernel_size=ks, padding='same', activation='relu',
           input_shape=input_shape),
    MaxPool2D(pool_size=(2, 2)),
    Dropout(0.25),
    # Block 2: conv -> pool -> dropout
    Conv2D(32, kernel_size=ks, padding='same', activation='relu'),
    MaxPool2D(pool_size=(2, 2)),
    Dropout(0.25),
    # Block 3: two stacked convs -> pool -> dropout
    Conv2D(64, kernel_size=ks, padding='same', activation='relu'),
    Conv2D(128, kernel_size=ks, padding='same', activation='relu'),
    MaxPool2D(pool_size=(2, 2)),
    Dropout(0.25),
    # Classifier head: flatten -> dense -> dropout -> 10-way softmax
    Flatten(),
    Dense(128, activation='relu'),
    Dropout(0.25),
    Dense(10, activation='softmax'),
])
# Print the per-layer parameter summary.
print(model.summary())

设置了一个 ks 为 (5,5) 的卷积核,4 个防止过拟合的 Dropout 层(随机丢弃连接),四层卷积层,三层池化层,一层平坦层,以及两层全连接层(128 单元隐藏层和 10 类 softmax 输出层)

 

 

 

 

 

 

 

4.模型训练

  • model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
  • train_history = model.fit(x=X_train,y=Y_train,validation_split=0.2, batch_size=300,epochs=10,verbose=2)
# Compile (multi-class cross-entropy + Adam) and train, holding out a
# further 20% of the training set for per-epoch validation.
model.compile(optimizer='adam', loss='categorical_crossentropy',
              metrics=['accuracy'])
train_history = model.fit(
    x=X_train, y=Y_train,
    validation_split=0.2,
    batch_size=300, epochs=10, verbose=2)
# 可视化绘图
def show_train_history(train_history, train, validation):
    """Plot one training-history metric against its validation counterpart.

    Args:
        train_history: History object returned by ``model.fit``; its
            ``.history`` dict maps metric names to per-epoch value lists.
        train: key of the training metric, e.g. 'accuracy' or 'loss'.
        validation: key of the validation metric, e.g. 'val_accuracy'.
    """
    plt.plot(train_history.history[train])
    plt.plot(train_history.history[validation])
    plt.title('Train History')
    # Bug fix: the y-axis label was hard-coded to 'train', which is wrong
    # when plotting loss; label it with the metric actually plotted.
    plt.ylabel(train)
    plt.xlabel('epoch')
    plt.legend(['train', 'validation'], loc='upper left')
    plt.show()
# Accuracy curves (train vs. validation) per epoch.
show_train_history(train_history, 'accuracy', 'val_accuracy')
# Loss curves (train vs. validation) per epoch.
show_train_history(train_history, 'loss', 'val_loss')

 

 

 

 

5.模型评价

  • model.evaluate()
  • 交叉表与交叉矩阵
  • pandas.crosstab
  • seaborn.heatmap
# --- Evaluate on the held-out test set: [loss, accuracy]. ---
score = model.evaluate(X_test, Y_test)
print('准确率为', score)
# Bug fix: Sequential.predict_classes() was removed in TF 2.6; taking the
# argmax of the softmax probabilities is the supported equivalent.
Y_pre = np.argmax(model.predict(X_test), axis=1)
print('Y_pred:', Y_pre[:10])
# --- Confusion table: recover integer labels from the one-hot rows. ---
# Y_test is a dense np.matrix; np.asarray + argmax yields a flat ndarray,
# avoiding the matrix-indexing gymnastics the original needed.
Y_true = np.asarray(Y_test).argmax(axis=1)
Y_test1 = Y_true.tolist()  # kept for compatibility with the original names
pd.crosstab(Y_true, Y_pre, rownames=['true'], colnames=['predict'])
# --- Confusion matrix heatmap. ---
a = pd.crosstab(Y_true, Y_pre, rownames=['Lables'], colnames=['Predict'])
df = pd.DataFrame(a)
# Bug fix: 'G' is not a valid matplotlib color spec (single-letter colors
# are lowercase); use 'grey' for the cell border lines.
sns.heatmap(df, annot=True, cmap="BrBG_r", linewidths=0.2, linecolor='grey')
plt.show()

 

 

posted @ 2020-06-11 11:42  Seraooo  阅读(229)  评论(0编辑  收藏  举报