1. Handwritten digits dataset

  • from sklearn.datasets import load_digits
  • digits = load_digits()

from sklearn.datasets import load_digits
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import OneHotEncoder

digits = load_digits()
X_data = digits.data.astype(np.float32)
# Reshape Y_data into a single column
Y_data = digits.target.astype(np.float32).reshape(-1, 1)
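To see what the raw tensors look like before preprocessing, here is a quick inspection sketch (not part of the original pipeline; digits.images holds the same data already shaped as 8x8 arrays):

print(X_data.shape, Y_data.shape)  # (1797, 64) and (1797, 1): 1797 samples of 8x8 grey-scale pixels
import matplotlib.pyplot as plt
plt.imshow(digits.images[0], cmap='gray')  # show the first digit image
plt.title(str(digits.target[0]))
plt.show()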

 

 

2. Image data preprocessing

  • x: normalization with MinMaxScaler()
  • y: one-hot encoding with OneHotEncoder() or to_categorical
  • train/test split
  • tensor structure

# Scale each feature to a given range (usually 0-1)
scaler = MinMaxScaler()
X_data = scaler.fit_transform(X_data)
print('MinMaxScaler_trans_X_data:')
print(X_data)

 

 

Y = OneHotEncoder().fit_transform(Y_data).todense()  # one-hot encoding
print('one-hot_Y:')
print(Y)
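The preprocessing checklist also mentions to_categorical. As a sketch of that alternative (Y_alt is an illustrative name and is not used by the rest of the code), the same one-hot matrix can be produced directly from the integer targets:

from tensorflow.keras.utils import to_categorical
Y_alt = to_categorical(digits.target, num_classes=10)  # equivalent one-hot encoding, shape (1797, 10)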

 

 

# Reshape to image format (batch, height, width, channels)
X = X_data.reshape(-1, 8, 8, 1)
from sklearn.model_selection import train_test_split
# Train/test split
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2, random_state=0, stratify=Y)
print(X_train.shape, X_test.shape, Y_train.shape, Y_test.shape)

 

 

3. Design the convolutional neural network architecture

  • Draw the model architecture diagram and explain the design rationale (a plot_model sketch follows model.summary() below).

 

 

from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPool2D

# Build the model
model = Sequential()
ks = (3, 3)  # kernel size
input_shape = X_train.shape[1:]
# Convolutional layer 1; padding='same' makes TensorFlow zero-pad the input automatically
model.add(Conv2D(filters=16, kernel_size=ks, padding='same', input_shape=input_shape,
                 activation='relu'))  # only the first layer needs an explicit input shape; later shapes are inferred
# Pooling layer 1
model.add(MaxPool2D(pool_size=(2, 2)))
# Dropout randomly drops connections to reduce overfitting
model.add(Dropout(0.25))
# Convolutional layer 2
model.add(Conv2D(filters=32, kernel_size=ks, padding='same', activation='relu'))
# Pooling layer 2
model.add(MaxPool2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
# Convolutional layer 3
model.add(Conv2D(filters=64, kernel_size=ks, padding='same', activation='relu'))
# Convolutional layer 4
model.add(Conv2D(filters=128, kernel_size=ks, padding='same', activation='relu'))
# Pooling layer 3
model.add(MaxPool2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
# Flatten layer
model.add(Flatten())
# Fully connected layer
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.25))
# Output layer with softmax activation
model.add(Dense(10, activation='softmax'))
model.summary()
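For the architecture diagram asked for in the section-3 checklist, a minimal sketch using tf.keras's plot_model (it assumes the optional pydot and graphviz dependencies are installed; the output file name is just an example):

from tensorflow.keras.utils import plot_model
plot_model(model, to_file='cnn_digits.png', show_shapes=True, show_layer_names=True)  # writes the layer graph with output shapes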


4. Model training

model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
train_history = model.fit(x=X_train, y=Y_train, validation_split=0.2, batch_size=300, epochs=10, verbose=2)
score = model.evaluate(X_test, Y_test)
print(score)

import matplotlib.pyplot as plt

def show_train_history(train_history, train, validation, title='Train History'):
    # Plot one training metric and its validation counterpart per epoch
    plt.plot(train_history.history[train])
    plt.plot(train_history.history[validation])
    plt.title(title)
    plt.ylabel(train)
    plt.xlabel('epoch')
    plt.legend(['train', 'validation'], loc='upper left')

p = plt.figure(figsize=(15, 15))
a1 = p.add_subplot(2, 1, 1)
show_train_history(train_history, 'accuracy', 'val_accuracy', title='Accuracy')
a2 = p.add_subplot(2, 1, 2)
show_train_history(train_history, 'loss', 'val_loss', title='Loss')
plt.show()


5. Model evaluation

  • model.evaluate()
  • cross table and confusion matrix
  • pandas.crosstab
  • seaborn.heatmap

import seaborn as sns

score = model.evaluate(X_test, Y_test)
print('score', score)
# Predicted classes (predict_classes was removed from tf.keras, so take the argmax of predict)
y_pred = np.argmax(model.predict(X_test), axis=1)
print('y_pred', y_pred[:10])

# Cross table and confusion matrix
# Decode the one-hot test labels back to class indices
y_true = np.asarray(Y_test).argmax(axis=1)
# Cross table comparing the true labels with the predictions
print(pd.crosstab(y_true, y_pred, rownames=['true'], colnames=['predict']))
# Confusion matrix drawn as a heatmap
a = pd.crosstab(y_true, y_pred, rownames=['Labels'], colnames=['Predict'])
df = pd.DataFrame(a)
sns.heatmap(df, annot=True, cmap="Reds", linewidths=0.2, linecolor='gray')
plt.show()
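Beyond the crosstab heatmap, per-class precision and recall can be read off with sklearn.metrics; a small sketch reusing y_true and y_pred from above (classification_report is an addition here, not part of the original walkthrough):

from sklearn.metrics import classification_report
print(classification_report(y_true, y_pred, digits=3))  # per-class precision, recall and F1 for the ten digit classes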