Classifying the MNIST dataset with a CNN, a BPNN, and an LSTM

1. CNN

import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms

# Set the random seed for reproducibility
torch.manual_seed(0)

# Hyperparameters
EPOCH = 1  # number of passes over the full training set
BATCH_SIZE = 50
DOWNLOAD_MNIST = False  # set True to download MNIST; set False once the dataset is already in ./mnist

# Load the MNIST dataset
train_dataset = datasets.MNIST(
    root="./mnist",
    train=True,  # True selects the training split
    transform=transforms.ToTensor(),
    download=DOWNLOAD_MNIST)
test_dataset = datasets.MNIST(
    root="./mnist",
    train=False,  # False selects the test split
    transform=transforms.ToTensor(),
    download=DOWNLOAD_MNIST)

# Wrap the datasets in DataLoaders
train_loader = torch.utils.data.DataLoader(
    dataset=train_dataset,
    batch_size=BATCH_SIZE,  # number of samples read per batch
    shuffle=True)  # shuffle so each training batch is drawn randomly
test_loader = torch.utils.data.DataLoader(dataset=test_dataset, batch_size=BATCH_SIZE, shuffle=False)
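
# A peek at one batch (an illustrative check, not part of the original flow):
# each training batch is (BATCH_SIZE, 1, 28, 28) with a (BATCH_SIZE,) label vector.
sample_imgs, sample_labels = next(iter(train_loader))
print(sample_imgs.shape, sample_labels.shape)  # torch.Size([50, 1, 28, 28]) torch.Size([50])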

# To save time, evaluate on only the first 2000 test samples
test_x = torch.unsqueeze(test_dataset.data, dim=1).type(torch.FloatTensor)[
         :2000] / 255.  # shape from (2000, 28, 28) to (2000, 1, 28, 28), values scaled into [0, 1]
test_y = test_dataset.targets[:2000]

# Define the convolutional neural network
class CNN(nn.Module):
    def __init__(self):
        super(CNN, self).__init__()
        self.conv1 = nn.Conv2d(  # input image shape is (28, 28, 1)
            in_channels=1,  # number of input feature maps
            out_channels=32,  # number of output feature maps
            kernel_size=3,  # 3x3 window used to extract features from the input maps
            stride=1,  # the window slides one unit at a time
            padding=1)  # to keep the output the same size as the input, set padding = (kernel_size - 1) / 2
        # After this layer the feature maps are (28, 28, 32): 32 output maps, and 28
        # follows from (h - k + 2p) / s + 1 = (28 - 3 + 2*1) / 1 + 1 = 28
        self.pool = nn.MaxPool2d(kernel_size=2, stride=2)  # shrinks the feature maps and helps guard against overfitting
        # After pooling the feature maps are (14, 14, 32)
        self.conv2 = nn.Conv2d(  # input feature maps are (14, 14, 32)
            in_channels=32,  # the first layer's output count becomes the second layer's input count
            out_channels=64,
            kernel_size=3,
            stride=1,
            padding=1)  # after the second convolution the feature maps are (14, 14, 64)

        self.fc = nn.Linear(64 * 7 * 7, 10)  # 10 is the number of output classes

    # Define the forward pass for x
    def forward(self, x):
        x = self.pool(F.relu(self.conv1(x)))  # x passes through conv1 first
        x = self.pool(F.relu(self.conv2(x)))  # then through conv2
        x = x.view(-1, 64 * 7 * 7)  # flatten to (batch, 64 * 7 * 7) for the fully connected layer
        x = self.fc(x)
        return x
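
# Sanity check (an illustrative sketch, not part of the original tutorial): push a
# dummy batch through the layers to confirm the (h - k + 2p) / s + 1 size
# arithmetic in the comments above.
_check = CNN()
with torch.no_grad():
    _h1 = _check.pool(F.relu(_check.conv1(torch.zeros(1, 1, 28, 28))))
    print(_h1.shape)  # torch.Size([1, 32, 14, 14])
    print(_check.pool(F.relu(_check.conv2(_h1))).shape)  # torch.Size([1, 64, 7, 7])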

# Instantiate the model
model = CNN()

# Define the loss function and optimizer
criterion = nn.CrossEntropyLoss()
# lr (learning rate) controls the size of each parameter update
optimizer = torch.optim.Adam(model.parameters(), lr=0.01)

# Train the model
for epoch in range(EPOCH):
    for i, (images, labels) in enumerate(train_loader):
        outputs = model(images)  # run the batch through the CNN
        loss = criterion(outputs, labels)  # loss between outputs and true labels; the argument order matters
        optimizer.zero_grad()  # clear the gradients left over from the previous step
        loss.backward()  # backpropagation: compute the gradients
        optimizer.step()  # apply the gradients
        if i % 50 == 0:
            test_output = model(test_x)
            pred_y = torch.max(test_output, 1)[1].data.numpy()
            accuracy = float((pred_y == test_y.data.numpy()).astype(int).sum()) / float(test_y.size(0))
            print('Epoch: ', epoch, '| train loss: %.4f' % loss.data.numpy(), '| test accuracy: %.4f' % accuracy)

# print 10 predictions from test data
test_output = model(test_x[:10])
pred_y = torch.max(test_output, 1)[1].data.numpy()
print(pred_y, 'prediction number')
print(test_y[:10].numpy(), 'real number')

2. BPNN

import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
import torchvision
DOWNLOAD_MNIST = False  # set True to download MNIST; set False once the dataset is already in ./mnist/
BATCH_SIZE = 50
LR = 0.01  # learning rate
# Load the MNIST handwritten-digit dataset
train_dataset = torchvision.datasets.MNIST(
    root='./mnist/',  # where to save / load the data (a folder next to this script)
    train=True,  # True selects the training split, False the test split
    transform=torchvision.transforms.ToTensor(),  # converts PIL.Image or numpy.ndarray to a tensor

    download=DOWNLOAD_MNIST,  # no need to download again if it is already there
)

test_dataset = torchvision.datasets.MNIST(
    root='./mnist/',
    train=False  # the test split; no transform needed since the raw tensors are read below
)
train_loader = torch.utils.data.DataLoader(dataset=train_dataset, batch_size=BATCH_SIZE, shuffle=True)

# To save time, evaluate on only the first 2000 test samples
test_x = torch.unsqueeze(test_dataset.data, dim=1).type(torch.FloatTensor)[
         :2000] / 255.  # shape from (2000, 28, 28) to (2000, 1, 28, 28), values scaled into [0, 1]
test_y = test_dataset.targets[:2000]

# Define the model
class BPNN(nn.Module):
    def __init__(self):
        super(BPNN, self).__init__()
        self.fc1 = nn.Linear(28 * 28, 512)  # fully connected layer fc1: 28 * 28 inputs, 512 outputs
        self.fc2 = nn.Linear(512, 512)
        self.fc3 = nn.Linear(512, 10)

    def forward(self, x):  # x is the input image batch
        x = x.view(-1, 28 * 28)  # flatten x to two dimensions: (batch_size, 28 * 28)
        x = F.relu(self.fc1(x))  # pass x through fc1, then apply the ReLU activation
        x = F.relu(self.fc2(x))
        x = self.fc3(x)
        # Softmax maps values into (0, 1) so that they sum to 1, but nn.CrossEntropyLoss
        # below already applies log_softmax internally, so raw logits are returned here
        # (returning log_softmax as well would apply it twice).
        return x

# Initialize the model
bpnn = BPNN()
print(bpnn)
# Define the loss function and optimizer
optimizer = torch.optim.Adam(bpnn.parameters(), lr=LR)  # optimize all parameters
loss_func = nn.CrossEntropyLoss()  # the target labels are class indices, not one-hot vectors
#
# Equivalent alternative: return F.log_softmax(x, dim=1) from forward() and use
# criterion = nn.NLLLoss()
# optimizer = optim.SGD(bpnn.parameters(), lr=0.01, momentum=0.5)
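
# Quick check (illustrative only): CrossEntropyLoss on raw logits matches
# NLLLoss on log_softmax output, which is why either setup above works.
_logits = torch.randn(4, 10)
_target = torch.tensor([0, 1, 2, 3])
print(nn.CrossEntropyLoss()(_logits, _target))
print(nn.NLLLoss()(F.log_softmax(_logits, dim=1), _target))  # prints the same value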

# Train the model
for epoch in range(1):
    for step, (b_x, b_y) in enumerate(train_loader):
        # no manual reshape needed: forward() flattens the (batch, 1, 28, 28) input itself
        output = bpnn(b_x)
        loss = loss_func(output, b_y)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        if step % 50 == 0:
            test_output = bpnn(test_x)
            pred_y = torch.max(test_output, 1)[1].data.squeeze()
            acc = (pred_y == test_y).sum().float() / test_y.size(0)
            print('Epoch: ', epoch, '| train loss: %.4f' % loss.data.float(), 'test acc: ', acc.numpy())

test_output = bpnn(test_x[:10])
pred_y = torch.max(test_output, 1)[1].data.numpy().squeeze()
print(pred_y, 'prediction number')
print(test_y[:10].numpy(), 'real number')
# # Evaluate on the full test set (builds a DataLoader over the test split with ToTensor):
# bpnn.eval()
# correct = 0
# eval_loader = torch.utils.data.DataLoader(
#     datasets.MNIST(root='./mnist/', train=False, transform=transforms.ToTensor()),
#     batch_size=BATCH_SIZE)
# with torch.no_grad():
#     for data, target in eval_loader:
#         output = bpnn(data)
#         pred = output.argmax(dim=1, keepdim=True)
#         correct += pred.eq(target.view_as(pred)).sum().item()
#
# print('Test accuracy:', correct / len(eval_loader.dataset))


3. LSTM

import torch
from torch import nn
import torchvision.datasets as dsets
import torchvision.transforms as transforms
import matplotlib.pyplot as plt
import numpy as np

torch.manual_seed(1)  # reproducible

# Hyper Parameters
EPOCH = 1  # number of passes over the full training set; kept at 1 to save time
BATCH_SIZE = 64
TIME_STEP = 28  # rnn time steps / image height
INPUT_SIZE = 28  # rnn input size per step / pixels per image row
LR = 0.01  # learning rate
DOWNLOAD_MNIST = False  # set True to download MNIST; set False once the dataset is already in ./mnist/

# MNIST handwritten digits
train_data = dsets.MNIST(
    root='./mnist/',  # where to save / load the data
    train=True,  # this is training data
    transform=transforms.ToTensor(),  # converts PIL.Image or numpy.ndarray into
    # torch.FloatTensor (C x H x W), normalized into the [0.0, 1.0] range
    download=DOWNLOAD_MNIST,  # download if missing; skip if already downloaded
)

test_data = dsets.MNIST(root='./mnist/', train=False)

# Batch training: BATCH_SIZE samples, 1 channel, 28x28 -> (BATCH_SIZE, 1, 28, 28)
train_loader = torch.utils.data.DataLoader(dataset=train_data, batch_size=BATCH_SIZE, shuffle=True)

# To save time, evaluate on only the first 2000 test samples
test_x = test_data.data.type(torch.FloatTensor)[:2000] / 255.  # shape (2000, 28, 28) = (batch, time_step, input_size), values scaled into [0, 1]
test_y = test_data.targets[:2000]

# By default nn.LSTM expects input of shape (seq_len, batch, feature)
class Lstm(nn.Module):
    def __init__(self):
        super(Lstm, self).__init__()

        self.lstm = nn.LSTM(  # an LSTM generally works much better here than a plain nn.RNN()
            input_size=28,  # pixels per image row, i.e. the feature size of each input step
            hidden_size=64,  # number of LSTM units, analogous to the hidden neurons of a BP network
            num_layers=1,  # number of stacked LSTM layers
            batch_first=True,  # input & output put the batch size first, e.g. (batch, time_step, input_size)
        )

        self.out = nn.Linear(64, 10)  # output layer: a linear layer on top of the LSTM

    def forward(self, x):  # this method is required
        # x shape (batch, time_step, input_size)
        # r_out shape (batch, time_step, hidden_size): the output at every time step
        # h_n shape (n_layers, batch, hidden_size): the hidden state at the last step
        # h_c shape (n_layers, batch, hidden_size): the cell state at the last step
        # (an LSTM carries two states: h_n is the hidden state, h_c is the cell state)
        r_out, (h_n, h_c) = self.lstm(x, None)  # None means the initial hidden state is all zeros
        # once the sequence ends, (h_n, h_c) is the final pair of states; it is not used here

        # Take the output of the last time step;
        # for a single-layer LSTM, r_out[:, -1, :] equals h_n
        out = self.out(r_out[:, -1, :])  # pick the last time step from (batch, time_step, hidden_size)
        # output_np = out.detach().numpy()  # handy for inspecting results, e.g. with numpy / SciView
        return out


lstm = Lstm()
print(lstm)

optimizer = torch.optim.Adam(lstm.parameters(), lr=LR)  # optimize all parameters
loss_func = nn.CrossEntropyLoss()  # the target labels are class indices, not one-hot vectors
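
# Quick check (illustrative): for a single-layer LSTM, the last time step of
# r_out equals the final hidden state h_n, as the forward() comments note.
with torch.no_grad():
    _seq = torch.randn(2, 28, 28)  # (batch, time_step, input_size)
    _r, (_h, _c) = lstm.lstm(_seq, None)
    print(torch.allclose(_r[:, -1, :], _h[-1]))  # True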

# training and testing
for epoch in range(EPOCH):
    for step, (x, b_y) in enumerate(train_loader):  # gives batch data
        b_x = x.view(-1, 28, 28)  # reshape x to (batch, time_step, input_size)

        output = lstm(b_x)  # rnn output
        loss = loss_func(output, b_y)  # cross entropy loss
        optimizer.zero_grad()  # clear gradients for this training step
        loss.backward()  # backpropagation, compute gradients
        optimizer.step()  # apply gradients

        # output_np = output.detach().numpy()

        if step % 50 == 0:
            test_output = lstm(test_x)  # test_x already has shape (batch, time_step, input_size)
            pred_y = torch.max(test_output, 1)[1].data.squeeze()
            acc = (pred_y == test_y).sum().float() / test_y.size(0)
            print('Epoch: ', epoch, '| train loss: %.4f' % loss.data.float(), 'test acc: ', acc.numpy())

test_output = lstm(test_x[:10])
pred_y = torch.max(test_output, 1)[1].data.numpy().squeeze()
print(pred_y, 'prediction number')
print(test_y[:10].numpy(), 'real number')

