PyTorch-CNN Model

  • Train a convolutional neural network on the MNIST dataset

Code:

import torch
import numpy as np
import torchvision  # torchvision: PyTorch's computer-vision package
import torchvision.datasets as datasets
import torchvision.transforms as transforms
from torch.utils.data.dataloader import DataLoader
from torchvision.transforms import ToTensor
import matplotlib.pyplot as plt
import PIL.Image as Image
import os
os.environ["KMP_DUPLICATE_LIB_OK"]="TRUE"
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
root='D:/Project_Encyclopedia'  # forward slashes avoid Windows backslash-escape issues
mnist=torchvision.datasets.MNIST(root,train=True,transform=ToTensor(),target_transform=None,download=False)
bs=8
mnist_loader=torch.utils.data.DataLoader(dataset=mnist,batch_size=bs,shuffle=True)
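# Sanity check (a sketch, not in the original): with batch_size=8 and 1-channel
# 28x28 MNIST images, one batch has shape [8, 1, 28, 28].
images, labels = next(iter(mnist_loader))
print(images.shape)  # torch.Size([8, 1, 28, 28])
print(labels.shape)  # torch.Size([8])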
class CNNModel(nn.Module):
    def __init__(self):   # initialization
        super(CNNModel, self).__init__()  # call the parent constructor
        self.conv1 = nn.Conv2d(1, 20, 5)   # 2-D conv: 1 input channel (1*28*28), 20 output channels, 5x5 kernel
        self.conv2 = nn.Conv2d(20, 12, 5)  # 20 input channels, 12 output channels -> 12*8*8
        self.fc1 = nn.Linear(12*4*4, 100, bias=True)   # fully connected layer
        self.fc2 = nn.Linear(100, 10, bias=True)   # fully connected layer
 
    def forward(self, x):
        x = self.conv1(x)  # first convolution: 28*28 -> 24*24
        x = F.relu(x)
        x = F.max_pool2d(x, kernel_size=2, stride=2)  # 24*24 -> 12*12

        x = self.conv2(x)  # 12*12 -> 8*8
        x = F.relu(x)
        x = F.max_pool2d(x, kernel_size=2, stride=2)  # max over 2x2 patches: 12*8*8 -> 12*4*4

        x = x.reshape(-1, 12*4*4)  # flatten each sample to one dimension
        x = self.fc1(x)
        x = F.relu(x)

        x = self.fc2(x)
        return x
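# Shape check (a sketch, not in the original): a dummy 28x28 input confirms
# the 12*4*4 flattened size and the 10-way output.
print(CNNModel()(torch.randn(1, 1, 28, 28)).shape)  # torch.Size([1, 10])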
cnnmodel=CNNModel()
print(cnnmodel)
for name,param in cnnmodel.named_parameters():
    print(name,param.shape)
optimizer = torch.optim.SGD(cnnmodel.parameters(), lr=0.1)
for epoch in range(5):
    total_loss=0
    for batch in mnist_loader:
        image,labels=batch
        optimizer.zero_grad()
        out=cnnmodel(image)
        loss=F.cross_entropy(out,labels)
        total_loss+=loss.item()  # .item() extracts the Python number from the tensor
        loss.backward()
        optimizer.step() 
    print("epoch:",epoch,"loss:",total_loss)
out.argmax(dim=1)  # predicted class for each sample in the last batch
labels             # ground-truth labels for the same batch
out.argmax(dim=1).eq(labels).sum().item()  # number of correct predictions in the batch
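# The three lines above can be wrapped in a helper (a sketch; the name
# get_num_correct is illustrative, not from the original):
def get_num_correct(preds, labels):
    return preds.argmax(dim=1).eq(labels).sum().item()  # count of correct predictions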
# Evaluating accuracy
optimizer = torch.optim.SGD(cnnmodel.parameters(), lr=0.1)
for epoch in range(5):
    total_loss=0
    total_correct=0
    for batch in mnist_loader:
        image,labels=batch
        optimizer.zero_grad()
        out=cnnmodel(image)
        loss=F.cross_entropy(out,labels)
        total_loss+=loss.item()  # .item() extracts the Python number from the tensor
        loss.backward()
        optimizer.step() 
        total_correct+=out.argmax(dim=1).eq(labels).sum().item()
    print("epoch:",epoch,"loss:",total_loss,"acc:",total_correct/60000)

  • How to save a trained model
#http://www.jianshu.com/p/4905bf8e06e5

See the site above; there are two ways to save a model:

#http://www.jianshu.com/p/4905bf8e06e5
torch.save(cnnmodel,'D:/Project_Encyclopedia/cnnmodel.pkl')  # method 1: save the whole model object
# Method 2: save only the learned parameters (the state_dict)
torch.save(cnnmodel.state_dict(),'D:/Project_Encyclopedia/cnnmodel.pt')
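To load the model back, a minimal sketch (paths match the save calls above; the state_dict variant needs the CNNModel class defined first):

# Method 1: load the whole pickled model object.
model1 = torch.load('D:/Project_Encyclopedia/cnnmodel.pkl')
# Method 2: rebuild the architecture, then load the weights (generally recommended).
model2 = CNNModel()
model2.load_state_dict(torch.load('D:/Project_Encyclopedia/cnnmodel.pt'))
model2.eval()  # switch to inference mode before evaluating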

  • Pooling Layer

In a convolutional neural network, a pooling layer is often inserted between convolutional layers. Pooling shrinks the feature maps very effectively, which reduces the number of parameters in the final fully connected layers; it therefore both speeds up computation and helps prevent overfitting. In image recognition the input is sometimes large, so to cut the number of training parameters, pooling layers are introduced periodically between successive convolutional layers. The sole purpose of pooling is to reduce the spatial size of the image. Pooling is applied independently on each depth channel, so the depth of the feature maps stays unchanged. The most common form is max pooling, with average pooling as an alternative: each takes the maximum, or the mean, over a small local patch of the feature map.
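For example, a minimal sketch of both pooling variants on a tiny 4x4 feature map:

import torch
import torch.nn.functional as F

x = torch.tensor([[[[1., 2., 5., 6.],
                    [3., 4., 7., 8.],
                    [1., 0., 2., 1.],
                    [0., 1., 1., 3.]]]])         # shape [1, 1, 4, 4]
print(F.max_pool2d(x, kernel_size=2, stride=2))  # [[4., 8.], [1., 3.]]: max of each 2x2 patch
print(F.avg_pool2d(x, kernel_size=2, stride=2))  # [[2.5, 6.5], [0.5, 1.75]]: mean of each 2x2 patch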

  • Batch Normalization

1) It speeds up training, so a larger learning rate can be used.

2) It improves the network's generalization ability.

3) A BN layer is essentially a normalization layer and can replace local response normalization (LRN).

4) It allows the training order of samples to be shuffled (making it unlikely that the same image is repeatedly selected for training).

The computation, per mini-batch (see the sketch after these steps):

1. Compute the batch mean.

2. Compute the batch variance.

3. Standardize the batch using that mean and variance.
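A minimal sketch of those three steps on a conv feature map, checked against nn.BatchNorm2d (the small epsilon and the learnable scale/shift are details the steps above leave out):

import torch
import torch.nn as nn

x = torch.randn(8, 20, 12, 12)  # a batch of 20-channel feature maps
mean = x.mean(dim=(0, 2, 3), keepdim=True)                # 1. batch mean per channel
var = x.var(dim=(0, 2, 3), unbiased=False, keepdim=True)  # 2. batch variance per channel
x_hat = (x - mean) / torch.sqrt(var + 1e-5)               # 3. standardize (eps avoids division by zero)

bn = nn.BatchNorm2d(20)  # the same computation plus learnable gamma/beta
print(torch.allclose(bn(x), x_hat, atol=1e-5))  # True, since gamma=1 and beta=0 at init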

  • How a convolutional network and its kernels operate
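Each convolution kernel slides across the input and produces one output feature map; a minimal sketch with F.conv2d, using the same sizes as conv1 above (the random kernel values are just for illustration):

import torch
import torch.nn.functional as F

image = torch.randn(1, 1, 28, 28)  # one 1-channel 28x28 input
kernel = torch.randn(20, 1, 5, 5)  # 20 kernels of size 1x5x5, as in conv1
out = F.conv2d(image, kernel)      # each kernel slides over the image, stride 1, no padding
print(out.shape)  # torch.Size([1, 20, 24, 24]); 24 = 28 - 5 + 1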

  • Change batch_size to 32 and adjust the network

Code:

import torch
import numpy as np
import torchvision  # torchvision: PyTorch's computer-vision package
import torchvision.datasets as datasets
import torchvision.transforms as transforms
from torch.utils.data.dataloader import DataLoader
from torchvision.transforms import ToTensor
import matplotlib.pyplot as plt
import PIL.Image as Image
import os
os.environ["KMP_DUPLICATE_LIB_OK"]="TRUE"
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
root='D:/Project_Encyclopedia'  # forward slashes avoid Windows backslash-escape issues
mnist=torchvision.datasets.MNIST(root,train=True,transform=ToTensor(),target_transform=None,download=False)
mnist
data=[d[0].data.cpu().numpy() for d in mnist]
np.mean(data)  # mean of the whole dataset, roughly 0.1307
np.std(data)   # standard deviation of the whole dataset, roughly 0.3081
bs=32
train_dataloader=torch.utils.data.DataLoader(
    dataset=torchvision.datasets.MNIST(root,train=True,
                    transform=transforms.Compose([
                        transforms.ToTensor(),
                        transforms.Normalize((0.1307,),(0.3081,))]),
                    target_transform=None,download=False),
    batch_size=bs,
    shuffle=True,
    pin_memory=True
)
test_dataloader=torch.utils.data.DataLoader(
    dataset=torchvision.datasets.MNIST(root,train=False,
                    transform=transforms.Compose([
                        transforms.ToTensor(),
                        transforms.Normalize((0.1307,),(0.3081,))]),
                    target_transform=None,download=False),
    batch_size=bs,
    shuffle=True,
    pin_memory=True
)
class CNNModel(nn.Module):
    def __init__(self):   # initialization
        super(CNNModel, self).__init__()  # call the parent constructor
        self.conv1 = nn.Conv2d(1, 20, 5, 1)   # 1 channel -> 20 channels, 5x5 kernel, stride 1
        self.conv2 = nn.Conv2d(20, 50, 5, 1)  # 20 channels -> 50 channels, 5x5 kernel, stride 1
        self.fc1 = nn.Linear(4*4*50, 500)   # fully connected layer
        self.fc2 = nn.Linear(500, 10)   # fully connected layer
 
    def forward(self, x):
        # x: 1*28*28, where 1 is the channel dimension
        x = F.relu(self.conv1(x))  # 28*28 -> 28-5+1 = 24, i.e. 24*24
        x = F.max_pool2d(x, kernel_size=2, stride=2)  # 12*12
        x = F.relu(self.conv2(x))  # 8*8
        x = F.max_pool2d(x, kernel_size=2, stride=2)  # 4*4
        x = x.reshape(-1, 4*4*50)
        # or equivalently: x = x.view(-1, 4*4*50)
        x = F.relu(self.fc1(x))
        x = self.fc2(x)
        return F.log_softmax(x, dim=1)  # log-probabilities, so pair with F.nll_loss below
cnnmodel=CNNModel()
print(cnnmodel)
for name,param in cnnmodel.named_parameters():
    print(name,param.shape)
optimizer = torch.optim.SGD(cnnmodel.parameters(), lr=0.1,momentum=0.5)
for epoch in range(5):
    total_loss=0
    for batch in train_dataloader:
        image,labels=batch
        optimizer.zero_grad()
        out=cnnmodel(image)
        loss=F.nll_loss(out,labels)  # the model outputs log-probabilities, so use NLL loss (cross_entropy would apply log_softmax twice)
        total_loss+=loss.item()  # .item() extracts the Python number from the tensor
        loss.backward()
        optimizer.step() 
    print("epoch:",epoch,"loss:",total_loss)
out.argmax(dim=1)  # predicted class for each sample in the last batch
labels             # ground-truth labels for the same batch
out.argmax(dim=1).eq(labels).sum().item()  # number of correct predictions in the batch
# Evaluating accuracy
optimizer = torch.optim.SGD(cnnmodel.parameters(), lr=0.1,momentum=0.5)
for epoch in range(5):
    total_loss=0
    total_correct=0
    for batch in train_dataloader:
        image,labels=batch
        optimizer.zero_grad()
        out=cnnmodel(image)
        loss=F.nll_loss(out,labels)  # the model outputs log-probabilities, so use NLL loss
        total_loss+=loss.item()  # .item() extracts the Python number from the tensor
        loss.backward()
        optimizer.step() 
        total_correct+=out.argmax(dim=1).eq(labels).sum().item()
    print("epoch:",epoch,"loss:",total_loss,"acc:",total_correct/60000)
#http://www.jianshu.com/p/4905bf8e06e5
torch.save(cnnmodel,'D:/Project_Encyclopedia/cnnmodel.pkl')  # method 1: save the whole model object
# Method 2: save only the learned parameters (the state_dict)
torch.save(cnnmodel.state_dict(),'D:/Project_Encyclopedia/cnnmodel.pt')

 

posted @ 2021-03-28 11:19 司砚章