PyTorch: A Beginner's Guide to the Full Workflow of Building, Training, and Testing Neural Networks

This post walks through several examples and distills the recurring pattern in their steps, to make the key points and overall flow of training easier to understand and learn:

1. A simple linear-regression program;

2. The detailed steps and standard routine for building and training an RNN;

3. AlexNet (a CNN): the detailed steps for iterative training with data read through a DataLoader, plus the detailed steps for testing.

----

There are three basic ways to build a simple neural network:

1. Define a class by hand (subclassing nn.Module)

2. Use torch.nn.Sequential()

3. Use torch.nn.Sequential(OrderedDict)

from torch import nn

# Method 1: subclass nn.Module (the most flexible)
class Network(nn.Module):
    def __init__(self):
        super().__init__()
        
        # Inputs to hidden layer linear transformation
        self.hidden = nn.Linear(784, 256)
        # Output layer, 10 units - one for each digit
        self.output = nn.Linear(256, 10)
        
        # Define sigmoid activation and softmax output
        self.sigmoid = nn.Sigmoid()
        self.softmax = nn.Softmax(dim=1)
        
    def forward(self, x):
        # Pass the input tensor through each of our operations
        x = self.hidden(x)
        x = self.sigmoid(x)
        x = self.output(x)
        x = self.softmax(x)
        
        return x
nn1 = Network()
nn1
'''Output:
Network(
  (hidden): Linear(in_features=784, out_features=256, bias=True)
  (output): Linear(in_features=256, out_features=10, bias=True)
  (sigmoid): Sigmoid()
  (softmax): Softmax(dim=1)
)
'''
# Method 2: the Sequential class
input_size = 784
hidden_size = [128, 64]
output_size = 10
nn2 = nn.Sequential(
    nn.Linear(input_size, hidden_size[0]),
    nn.ReLU(),
    nn.Linear(hidden_size[0], hidden_size[1]),
    nn.ReLU(),
    nn.Linear(hidden_size[1], output_size),
    nn.Softmax(dim=1)
)
nn2
'''Output:
Sequential(
  (0): Linear(in_features=784, out_features=128, bias=True)
  (1): ReLU()
  (2): Linear(in_features=128, out_features=64, bias=True)
  (3): ReLU()
  (4): Linear(in_features=64, out_features=10, bias=True)
  (5): Softmax(dim=1)
)
'''

# Method 3: also the Sequential class, but passed an OrderedDict, which lets you name each layer
from collections import OrderedDict
nn3 = nn.Sequential(OrderedDict([
    ('fc1', nn.Linear(input_size, hidden_size[0])),
    ('relu1', nn.ReLU()),
    ('fc2', nn.Linear(hidden_size[0], hidden_size[1])),
    ('relu2', nn.ReLU()),
    ('output', nn.Linear(hidden_size[1], output_size)),
    ('softmax', nn.Softmax(dim=1))
]))
nn3

'''Output:
Sequential(
  (fc1): Linear(in_features=784, out_features=128, bias=True)
  (relu1): ReLU()
  (fc2): Linear(in_features=128, out_features=64, bias=True)
  (relu2): ReLU()
  (output): Linear(in_features=64, out_features=10, bias=True)
  (softmax): Softmax(dim=1)
)
'''

Individual layers of each model can then be inspected as follows:

print(nn1.hidden)
print(nn2[2])
print(nn3[4])
print(nn3.output)
'''
Linear(in_features=784, out_features=256, bias=True)
Linear(in_features=128, out_features=64, bias=True)
Linear(in_features=64, out_features=10, bias=True)
Linear(in_features=64, out_features=10, bias=True)
'''
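
Beyond indexing into the models, you can also list the learnable parameters. A small addition (not in the original snippets), using nn1 from above:

for name, param in nn1.named_parameters():
    print(name, param.shape)
'''
hidden.weight torch.Size([256, 784])
hidden.bias torch.Size([256])
output.weight torch.Size([10, 256])
output.bias torch.Size([10])
'''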

Now for the full workflow of training and testing a model.

Case 1: the simplest learning model, linear regression.

# linear regression, minimal implementation
# https://blog.csdn.net/qq_27492735/article/details/89707150
import torch
from torch import nn, optim

# Normally the training data would be loaded from somewhere; here we just
# define a tiny dataset x and its labels y directly (note y = x1 + x2).
# (Variable is deprecated since PyTorch 0.4; plain tensors work directly.)
x = torch.tensor([[1., 2.], [3., 4.], [4., 2.]])
y = torch.tensor([[3.], [7.], [6.]])
# model construction
def model():
    # the model
    net = nn.Sequential(
        nn.Linear(2, 4),
        nn.ReLU6(),
        nn.Linear(4, 3),
        nn.ReLU(),
        nn.Linear(3, 1)
    )
    # optimizer and loss function
    optimizer = optim.Adam(net.parameters(), lr=0.01)
    loss_fun = nn.MSELoss()
    # training loop
    for i in range(300):
        # 1 forward pass
        out = net(x)
        # 2 compute the loss
        loss = loss_fun(out, y)
        if i % 50 == 0:
            print(loss.item())
        # 3 zero the gradients
        optimizer.zero_grad()
        # 4 backward pass
        loss.backward()
        # 5 optimizer step (update the parameters)
        optimizer.step()
    # predictions on the training data
    print(net(x))
    # save the trained model (parameters); .pth / .pt is the conventional extension
    # torch.save(net, 'simplelinreg.pth')
    return net
net = model()
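
Once trained, the net can be queried on new inputs. A quick sketch: on this toy data the target function is exactly y = x1 + x2, so if training converged the prediction should land near 5.

with torch.no_grad():
    print(net(torch.tensor([[2., 3.]])))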

The model above is about as simple as it gets; after all, there is no simpler machine-learning model than linear regression.

Case 2: building and training a recurrent neural network (RNN).

I usually break PyTorch training of a neural network down into 8 steps, grouped into 3 parts; a minimal skeleton of all 8 steps follows this list.

Part 1: declare (1) the model, (2) the loss function, and (3) the optimizer.

Part 2: read the data and (4) run the forward pass. Reading is usually done with a DataLoader, which handles batching for larger tasks; for simple tasks, defining the data directly with numpy is enough.

Part 3: update the parameters, which is almost always the same fixed sequence: (5) compute the loss, (6) optimizer.zero_grad(), (7) loss.backward(), (8) optimizer.step().
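
As a minimal sketch of the 8 steps on placeholder model and data (nothing here comes from the cases below):

import torch
from torch import nn, optim

model = nn.Linear(10, 1)                             # Step 1: model
loss_fn = nn.MSELoss()                               # Step 2: loss function
optimizer = optim.Adam(model.parameters(), lr=0.01)  # Step 3: optimizer

x, y = torch.randn(32, 10), torch.randn(32, 1)       # read data (or use a DataLoader)
for epoch in range(100):
    out = model(x)             # Step 4: forward pass
    loss = loss_fn(out, y)     # Step 5: compute the loss
    optimizer.zero_grad()      # Step 6: zero the gradients
    loss.backward()            # Step 7: backward pass
    optimizer.step()           # Step 8: update the parameters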

"""
torch.nn.RNN()
input_size:
hidden_size:
num_layers:
nonlinearity: 指定非线性函数的使用[tanh, relu],默认tanh
bias: True default,
dropout:如果非%gui除了最后一层之外其他层输出都会套上一个drouput层
batch_first: if True, Tensor的shape就是(batch, seq, feature),输出也是
bidirectional: False default
"""
import numpy as np
import matplotlib.pyplot as plt

import torch
from torch import nn

"""
PyTorch基础入门七:PyTorch搭建循环神经网络(RNN)
https://blog.csdn.net/out_of_memory_error/article/details/81456501

Example:曲线拟合。拟合一个cos函数
"""
class RNNCurveFitting(nn.Module):
    def __init__(self, INPUT_SIZE):
        super(RNNCurveFitting, self).__init__()
        self.rnn = nn.RNN(
            input_size=INPUT_SIZE,
            hidden_size=32,
            num_layers=1,
            batch_first=True
        )
        self.out = nn.Linear(32, 1)
        
    def forward(self, x, h_state):
        r_out, h_state = self.rnn(x, h_state)
        outs = []  # collect the output-layer result at every time step
        for time in range(r_out.size(1)):
            outs.append(self.out(r_out[:, time, :]))
        return torch.stack(outs, dim=1), h_state

# hyperparameters
TIME_STEP = 10
INPUT_SIZE = 1
LR = 0.02
# Step 1: model
model = RNNCurveFitting(INPUT_SIZE)
# Steps 2-3: loss function and optimizer
loss_func = nn.MSELoss()
optimizer = torch.optim.Adam(model.parameters(), lr=LR)

h_state = None  # initial hidden state
for step in range(300):
    start, end = step * np.pi, (step+1)*np.pi
    steps = np.linspace(start, end, TIME_STEP, dtype=np.float32)
    x_np = np.sin(steps)
    y_np = np.cos(steps)
    # Read the data (not needed as a separate step when using a DataLoader).
    # from_numpy converts an array to a tensor that shares memory with the original array.
    x = torch.from_numpy(x_np[np.newaxis, :, np.newaxis])
    y = torch.from_numpy(y_np[np.newaxis, :, np.newaxis])
    # Step 4: forward pass
    prediction, h_state = model(x, h_state)
    # detach the hidden state so backprop does not reach back into earlier iterations
    h_state = h_state.data
    # Steps 5-8
    loss = loss_func(prediction, y)
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()

plt.plot(steps, y_np.flatten(), 'r-')
plt.plot(steps, prediction.data.numpy().flatten(), 'b-')
plt.show()

3: Using the DataLoader.

First, take the torchvision.datasets.MNIST dataset as an example:

import torch
import torchvision
import torchvision.transforms as transforms

train_set = torchvision.datasets.MNIST(root="./mnist_data",train=True,download=True)
test_set = torchvision.datasets.MNIST(root="./mnist_data",train=False,download=True)
 
# the samples are PIL Image objects; inspect a few
print(test_set.classes)
print(test_set[0])
for i in range(10):
    img,label=test_set[i]
    print(test_set.classes[label])
img.show()

'''
['0 - zero', '1 - one', '2 - two', '3 - three', '4 - four', '5 - five', '6 - six', '7 - seven', '8 - eight', '9 - nine']
(<PIL.Image.Image image mode=L size=28x28 at 0x290E792BE80>, 7)
'''

batch_size = 256
# Data loaders. Note: to actually iterate these for training, the datasets above
# should be built with transform=transforms.ToTensor(); the default collate
# function cannot batch raw PIL images.
train_loader = torch.utils.data.DataLoader(dataset=train_set,
                                           batch_size=batch_size,
                                           shuffle=True)

test_loader = torch.utils.data.DataLoader(dataset=test_set,
                                          batch_size=batch_size,
                                          shuffle=False)

Building a DataLoader yourself:

1. As above, use a ready-made Dataset provided by torchvision.datasets.

2. Use the ImageFolder class to read a directory of images, where the subfolder names serve as the class labels (see Case 3 below).

3. Custom: you define how local files are read. The key is to set up, as attributes in __init__, the root path, the list of data paths, and the list of labels, and then implement the actual file reading in __getitem__().

# reference: https://blog.csdn.net/weixin_48249563/article/details/114318425
import os
import matplotlib.pyplot as plt
from PIL import Image
import torch
from torch.utils.data import Dataset
import torchvision.transforms as transforms

transform = transforms.Compose([transforms.ToTensor(),
                                transforms.Resize([596, 1220])])
class SpectrogramSet(Dataset):
    def __init__(self, transform):
        self._root = "D://datasets/medical-spectrogram/"
        self._classes = ["Crackle_Coarse", "Crackle_Fine", "normal", "Wheezes"]
        self._imgs = list()
        self._label = list()
        self._get_all_imgpath()
        self._transform = transform
        
    def __getitem__(self, index):
        image_path = self._imgs[index]
        image = Image.open(image_path)
        if self._transform is not None:
            image = self._transform(image)
        return image, self._label[index]
    
    def __len__(self):
        return len(self._label)
    
    def _get_all_imgpath(self):
        idx = 0
        for c in self._classes:
            cur_dir = os.path.join(self._root, c)
            cur_imgs = [os.path.join(cur_dir, path) for path in os.listdir(cur_dir)]
            self._imgs.extend(cur_imgs)
            self._label.extend([idx]*len(cur_imgs))
            idx += 1

BATCH_SIZE = 32  # any reasonable value; the AlexNet case below uses 256
datasets = SpectrogramSet(transform)
data_loader = torch.utils.data.DataLoader(datasets, batch_size=BATCH_SIZE, shuffle=True)

#  ------- check that the DataLoader works -----------
for i, (inputs, target) in enumerate(data_loader):
    if i == 0:  # inspect just the first batch
        print(inputs.shape)
        print(target)
        plt.figure(figsize=(12, 16))
        for num in range(12):  # confirm the images load and display correctly
            plt.subplot(3, 4, num + 1)
            plt.imshow(inputs[num].permute([1, 2, 0]))
            plt.title(target[num], size=13)
            plt.axis('off')
        plt.tight_layout()
        plt.show()
    else:
        break

A better implementation: build the path and label lists once, shuffle them together, split into train/val/test, and pass the splits into the Dataset:

# reference: https://blog.csdn.net/weixin_48249563/article/details/114318425
import random  # plus the imports from the previous snippet

transform = transforms.Compose([transforms.ToTensor(),
                                transforms.Resize([596, 1220])])
class SpectrogramSet(Dataset):
    def __init__(self, datas, labels, transform):
        self._root = "D://datasets/medical-spectrogram/"
        self._classes = ["Crackle_Coarse", "Crackle_Fine", "normal", "Wheezes"]
        self._imgs = datas
        self._label = labels
        self._transform = transform
        
    def __getitem__(self, index):
        image = Image.open(self._imgs[index])
        if self._transform is not None:
            image = self._transform(image)
        return image, self._label[index]
    
    def __len__(self):
        return len(self._label)

def idx2class():
    return {'0': "Crackle_Coarse", '1': "Crackle_Fine", '2': "normal", '3': "Wheezes"}

def get_train_valid_test():
    root = "D://datasets/medical-spectrogram/"
    # 0 1 2 3
    classes = ["Crackle_Coarse", "Crackle_Fine", "normal", "Wheezes"]
    imgs = list()
    labels = list()
    idx = 0
    for c in classes:
        cur_dir = os.path.join(root, c)
        cur_paths = [os.path.join(cur_dir, path) for path in os.listdir(cur_dir)]
        imgs.extend(cur_paths)
        labels.extend([idx]*len(cur_paths))
        idx += 1
    # Shuffle images and labels together so each path stays paired with its label
    # (shuffling the two lists independently would scramble the correspondence).
    pairs = list(zip(imgs, labels))
    random.shuffle(pairs)
    imgs, labels = [list(t) for t in zip(*pairs)]
    si = len(imgs)
    # split 80% / 10% / 10% into train / validation / test
    train_imgs, train_labels = imgs[:int(si*0.8)], labels[:int(si*0.8)]
    val_imgs, val_labels = imgs[int(si*0.8):int(si*0.9)], labels[int(si*0.8):int(si*0.9)]
    test_imgs, test_labels = imgs[int(si*0.9):], labels[int(si*0.9):]
        
    train_loader = torch.utils.data.DataLoader(SpectrogramSet(train_imgs, train_labels, transform),
                    batch_size=BATCH_SIZE,drop_last=True,shuffle=True)
    val_loader = torch.utils.data.DataLoader(SpectrogramSet(val_imgs, val_labels, transform),
                        batch_size=BATCH_SIZE,drop_last=True,shuffle=True)
    test_loader = torch.utils.data.DataLoader(SpectrogramSet(test_imgs, test_labels, transform),
                        batch_size=BATCH_SIZE,shuffle=False)
    loader = {"train": train_loader, "val": val_loader, "test": test_loader}
    return loader
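
Usage is then simply (assuming BATCH_SIZE and transform are defined as above):

loaders = get_train_valid_test()
for inputs, target in loaders["train"]:
    print(inputs.shape, target[:8])
    break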

 

Case 3: AlexNet

(1) Build DataLoaders from a local dataset. The local directory structure is as follows:

E:.
├─test
│  ├─cat
│  └─dog
├─train_0
│  ├─cat
│  └─dog
└─val
    ├─cat
    └─dog

A DataLoader is a class that abstracts iteration over a dataset. It is constructed from a Dataset; batch_size fixes how many samples are read per iteration, and shuffle specifies whether the data should be shuffled.

ImageFolder is a class that builds an image dataset from a directory tree; its constructor takes the folder path and an instance of the image-transform pipeline.

import os
from PIL import Image
import torch
from torchvision import transforms
from torchvision.datasets import ImageFolder

def get_dataloader():
    BATCH_SIZE = 256
    # per-channel normalization with the standard ImageNet statistics
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    path_1 = r"e:/DATAS/catdogclass/train_0"
    trans_1 = transforms.Compose([
        transforms.Resize((65, 65)),
        transforms.ToTensor(),
        normalize,
    ])
    train_set = ImageFolder(root=path_1, transform=trans_1)
    train_loader = torch.utils.data.DataLoader(train_set, batch_size=BATCH_SIZE, shuffle=True, num_workers=0)

    path_2 = r"e:/DATAS/catdogclass/test"
    trans_2 = transforms.Compose([
        transforms.Resize((65, 65)),
        transforms.ToTensor(),
        normalize,
    ])
    test_set = ImageFolder(root=path_2, transform=trans_2)
    test_loader = torch.utils.data.DataLoader(test_set, batch_size=BATCH_SIZE, shuffle=True, num_workers=0)

    path_3 = r"e:/DATAS/catdogclass/val"
    valid_set = ImageFolder(root=path_3, transform=trans_2)
    valid_loader = torch.utils.data.DataLoader(valid_set, batch_size=BATCH_SIZE, shuffle=True, num_workers=0)

    print(train_set.classes)
    print(train_set.class_to_idx)

    return train_loader, test_loader, valid_loader

(2) Wrapping training and testing in functions. The difference between the two: testing is a single forward pass plus a loss computation, with no gradient tracking, no backpropagation, and no optimizer updates.

def train_model(model, device, train_loader, optimizer, epoch):
    train_loss = 0
    model.train()
    for batch_index, (data, label) in enumerate(train_loader):
        data, label = data.to(device), label.to(device)
        # Step 6: zero the gradients
        optimizer.zero_grad()
        # Steps 4-5: forward pass and loss
        # (loss_fn is a global defined in the main program below)
        output = model(data)
        loss = loss_fn(output, label)
        # Steps 7-8: backward pass and parameter update
        loss.backward()
        optimizer.step()
        if batch_index % 400 == 0:
            train_loss = loss.item()
            print('Train Epoch:{}\tbatch_index:{}\ttrain loss:{:.6f}'.format(epoch,batch_index,loss.item()))
    return train_loss

def test_model(model, device, test_loader):
    model.eval()
    correct = 0.0
    test_loss = 0.0
    with torch.no_grad():
        for data, label in test_loader:
            data, label = data.to(device), label.to(device)
            # Steps 4-5: forward pass and loss
            output = model(data)
            test_loss += loss_fn(output, label).item()
            # predicted class: index of the max logit
            pred = output.argmax(dim=1)
            correct += pred.eq(label.view_as(pred)).sum().item()
        test_loss /= len(test_loader)  # each term is already a batch mean, so average over batches
        print('Test_average_loss:{:.4f}, Accuracy:{:.3f}\n'.format(
            test_loss, 100*correct/len(test_loader.dataset)
        ))
        acc = 100*correct/len(test_loader.dataset)
        return test_loss, acc

(3) Building the AlexNet model

import torch
import torch.nn as nn


class AlexNet(nn.Module):
    def __init__(self, num_classes=2):
        super(AlexNet, self).__init__()
        self.features = nn.Sequential(
            nn.Conv2d(3, 48, kernel_size=11),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2),
            nn.Conv2d(48, 128, kernel_size=5, padding=2),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2),
            nn.Conv2d(128, 192, kernel_size=3, stride=1, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(192, 192, kernel_size=3, stride=1, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(192, 128, kernel_size=3, stride=1, padding=1),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2),
        )
        self.classifier = nn.Sequential(
            nn.Linear(6*6*128, 2048),
            nn.ReLU(inplace=True),
            nn.Dropout(0.5),
            nn.Linear(2048, 2048),
            nn.ReLU(inplace=True),
            nn.Dropout(),
            nn.Linear(2048, num_classes),
        )

    def forward(self, x):
        x = self.features(x)
        x = torch.flatten(x, start_dim=1)  # flatten to (batch, features)
        x = self.classifier(x)
        return x
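
A quick sanity check (my addition, not in the original) that the 65x65 inputs produced by get_dataloader() really yield the 6*6*128 features the classifier expects:

model = AlexNet()
x = torch.randn(1, 3, 65, 65)
print(model.features(x).shape)  # torch.Size([1, 128, 6, 6]), flattened to 6*6*128 = 4608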

(4) Main program

# necessary dependencies
import os
import torch
from torch import nn, optim
from torch.nn import functional as F
from PIL import Image
import torchvision.transforms as transforms
from torchvision.datasets import ImageFolder

# basic hyperparameters
DEVICE = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
EPOCH = 2
BATCH_SIZE = 256


# lists for recording intermediate results, i.e. how the losses change
reslist = []
Train_Loss_list = []
Valid_Loss_list = []
Valid_Accuracy_list = []
# get the DataLoader instances
train_loader, test_loader, valid_loader = get_dataloader()

# training: Steps 1-3, model, optimizer, loss function
model = AlexNet().to(DEVICE)
optimizer = optim.SGD(model.parameters(), lr=0.01, momentum=0.9, weight_decay=0.0005)
loss_fn = F.cross_entropy

model_path = './pthfiles'  # checkpoint directory (matches the load path in step (5))

# iterate over multiple epochs
for epoch in range(1, EPOCH + 1):
    # train on the training set
    train_loss = train_model(model, DEVICE, train_loader, optimizer, epoch)
    Train_Loss_list.append(train_loss)
    torch.save(model, os.path.join(model_path, 'catdogclassifi-%s.pth' % epoch))
    # evaluate on the validation set
    test_loss, acc = test_model(model, DEVICE, valid_loader)
    Valid_Loss_list.append(test_loss)
    Valid_Accuracy_list.append(acc)
    reslist.append(test_loss)

# pick the epoch with the lowest validation loss
min_num = min(reslist)
min_index = reslist.index(min_num)
print('model%s' % (min_index + 1))
print('Validation accuracy at the best (lowest-loss) epoch:')
print('{}'.format(Valid_Accuracy_list[min_index]))

(5) The test set. This step is almost identical to validation. The difference: during training, the validation set is used to evaluate the model and feed information back to guide training, while the test set must remain unseen during training; otherwise it is like leaking exam questions before the exam, and the final evaluation of the trained model would no longer be meaningful. The test set should only be used after training is completely finished.

# load the best checkpoint and evaluate it on the test set
model = torch.load(r'./pthfiles/catdogclassifi-%s.pth' % (min_index + 1))
model.eval()

test_loss, accuracy = test_model(model, DEVICE, test_loader)
print('Test-set accuracy:')
print('{}%'.format(accuracy))

# res_plot: a user-defined plotting helper (definition not shown)
res_plot(range(0, EPOCH), Train_Loss_list, Valid_Loss_list, Valid_Accuracy_list)

 

end
