PyTorch Practice

Autograd

Computational Graphs

For background on computational graphs, see [1].

Adding Tensors to and Removing Them from the Computational Graph

Every tensor in PyTorch has a requires_grad attribute. When it is set to True, the tensor is tracked in the computational graph and gradients are computed for it; the flag defaults to False and can be changed in place with tensor.requires_grad_(False). A tensor can also be removed from the graph with tensor.detach_(), while the detach() variant returns a detached copy without changing the tensor itself. Finally, the context manager with torch.no_grad(): disables gradient tracking for everything computed inside it, which is handy for model evaluation.
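
A minimal sketch of these flags (the tensor names are made up for illustration):

import torch

x = torch.ones(2, 2, requires_grad=True)   # tracked: operations on x are recorded
y = torch.zeros(2, 2)                      # requires_grad defaults to False
y.requires_grad_(True)                     # flip the flag in place

z = x.detach()                             # new tensor outside the graph; x itself is unchanged

with torch.no_grad():                      # nothing inside is tracked, e.g. during evaluation
    out = (x * 2).sum()
print(out.requires_grad)                   # False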

Computing Gradients

Each tensor has a .grad attribute. When a computation is finished and .backward() is called on the result, the current gradient of every tensor with requires_grad=True is accumulated into its .grad attribute.

If the result is a vector rather than a scalar, backward() additionally needs a gradient argument giving the direction (weights) in which to differentiate.
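
For example (a small sketch; the values are arbitrary):

import torch

x = torch.tensor([1.0, 2.0, 3.0], requires_grad=True)

# Scalar result: backward() needs no argument.
s = (x ** 2).sum()
s.backward()
print(x.grad)                      # tensor([2., 4., 6.]), accumulated into .grad

# Vector result: pass a weighting vector v; the result is v^T * J.
x.grad.zero_()                     # clear the accumulated gradient first
y = x ** 2
y.backward(torch.tensor([1.0, 1.0, 1.0]))
print(x.grad)                      # tensor([2., 4., 6.]) again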

Custom Network

Building a Custom Network

A custom network must inherit from nn.Module and implement the forward method. Once the network is built, its trainable parameters are exposed through parameters(), a generator that yields the parameters in the order the layers were defined.
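
A minimal sketch of such a subclass (the TinyNet name and layer sizes are made up for illustration):

import torch
import torch.nn as nn

class TinyNet(nn.Module):                   # hypothetical two-layer network
    def __init__(self):
        super(TinyNet, self).__init__()
        self.fc1 = nn.Linear(10, 16)
        self.fc2 = nn.Linear(16, 2)

    def forward(self, x):
        return self.fc2(torch.relu(self.fc1(x)))

net = TinyNet()
for p in net.parameters():                  # generator, ordered as the layers were defined
    print(p.shape)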

Updating Parameters

Once an optimizer and a criterion (loss function) have been chosen, one update step consists of calling loss.backward() followed by optimizer.step().

Note that nn.Module only accepts batched input, i.e. the first dimension is always the batch dimension; a single update step with batched input is sketched below.
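
The following sketch reuses the hypothetical TinyNet from above with random data, just to show the batched shapes and the order of calls:

x = torch.randn(8, 10)                      # a batch of 8 samples; even a single sample needs a batch dimension
target = torch.randn(8, 2)

criterion = nn.MSELoss()
optimizer = torch.optim.SGD(net.parameters(), lr=0.01)

optimizer.zero_grad()                       # clear gradients left over from the previous step
loss = criterion(net(x), target)
loss.backward()                             # accumulate gradients into each parameter's .grad
optimizer.step()                            # apply one parameter update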

Custom Autograd Function

A custom autograd function must inherit from torch.autograd.Function and implement the forward and backward methods.
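
A sketch of a hand-rolled ReLU written this way:

import torch

class MyReLU(torch.autograd.Function):
    @staticmethod
    def forward(ctx, x):
        ctx.save_for_backward(x)            # stash whatever backward will need
        return x.clamp(min=0)

    @staticmethod
    def backward(ctx, grad_output):
        x, = ctx.saved_tensors
        grad_input = grad_output.clone()
        grad_input[x < 0] = 0               # gradient is zero where the input was negative
        return grad_input

x = torch.randn(4, requires_grad=True)
MyReLU.apply(x).sum().backward()
print(x.grad)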

Custom DataLoader

A custom dataset must implement the __len__ and __getitem__ methods, while a custom data loader must implement the __len__ and __iter__ methods. A minimal sketch follows.
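
Here ToyDataset and its random data are made up for illustration; the built-in DataLoader already provides __len__ and __iter__, so a fully custom loader is rarely needed:

import torch
from torch.utils.data import Dataset, DataLoader

class ToyDataset(Dataset):                  # hypothetical dataset of random points
    def __init__(self, n=100):
        self.x = torch.randn(n, 3)
        self.y = torch.randint(0, 2, (n,))

    def __len__(self):
        return len(self.y)

    def __getitem__(self, index):
        return self.x[index], self.y[index]

loader = DataLoader(ToyDataset(), batch_size=16, shuffle=True)
for xb, yb in loader:
    pass                                    # xb: (batch_size, 3), yb: (batch_size,)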

Example 1 (FCN)

This reproduces an FCN for image segmentation (I forget where it originally came from; I will add the source to the references if I find it).

# Custom dataset
from PIL import Image
from torchvision import transforms as T
from torch.utils.data import Dataset
from glob import glob
import os
import numpy as np
import matplotlib.pyplot as plt


class CustomDataset(Dataset):
    def __init__(self, image_path = "data/BagImages", mode = "train"):
        assert mode in ("train", "val", "test")
        self.image_path = image_path
        self.image_list = glob(os.path.join(self.image_path, "*.jpg"))
        self.mode = mode

        if mode in ("train", "val"):
            self.mask_path = self.image_path + "Masks"

        self.transform_x = T.Compose([T.Resize((256, 256)), T.ToTensor(), T.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])])  # transform
        self.transform_mask = T.Compose([ T.ToTensor()])

    def __getitem__(self, index):
        if self.mode in ("train", "val"):
            image_name = self.image_list[index].split("/")[-1].split(".")[0]
            X = Image.open(self.image_list[index])
            
            mask = np.array(Image.open(os.path.join(self.mask_path, image_name+".jpg")).convert('1').resize((256, 256)))
            masks = np.zeros((mask.shape[0], mask.shape[1], 2), dtype=np.uint8)
            masks[:, :, 0] = mask
            masks[:, :, 1] = ~mask

            X = self.transform_x(X)
            masks = self.transform_mask(masks) * 255
            return X, masks
        
        else:
            X = Image.open(self.image_list[index])
            X = self.transform_x(X)
            path = self.image_list[index]
            return X, path

    def __len__(self):
        return len(self.image_list)

# Network

import torch.nn as nn
from model.vgg import VGG
import torch


class FCN32s(nn.Module):
    def __init__(self, num_classes, backbone="vgg"):
        super(FCN32s, self).__init__()
        self.num_classes = num_classes
        if backbone == "vgg":
            self.features = VGG()

        # deconv1 1/16
        self.deconv1 = nn.ConvTranspose2d(512, 512, kernel_size=3, stride=2, padding=1, output_padding=1)
        self.bn1 = nn.BatchNorm2d(512)
        self.relu1 = nn.ReLU()

        # deconv2 1/8
        self.deconv2 = nn.ConvTranspose2d(512, 256, kernel_size=3, stride=2, padding=1, output_padding=1)
        self.bn2 = nn.BatchNorm2d(256)
        self.relu2 = nn.ReLU()

        # deconv3 1/4
        self.deconv3 = nn.ConvTranspose2d(256, 128, kernel_size=3, stride=2, padding=1, output_padding=1)
        self.bn3 = nn.BatchNorm2d(128)
        self.relu3 = nn.ReLU()

        # deconv4 1/2
        self.deconv4 = nn.ConvTranspose2d(128, 64, kernel_size=3, stride=2, padding=1, output_padding=1)
        self.bn4 = nn.BatchNorm2d(64)
        self.relu4 = nn.ReLU()

        # deconv5 1/1
        self.deconv5 = nn.ConvTranspose2d(64, 32, kernel_size=3, stride=2, padding=1, output_padding=1)
        self.bn5 = nn.BatchNorm2d(32)
        self.relu5 = nn.ReLU()

        self.classifier = nn.Conv2d(32, num_classes, kernel_size=1)

    def forward(self, x):
        features = self.features(x)

        y = self.bn1(self.relu1(self.deconv1(features[4])))

        y = self.bn2(self.relu2(self.deconv2(y)))

        y = self.bn3(self.relu3(self.deconv3(y)))

        y = self.bn4(self.relu4(self.deconv4(y)))

        y = self.bn5(self.relu5(self.deconv5(y)))

        y = self.classifier(y)

        return y

# train
import time
import logging

CUDA = torch.cuda.is_available()

def train(**kwargs):
    mymodel = kwargs["mymodel"]
    criterion = kwargs["criterion"]
    data_loader = kwargs["data_loader"]
    optimizer = kwargs["optimizer"]
    epoch = kwargs["epoch"]
    save_freq = kwargs["save_freq"]
    save_dir = kwargs["save_dir"]
    verbose = kwargs["verbose"]

    start_time = time.time()
    logging.info("Epoch %03d, Learning Rate %g" % (epoch + 1, optimizer.param_groups[0]["lr"]))
    mymodel.train()

    epoch_loss = 0.0
    batches = 0
    for i, sample in enumerate(data_loader):
        image, target = sample
        if CUDA:
            image = image.cuda()
            target = target.cuda()

        optimizer.zero_grad()
        output = mymodel(image)
        loss = criterion(output, target)
        
        loss.backward()
        optimizer.step()

        epoch_loss += loss.item()		
        batches += 1
        
        if (i + 1) % verbose == 0:
            logging.info('Training Loss: %.6f' % (epoch_loss / batches))
            logging.info('')

    # save checkpoint model
    if epoch % save_freq == 0:
        # unwrap DataParallel (if it was used) so the checkpoint keys stay consistent
        if isinstance(mymodel, nn.DataParallel):
            state_dict = mymodel.module.state_dict()
        else:
            state_dict = mymodel.state_dict()
        for key in state_dict.keys():
            state_dict[key] = state_dict[key].cpu()

        torch.save({
            'epoch': epoch,
            'save_dir': save_dir,
            'state_dict': state_dict,},
            os.path.join(save_dir, '%03d.ckpt' % (epoch + 1)))

    end_time = time.time()
    logging.info('Batch Loss: %.6f Time: %d s' % (epoch_loss / batches, end_time - start_time))

# main
from torch.utils.data import DataLoader, random_split
from tqdm import tqdm

# hyper_parameter is assumed to be a dict of settings defined elsewhere; the keys used
# below are: num_classes, back_bone, ckpt, initial_training, save_dir, mode,
# batch_size, lr, epochs, save_freq and verbose.
def main(hyper_parameter=hyper_parameter):
    # training
    start_epoch = 0
    mymodel = FCN32s(hyper_parameter["num_classes"], hyper_parameter["back_bone"])  # the FCN32s class defined above

    if hyper_parameter["ckpt"]:
        ckpt = hyper_parameter["ckpt"]

        if hyper_parameter["initial_training"] == 0:
            epoch_name = (ckpt.split('/')[-1]).split('.')[0]
            start_epoch = int(epoch_name)

        checkpoint = torch.load(ckpt)
        state_dict = checkpoint["state_dict"]

        mymodel.load_state_dict(state_dict)
        logging.info(f'Model loaded from {hyper_parameter["ckpt"]}')

    save_dir = hyper_parameter["save_dir"]
    if not os.path.exists(save_dir):
        os.mkdir(save_dir)

    # CUDA
    if CUDA:
        mymodel.to(torch.device("cuda"))
        mymodel = nn.DataParallel(mymodel)

    custom_dataset = CustomDataset()
    test_set = CustomDataset("data/testImages", mode="test")

    train_size = int(0.9 * len(custom_dataset))
    val_size = len(custom_dataset) - train_size
    train_set, val_set = random_split(custom_dataset, [train_size, val_size])

    train_loader = DataLoader(train_set, batch_size=hyper_parameter["batch_size"], shuffle=True)
    val_loader = DataLoader(val_set, batch_size=hyper_parameter["batch_size"], shuffle=False)
    test_loader = DataLoader(test_set, batch_size=hyper_parameter["batch_size"], shuffle=False)

    if hyper_parameter["mode"] == "test":
        test(mymodel=mymodel,
                data_loader=test_loader)
        return

    optimizer = torch.optim.Adam(mymodel.parameters(), lr=hyper_parameter["lr"])
    criterion = nn.BCEWithLogitsLoss()

    logging.info('Start training: Total epochs: {}, Batch size: {}, Training size: {}, Validation size: {}'.
                    format(hyper_parameter["epochs"], hyper_parameter["batch_size"], len(train_set), len(val_set)))

    for epoch in tqdm(range(start_epoch, hyper_parameter["epochs"])):
        train(  epoch=epoch,
                data_loader=train_loader, 
                mymodel=mymodel,
                criterion=criterion,
                optimizer=optimizer,
                save_freq=hyper_parameter["save_freq"],
                save_dir=hyper_parameter["save_dir"],
                verbose=hyper_parameter["verbose"])

        validate(data_loader=val_loader,
                mymodel=mymodel,
                criterion=criterion,
                verbose=hyper_parameter["verbose"])
        # scheduler.step()

Example 2 (LSTM)

A classification model built with an LSTM (for background on LSTMs, see [4]). A few things to watch out for:

  1. CrossEntropyLoss only accepts Long (int64) targets
  2. T.ToTensor() requires its input to be 2- or 3-dimensional
  3. The DataLoader adds an extra dimension, hence the squeeze calls in the training loop below
  4. CrossEntropyLoss requires the ground-truth labels to be 1-dimensional
  5. Precision and recall for multi-class classification differ from the binary case; see [3] for details

import scipy.io as scio
import time
import logging
import numpy as np

# hyper parameters
OPTIONS = {'batch_size':64, 'lr':0.01}
logging.basicConfig(level = logging.INFO,format = '%(asctime)s - %(name)s - %(levelname)s - %(message)s')

# prepare dataset wrapped in pytorch
from torchvision import transforms as T
from torch.utils.data import Dataset
import torch

class CustomDataset(Dataset):
    def __init__(self, mode = "train"):
        assert mode in ("train", "test")
        self.mode = mode
        # train_dataset / test_dataset are assumed to be globals (dicts with 'X' and 'Y',
        # e.g. loaded earlier with scio.loadmat); they are not defined in this snippet
        self.dataset = eval("".join([self.mode, "_dataset"]))
        
        self.transform = T.Compose([T.ToTensor()]) 

    def __getitem__(self, index):
        # the "train" and "test" branches were identical, so they are merged here
        X = self.dataset['X'][index]
        Y = self.dataset['Y'][index]

        X = self.transform(X)
        Y = torch.from_numpy(Y).long()  # CrossEntropyLoss requires Long targets
        return X, Y

    def __len__(self):
        return len(self.dataset['Y'])

from torch.utils.data import DataLoader

torch.manual_seed(seed=1)
train_set = CustomDataset()
test_set = CustomDataset(mode="test")
train_loader = DataLoader(train_set, batch_size=OPTIONS["batch_size"], shuffle=True)
test_loader = DataLoader(test_set, batch_size=OPTIONS["batch_size"], shuffle=True)

# LSTM for classification
# Model
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torchvision
class CustomModel(nn.Module):
    
    def __init__(self, input_size, hidden_size, output_size, num_layer=1):
        super(CustomModel, self).__init__()
        self.lstm = nn.LSTM(input_size=input_size,
                            hidden_size=hidden_size,
                            num_layers=num_layer, batch_first=True)  # if batch_first is True, input shape should be (batch, seq_length, feature)
        self.out = nn.Linear(hidden_size, output_size)
    
    def forward(self, x):
        output, (h_n, c_n) = self.lstm(x)  # h_n, c_n: final hidden and cell states
        predict_y = self.out(output[:, -1, :])
        return predict_y  # don't need softmax function if using crossentropy loss

# Training
def train(model, loss_fn, epochs, train_loader, optimizer):
    for epoch in range(epochs):
        epoch_loss = 0.0
        batches = 0
        start_time = time.time()
        
        for i, data in enumerate(train_loader):
            x , y = data
            x = torch.squeeze(x)  # drop the extra dimension introduced by ToTensor/DataLoader
            y = torch.squeeze(y)  # targets must be 1-d for CrossEntropyLoss
            
            optimizer.zero_grad()
            output = model(x)
            loss = loss_fn(output, y)
            loss.backward()
            optimizer.step()
            
            epoch_loss += loss.item()
            batches += 1
            
        logging.info('In epoch %d, training Loss: %.6f' % (epoch, epoch_loss / batches))
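
A hypothetical way to wire the pieces together (the sizes passed to CustomModel depend on the actual .mat data, which is not shown here):

model = CustomModel(input_size=28, hidden_size=64, output_size=10)
loss_fn = nn.CrossEntropyLoss()             # expects 1-d Long targets
optimizer = optim.Adam(model.parameters(), lr=OPTIONS["lr"])
train(model, loss_fn, epochs=10, train_loader=train_loader, optimizer=optimizer)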

References

  1. 吴立德, Numerical Optimization (数值优化), 2015
  2. PyTorch tutorials
  3. hfutdog, Computing accuracy, precision, recall and F1 score with sklearn
  4. ymmy, Understanding LSTM details (PyTorch version)