猫狗数据集分类VGG16(PyTorch实现)

数据集地址:https://www.kaggle.com/datasets/shaunthesheep/microsoft-catsvsdogs-dataset

```python
from shutil import copyfile
import random
import torch.nn as nn
from torch.utils.data import DataLoader
import torch.optim as optim
import os
from PIL import Image
from torch.utils.tensorboard import SummaryWriter
import datetime
import torchvision
import torch
import torchvision.transforms as T

# import cv2

torch.cuda.empty_cache()
torch.backends.cudnn.benchmark = True
torch.cuda.set_per_process_memory_fraction(0.95, 0)
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
print('PyTorch版本:' + torch.__version__)
print('CUDA版本:' + torch.version.cuda)
print('CUDNN版本:' + str(torch.backends.cudnn.version()))
print('设备名称:' + torch.cuda.get_device_name(0))
PyTorch版本:1.11.0
CUDA版本:11.3
CUDNN版本:8200
设备名称:NVIDIA GeForce RTX 3060 Laptop GPU
def walk_through_dir(directory_name):  # 输出路径下的目录和目录中的文件
    for dirpaths, dirnames, filenames in os.walk(directory_name):
        print(f"There are {len(dirnames)} directories and {len(filenames)} images in '{dirpaths}'")
input_data_dir = './PetImages'  # 当前数据目录
walk_through_dir(input_data_dir)  # 查看当前数据路径下的目录和目录中的文件
There are 2 directories and 0 images in './PetImages'
There are 0 directories and 12501 images in './PetImages\Cat'
There are 0 directories and 12501 images in './PetImages\Dog'
print("文件目录结构:")
!tree./ PetImages
文件目录结构:
卷 新加卷 的文件夹 PATH 列表
卷序列号为 44A6-CC9A
D:\DOCUMENTS\PYTORCH深度学习实战\PETIMAGES
├─Cat
└─Dog
os.mkdir('./data')  # 创建目录来存放训练数据和测试数据
os.mkdir('./data/train')
os.mkdir('./data/test')
for folder in os.listdir(input_data_dir):  # 遍历cat和dog目录
    files = os.listdir(os.path.join(input_data_dir, folder))  # 拼接得到图像文件的父目录
    images = []  # 创建列表来存储图像路径
    for f in files:  # 遍历Dog/Cat目录下的图像
        try:
            img = Image.open(os.path.join(input_data_dir, folder, f)).convert("RGB")  # 如果图像能够打开则添加到路径列表
            images.append(f)
        except IOError:  # 如果发生输入输出错误则输出发生错误的文件,并跳过该文件
            print(f'fail on {f}')
            pass

    random.shuffle(images)  # 将列表中的路径打乱
    count = len(images)  # Cat/Dog目录下的总图片数
    split = int(0.8 * count)  # 选取其中80%作为训练集
    os.mkdir(os.path.join('./data/train', folder))  # 创建训练集下的Dog/Cat目录
    os.mkdir(os.path.join('./data/test', folder))  # 创建测试集下的Dog/Cat目录

    for c in range(split):
        source_file = os.path.join(input_data_dir, folder, images[c])  # 得到训练集源文件的路径
        distination = os.path.join('./data/train', folder, images[c])  # 创建目标路径
        copyfile(source_file, distination)  # 将训练集路径下的文件放到训练集中
    for c in range(split, count):
        source_file = os.path.join(input_data_dir, folder, images[c])  # 得到测试集源文件的路径
        distination = os.path.join('./data/test', folder, images[c])  # 创建目标存放路径
        copyfile(source_file, distination)  # 将测试集路径下的文件放到测试集中
train_dir = './data/train'  # 生成的训练集文件目录
walk_through_dir(train_dir)  # 查看训练集目录
test_dir = './data/test'  # 生成的测试集文件目录
walk_through_dir(test_dir)  # 查看测试集目录
There are 2 directories and 0 images in './data/train'
There are 0 directories and 9999 images in './data/train\Cat'
There are 0 directories and 9999 images in './data/train\Dog'
There are 2 directories and 0 images in './data/test'
There are 0 directories and 2500 images in './data/test\Cat'
There are 0 directories and 2500 images in './data/test\Dog'
# 设置测试集的图像处理
train_transforms = T.Compose([
    T.RandomResizedCrop(224),  # 随机裁剪一个区域,然后重塑形状到(224,224)
    T.RandomHorizontalFlip(0.5),  # 50%的概率随机水平翻转
    T.ToTensor()  # 将像素值归一化到 [0.0,1.0]
])
valid_transform = T.Compose([
    T.CenterCrop(224),  # 从图像中心裁剪(224,224)的图像
    T.ToTensor()
])
train_dataset = torchvision.datasets.ImageFolder(root=train_dir, transform=train_transforms)  # 创造训练集
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=16, shuffle=True, num_workers=0)  # 创造训练数据加载器
valid_dataset = torchvision.datasets.ImageFolder(root=test_dir, transform=valid_transform)  # 创造测试集
valid_loader = torch.utils.data.DataLoader(valid_dataset)  # 创造测试集数据加载器
train_dataset.class_to_idx  # 查看训练集类映射
{'Cat': 0, 'Dog': 1}
# 使用BatchNormalization层的VGG16模型
model = torchvision.models.vgg16_bn(pretrained=True).to(device)
model.classifier[6] = nn.Linear(4096, 2, device=device)  # 修改classifier层最后一个输出2个特征
for name, module in model.named_modules():
    if name != 'classifier.6':  # 冻结除了最后一个层外所有层的参数
        # print(module)
        for parameter in module.parameters():
            parameter.required_grad = False
optimizer = optim.AdamW(model.parameters(), lr=1e-3, weight_decay=1e-3)  # 使用AdamW优化器,设置学习率和权值衰减
criterion = nn.CrossEntropyLoss().to(device)  # 使用交叉熵损失函数(相当于softmax+NLLLoss)
def train_one_epoch(epoch_index, tb_writer):
    running_loss = 0.
    last_loss = 0.
    for i, (inputs, labels) in enumerate(train_loader):  # 进行一个Epoch
        inputs, labels = inputs.to(device), labels.to(device)  # 将数据放到device上
        optimizer.zero_grad()  # 初始梯度置为0
        outputs = model(inputs)  # 得到输出
        loss = criterion(outputs, labels)  # 计算损失
        loss.backward()  # 损失反向传播,计算梯度
        optimizer.step()  # 使用优化器对权重进行更新
        running_loss += loss.item()  # 添加到运行损失中
        if i % 1000 == 999:  # 每1000批
            last_loss = running_loss / 1000  # 平均每批的损失
            print('  batch {} loss: {}'.format(i + 1, last_loss))
            tb_x = epoch_index * len(train_loader) + i + 1  # 作为全局步长来写入TensorBoard
            tb_writer.add_scalar('Loss/train', last_loss, tb_x)  # 写入训练损失到TensorBoard
            running_loss = 0.  # 重置训练损失

    return last_loss
# Initializing in a separate cell so we can easily add more epochs to the same run
timestamp = datetime.datetime.now().strftime('%Y%m%d_%H%M%S')  # 获取当前时间来作为写入的路径
writer = SummaryWriter('runs/dogvscat_trainer_{}'.format(timestamp))
epoch_number = 0

EPOCHS = 5  # 训练轮次

best_vloss = 1000000.  # 最好的测试集损失

for epoch in range(EPOCHS):
    print('EPOCH {}:'.format(epoch_number + 1))  # 输出轮次

    avg_loss = train_one_epoch(epoch_number, writer)  # 获得一轮结束后平均损失

    with torch.no_grad():  # 不求梯度
        running_vloss = 0.0
        for i, (vinputs, vlabels) in enumerate(valid_loader):
            vinputs, vlabels = vinputs.to(device), vlabels.to(device)
            voutputs = model(vinputs)
            vloss = criterion(voutputs, vlabels)
            running_vloss += vloss

    avg_vloss = running_vloss / (i + 1)
    print('LOSS train {} valid {}'.format(avg_loss, avg_vloss))

    writer.add_scalars('Training vs. Validation Loss',
                       {'Training': avg_loss, 'Validation': avg_vloss},
                       epoch_number + 1)  # 写入训练集和测试集损失
    writer.flush()  # 清空缓冲区数据

    if avg_vloss < best_vloss:  # 如果平均损失小于最小的损失
        best_vloss = avg_vloss  # 更新最小损失为平均损失
        model_path = 'model_{}_{}'.format(timestamp, epoch_number)
        torch.save(model.state_dict(), model_path)  # 将模型保存到该路径

    epoch_number += 1
EPOCH 1:
  batch 1000 loss: 0.6323979058712721


C:\Users\reion\miniconda3\envs\torch\lib\site-packages\PIL\TiffImagePlugin.py:845: UserWarning: Truncated File Read
  warnings.warn(str(msg))


LOSS train 0.6323979058712721 valid 0.6807079315185547
EPOCH 2:
  batch 1000 loss: 0.4388493032604456
LOSS train 0.4388493032604456 valid 0.687056303024292
EPOCH 3:
  batch 1000 loss: 0.3774294305369258
LOSS train 0.3774294305369258 valid 0.740951418876648
EPOCH 4:
  batch 1000 loss: 0.355394969265908
LOSS train 0.355394969265908 valid 0.8710261583328247
EPOCH 5:
  batch 1000 loss: 0.5819370641112328
LOSS train 0.5819370641112328 valid 0.7211285829544067
model.load_state_dict(torch.load('./model_20220518_195539_0'))  # 加载保存下来的模型
model.eval()
with torch.no_grad():  # 计算准确率
    correct = 0
    total = 0

    for i, (vinputs, vlabels) in enumerate(valid_loader):
        total += len(vinputs)
        vinputs, vlabels = vinputs.to(device), vlabels.to(device)
        voutputs = model(vinputs)
        correct += (voutputs.argmax(1) == vlabels).type(torch.float).sum().item()
print('准确率:' + str(correct / total))
准确率:0.7476

TensorBoard记录的训练损失和测试损失,蓝色为训练损失,红色为测试损失

posted @ 2022-05-18 18:34  里列昂遗失的记事本  阅读(810)  评论(0编辑  收藏  举报