VGGNet

Introduction to VGGNet

VGG was proposed in 2014 by the well-known Visual Geometry Group (VGG) at the University of Oxford. It won first place in the Localization Task and second place in the Classification Task of that year's ImageNet (ILSVRC 2014) competition.

VGG Architecture

Highlights of the network:

  • Stacking multiple \(3\times 3\) convolution kernels to replace large kernels, which reduces the number of parameters.

The paper notes that two stacked 3x3 convolution kernels can replace one 5x5 kernel, and three stacked 3x3 kernels can replace one 7x7 kernel, while covering the same receptive field.

CNN Receptive Field

In a convolutional neural network, the receptive field is the region of the input layer that determines a single element of a given layer's output. Put simply, it is the size of the input region that one unit of the output feature map corresponds to.
Output size formula:
\(out_{size}=(in_{size}-F_{size}+2P)/S+1\)
Receptive field formula (a quick numeric check follows the symbol definitions below):
\(F(i)=(F(i+1)-1)\times Stride+Ksize\)

  • \(F(i)\) is the receptive field of layer \(i\)
  • \(Stride\) is the stride of layer \(i\)
  • \(Ksize\) is the size of the convolution or pooling kernel
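As a quick sanity check of the formula, the short sketch below (plain Python, no dependencies) walks the formula backwards through a stack of convolutions; three stacked 3x3 convolutions with stride 1 give a receptive field of 7, the same as a single 7x7 kernel.

def receptive_field(layers):
    """layers: list of (kernel_size, stride) pairs, ordered from input to output."""
    rf = 1  # one unit in the final feature map
    for ksize, stride in reversed(layers):  # F(i) = (F(i+1) - 1) * stride + ksize
        rf = (rf - 1) * stride + ksize
    return rf

print(receptive_field([(3, 1), (3, 1), (3, 1)]))  # 7, same as one 7x7 kernel
print(receptive_field([(3, 1), (3, 1)]))          # 5, same as one 5x5 kernel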

As mentioned above, two stacked 3x3 kernels can replace one 5x5 kernel and three stacked 3x3 kernels can replace one 7x7 kernel.
Comparing the parameters needed by a single \(7\times7\) kernel with those of three stacked \(3\times3\) kernels (assuming both the input and output channels equal C):
\(7\times7\times C\times C=49C^2\)
\(3\times3\times C\times C+3\times3\times C\times C+3\times3\times C\times C=27C^2\)
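These counts can be verified with a minimal PyTorch sketch (C = 64 is an arbitrary choice here, and bias is disabled so that only kernel weights are counted):

import torch.nn as nn

C = 64
conv7 = nn.Conv2d(C, C, kernel_size=7, bias=False)
stacked3 = nn.Sequential(*[nn.Conv2d(C, C, kernel_size=3, bias=False) for _ in range(3)])

print(sum(p.numel() for p in conv7.parameters()))     # 49 * C * C = 200704
print(sum(p.numel() for p in stacked3.parameters()))  # 27 * C * C = 110592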

Building and Training a VGG Model with PyTorch

Dataset

Dataset download link: https://storage.googleapis.com/download.tensorflow.org/example_images/flower_photos.tgz
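If the archive has not been downloaded yet, it can be fetched and unpacked with a short helper like the sketch below (the ./data target directory is an arbitrary choice):

import os
import tarfile
import urllib.request

url = 'https://storage.googleapis.com/download.tensorflow.org/example_images/flower_photos.tgz'
os.makedirs('data', exist_ok=True)
archive_path = os.path.join('data', 'flower_photos.tgz')
if not os.path.exists(archive_path):
    urllib.request.urlretrieve(url, archive_path)  # download the archive
with tarfile.open(archive_path) as tar:
    tar.extractall('data')  # extracts to data/flower_photos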

Splitting the Dataset

Write a utility script to split the dataset:

import json
import os
import random

import matplotlib.pyplot as plt
import torch
import numpy as np
from PIL import Image
from torch.utils.data import Dataset


def read_split_data(root: str, val_rate: float = 0.2):  # split the data into training and validation sets
    random.seed(42)  # fix the seed so the random split is reproducible
    assert os.path.exists(root), "dataset root: {} does not exist.".format(root)

    # traverse the folders; each folder corresponds to one class
    flower_class = [cla for cla in os.listdir(root) if os.path.isdir(os.path.join(root, cla))]
    # sort the class names to keep the order consistent
    flower_class.sort()
    # build a mapping from class name to numeric index
    class_indices = dict((k, v) for v, k in enumerate(flower_class))
    json_str = json.dumps(dict((val, key) for key, val in class_indices.items()), indent=4)
    with open('class_indices.json', 'w') as json_file:
        json_file.write(json_str)

    train_images_path = []  # paths of all training images
    train_images_label = []  # class indices of the training images
    val_images_path = []  # paths of all validation images
    val_images_label = []  # class indices of the validation images
    every_class_num = []  # number of samples in each class
    supported = [".jpg", ".JPG", ".png", ".PNG"]  # supported file extensions
    # iterate over the files in each class folder
    for cla in flower_class:
        cla_path = os.path.join(root, cla)
        # collect the paths of all files with a supported extension
        images = [os.path.join(root, cla, i) for i in os.listdir(cla_path)
                  if os.path.splitext(i)[-1] in supported]
        # look up the index of this class
        image_class = class_indices[cla]
        # record the number of samples in this class
        every_class_num.append(len(images))
        # randomly sample validation images according to val_rate
        val_path = random.sample(images, k=int(len(images) * val_rate))

        for img_path in images:
            if img_path in val_path:  # if the path was sampled for validation, put it in the validation set
                val_images_path.append(img_path)
                val_images_label.append(image_class)
            else:  # otherwise put it in the training set
                train_images_path.append(img_path)
                train_images_label.append(image_class)

    print("{} images were found in the dataset.".format(sum(every_class_num)))
    print("{} images for training.".format(len(train_images_path)))
    print("{} images for validation.".format(len(val_images_path)))

    plot_image = False
    if plot_image:
        # plot a bar chart of the number of images per class
        plt.bar(range(len(flower_class)), every_class_num, align='center')
        # replace the x ticks 0,1,2,3,4 with the class names
        plt.xticks(range(len(flower_class)), flower_class)
        # add a value label above each bar
        for i, v in enumerate(every_class_num):
            plt.text(x=i, y=v + 5, s=str(v), ha='center')
        # x-axis label
        plt.xlabel('image class')
        # y-axis label
        plt.ylabel('number of images')
        # chart title
        plt.title('flower class distribution')
        plt.show()

    return train_images_path, train_images_label, val_images_path, val_images_label


class ImageDataset(Dataset):
    """自定义数据集"""

    def __init__(self, images_path: list, images_class: list, transform=None):
        self.images_path = images_path
        self.images_class = images_class
        self.transform = transform

    def __len__(self):
        return len(self.images_path)

    def __getitem__(self, item):
        img = Image.open(self.images_path[item]).convert("RGB")
        # 'RGB' means a color image, 'L' a grayscale one; the convert above already guarantees RGB
        if img.mode != 'RGB':
            raise ValueError("image: {} isn't RGB mode.".format(self.images_path[item]))
        label = self.images_class[item]

        if self.transform is not None:
            img = self.transform(img)

        return img, label


def compute_mean_std(img_path, transform=None):
    """
    计算训练集图像的均值和标准差
    :param img_path: 训练集图像路径
    :param transform: 对图像进行变换
    :return: 均值和标准差
    """
    total = len(img_path)
    means = [0, 0, 0]
    stds = [0, 0, 0]
    for pic in img_path:
        img = Image.open(pic).convert("RGB")
        if transform:
            img = transform(img)
        img = np.asarray(img) / 255.
        for i in range(3):
            means[i] += img[:, :, i].mean()
            stds[i] += img[:, :, i].std()
    json_dict = {
        'mean': (np.asarray(means) / total).tolist(),
        'std': (np.asarray(stds) / total).tolist()
    }
    json_str = json.dumps(json_dict)
    with open('mean_std.json', 'w') as json_file:
        json_file.write(json_str)
    return json_dict
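Assuming the script above is saved as utils.py (as it is imported by the training script later), it can be exercised on its own roughly as follows; the dataset path is just an example:

from torchvision import transforms
from utils import read_split_data, ImageDataset, compute_mean_std

root = './data/flower_photos'  # example path
train_paths, train_labels, val_paths, val_labels = read_split_data(root)
mean_std = compute_mean_std(train_paths)
print(mean_std)

dataset = ImageDataset(train_paths, train_labels, transform=transforms.ToTensor())
img, label = dataset[0]
print(img.shape, label)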

Write the model script

import torch.nn as nn
import torch

# URLs of the official pretrained weights
model_urls = {
    'vgg11': 'https://download.pytorch.org/models/vgg11-bbd30ac9.pth',
    'vgg13': 'https://download.pytorch.org/models/vgg13-c768596a.pth',
    'vgg16': 'https://download.pytorch.org/models/vgg16-397923af.pth',
    'vgg19': 'https://download.pytorch.org/models/vgg19-dcbb9e9d.pth'
}


# build the VGG model by subclassing nn.Module
class VGG(nn.Module):
    def __init__(self, features, num_classes=1000, init_weights=False):
        super(VGG, self).__init__()
        self.features = features  # VGG feature-extraction layers (mainly convolutions)
        self.classifier = nn.Sequential(  # flatten the extracted features and feed them to the linear layers for classification
            nn.Linear(512 * 7 * 7, 4096),
            nn.ReLU(inplace=True),
            nn.Dropout(p=0.5),
            nn.Linear(4096, 4096),
            nn.ReLU(inplace=True),
            nn.Dropout(p=0.5),
            nn.Linear(4096, num_classes)
        )
        if init_weights:  # optionally initialize the weights
            self._initialize_weights()

    def forward(self, x):
        x = self.features(x)
        x = torch.flatten(x, start_dim=1)  # flatten from the second dimension on; the first dimension is the batch
        x = self.classifier(x)  # classify based on the extracted features
        return x

    def _initialize_weights(self):  # weight initialization
        for m in self.modules():
            if isinstance(m, nn.Conv2d):  # convolution layer
                nn.init.xavier_uniform_(m.weight)  # Xavier initialization for the convolution weights
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)  # initialize the convolution bias to 0
            elif isinstance(m, nn.Linear):  # linear layer
                nn.init.xavier_uniform_(m.weight)  # Xavier initialization for the linear-layer weights
                nn.init.constant_(m.bias, 0)  # initialize the linear-layer bias to 0


# build the feature-extraction layers by iterating over a configuration list
def make_features(cfg: list):
    layers = []  # collect the layers in a list
    in_channels = 3  # the initial input has 3 channels
    for v in cfg:
        if v == 'M':  # 'M' marks a max-pooling layer
            layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
        else:  # otherwise add a convolution followed by an activation
            conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1)
            layers += [conv2d, nn.ReLU(inplace=True)]
            in_channels = v
    return nn.Sequential(*layers)  # unpack the list into Sequential to build the feature extractor


# 'M' denotes a pooling layer; every other entry is a convolution. The default input is a 3-channel RGB image.
cfgs = {
    'vgg11': [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
    'vgg13': [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
    'vgg16': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'],
    'vgg19': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512, 'M'],
}


def vgg(model_name='vgg16', **kwargs):
    assert model_name in cfgs, 'Warning: model name {} not in cfgs dict!'.format(model_name)
    cfg = cfgs[model_name]

    model = VGG(make_features(cfg), **kwargs)
    return model
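Assuming this file is saved as model.py, a quick shape check of the vgg16 variant with a random input might look like this:

import torch
from model import vgg

net = vgg(model_name='vgg16', num_classes=5, init_weights=True)
x = torch.randn(1, 3, 224, 224)  # one random 224x224 RGB image
with torch.no_grad():
    out = net(x)
print(out.shape)  # torch.Size([1, 5])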

Write the training script

import os
import sys
import json

import torch
import torch.nn as nn
from torchvision import transforms, datasets
import torch.optim as optim
from tqdm import tqdm

from model import vgg
from utils import read_split_data, ImageDataset, compute_mean_std


def main():
    # select the training device
    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
    print('using {} device.'.format(device))

    # image root path
    image_path = r"D:\卷积神经网络PPT\AlexNet\Alex_tf\data\flower_photos"
    # split the dataset into training and validation sets

    train_images_path, train_images_label, val_images_path, val_images_label = read_split_data(root=image_path)
    # per-channel mean and standard deviation of the training set
    mean_std = compute_mean_std(train_images_path)

    # data augmentation for the training and validation sets
    data_transform = {
        'train': transforms.Compose([
            transforms.RandomResizedCrop(224),  # random crop, then resize to the target size
            transforms.RandomHorizontalFlip(),  # random horizontal flip
            transforms.ToTensor(),
            transforms.Normalize(mean_std['mean'], mean_std['std'])  # normalize
        ]),
        'val': transforms.Compose([
            transforms.Resize((224, 224)),  # resize to the target size
            transforms.ToTensor(),
            transforms.Normalize(mean_std['mean'], mean_std['std'])
        ])
    }

    # batch size
    batch_size = 32
    # number of data-loading worker processes
    nw = min([os.cpu_count(), batch_size if batch_size > 1 else 0, 8])
    print('using {} dataloader workers every process'.format(nw))
    train_dataset = ImageDataset(train_images_path, train_images_label, transform=data_transform['train'])
    train_num = len(train_dataset)

    # data loader for the training set
    train_loader = torch.utils.data.DataLoader(train_dataset,
                                               batch_size=batch_size,
                                               shuffle=True,
                                               pin_memory=True,
                                               num_workers=nw)
    # create the validation set
    valid_dataset = ImageDataset(val_images_path, val_images_label, transform=data_transform['val'])
    # data loader for the validation set
    valid_loader = torch.utils.data.DataLoader(valid_dataset,
                                               batch_size=batch_size,
                                               shuffle=False,
                                               pin_memory=True,
                                               num_workers=nw)
    # number of validation samples
    val_num = len(valid_dataset)
    print('using {} images for training, {} images for validation'.format(train_num, val_num))
    # use the vgg16 configuration
    model_name = 'vgg16'
    # instantiate the model
    net = vgg(model_name=model_name, num_classes=5, init_weights=True)
    net.to(device)
    # loss function: cross-entropy loss
    loss_function = nn.CrossEntropyLoss()
    # AdamW optimizer
    optimizer = optim.AdamW(net.parameters(), lr=1e-4, weight_decay=5e-2)

    epochs = 10  # train for 10 epochs
    best_acc = 0.0  # best validation accuracy so far
    save_path = './{}Net.pth'.format(model_name)  # path where the model weights are saved
    # number of iterations per epoch
    train_steps = len(train_loader)
    for epoch in range(epochs):
        # switch the model to training mode
        net.train()
        # accumulated training loss
        running_loss = 0.0
        train_bar = tqdm(train_loader, file=sys.stdout)
        for step, data in enumerate(train_bar):
            images, labels = data
            optimizer.zero_grad()
            outputs = net(images.to(device))
            loss = loss_function(outputs, labels.to(device))
            loss.backward()
            optimizer.step()

            running_loss += loss.item()
            # show progress information during training
            train_bar.desc = 'train epoch[{}/{}] loss:{:.3f}'.format(epoch + 1, epochs, loss.item())
        # switch the model to evaluation mode
        net.eval()
        acc = 0.0  # accumulate the number of correct predictions
        with torch.no_grad():
            val_bar = tqdm(valid_loader, file=sys.stdout)
            for val_data in val_bar:
                val_images, val_labels = val_data
                outputs = net(val_images.to(device))
                y_pred = torch.argmax(outputs, dim=1)
                # count correct predictions
                acc += (torch.eq(y_pred, val_labels.to(device))).sum().item()
        val_accurate = acc / val_num  # validation accuracy
        print('[epoch %d] train_loss: %.3f  val_accuracy: %.3f' %
              (epoch + 1, running_loss / train_steps, val_accurate))

        # save the model with the highest validation accuracy
        if val_accurate > best_acc:
            best_acc = val_accurate
            torch.save(net.state_dict(), save_path)

    print('Finished Training')


if __name__ == '__main__':
    main()

Training output:

using cuda:0 device.
3670 images were found in the dataset.
2939 images for training.
731 images for validation.
using 8 dataloader workers every process
using 2939 images for training, 731 images for validation
train epoch[1/10] loss:1.542: 100%|██████████| 92/92 [00:51<00:00,  1.80it/s]
100%|██████████| 23/23 [00:16<00:00,  1.39it/s]
[epoch 1] train_loss: 1.456  val_accuracy: 0.428

Prediction script

import os
import json
import torch
from PIL import Image
from torchvision import transforms
import matplotlib.pyplot as plt
from model import vgg


def main():
    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
    data_transform = transforms.Compose([
        transforms.Resize((224, 224)),
        transforms.ToTensor(),
        transforms.Normalize([0.46490331, 0.42357826, 0.30057153], [0.25249117, 0.22684784, 0.23048306])
    ])
    img_path = r'D:/卷积神经网络PPT/AlexNet/Alex_Torch/img.png'
    assert os.path.exists(img_path), "file: '{}' does not exist.".format(img_path)
    img = Image.open(img_path)
    plt.imshow(img)
    img = data_transform(img)
    # expand to four dimensions by adding a batch dimension
    img = torch.unsqueeze(img, dim=0)  # [batch_size, channel, height, width]
    # read the class-index json file
    json_path = './class_indices.json'
    assert os.path.exists(json_path), "file: '{}' does not exist.".format(json_path)
    with open(json_path, 'r') as f:
        class_indict = json.load(f)
    # instantiate the model
    model = vgg(model_name='vgg16', num_classes=5).to(device)
    # load the trained weights
    weights_path = './vgg16Net.pth'
    assert os.path.exists(weights_path), "file: '{}' does not exist.".format(weights_path)
    model.load_state_dict(torch.load(weights_path))

    # switch the model to evaluation mode
    model.eval()
    # disable gradient tracking
    with torch.no_grad():
        outputs = torch.squeeze(model(img.to(device))).cpu()
        predict = torch.softmax(outputs, dim=0)
        predict_cla = torch.argmax(predict).numpy()
    print_res = "class: {} prob:{:.3}".format(class_indict[str(predict_cla)],
                                              predict[predict_cla].numpy())
    plt.grid(False)
    plt.axis(False)
    plt.title(print_res)
    for i in range(len(predict)):
        print("class: {:10}   prob: {:.3}".format(class_indict[str(i)],
                                                  predict[i].numpy()))
    plt.show()


if __name__ == '__main__':
    main()
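The normalization constants in the transform above are hard-coded; they could instead be read back from the mean_std.json file that compute_mean_std writes during training, for example:

import json

with open('mean_std.json', 'r') as f:
    mean_std = json.load(f)
# then use transforms.Normalize(mean_std['mean'], mean_std['std'])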

Building and Training a VGG Model with TensorFlow

Model script

from tensorflow.keras import layers, Model, Sequential

# weight initializer for the convolution layers
CONV_KERNEL_INITIALIZER = {
    'class_name': 'VarianceScaling',
    'config': {
        'scale': 2.0,
        'mode': 'fan_out',
        'distribution': 'truncated_normal'
    }
}
# weight initializer for the dense layers
DENSE_KERNEL_INITIALIZER = {
    'class_name': 'VarianceScaling',
    'config': {
        'scale': 1. / 3.,
        'mode': 'fan_out',
        'distribution': 'uniform'
    }
}


def VGG(feature, im_height=224, im_width=224, num_classes=1000):
    # tensors in TensorFlow use NHWC channel ordering
    # input layer
    input_image = layers.Input(shape=(im_height, im_width, 3), dtype="float32")
    # feature-extraction layers
    x = feature(input_image)
    # flatten
    x = layers.Flatten()(x)
    # fully connected (dense) layers
    x = layers.Dropout(rate=0.5)(x)
    x = layers.Dense(2048, activation='relu',
                     kernel_initializer=DENSE_KERNEL_INITIALIZER)(x)
    x = layers.Dropout(rate=0.5)(x)
    x = layers.Dense(2048, activation='relu',
                     kernel_initializer=DENSE_KERNEL_INITIALIZER)(x)
    x = layers.Dense(num_classes,
                     kernel_initializer=DENSE_KERNEL_INITIALIZER)(x)
    output = layers.Softmax()(x)
    model = Model(inputs=input_image, outputs=output)
    return model


# build the feature-extraction layers by iterating over the vgg configuration list
def make_feature(cfg):
    feature_layers = []
    for v in cfg:
        # if v == 'M', add a max-pooling layer
        if v == "M":
            feature_layers.append(layers.MaxPool2D(pool_size=2, strides=2))
        # otherwise add a convolution layer
        else:
            conv2d = layers.Conv2D(v, kernel_size=3, padding="SAME", activation="relu",
                                   kernel_initializer=CONV_KERNEL_INITIALIZER)
            feature_layers.append(conv2d)
    return Sequential(feature_layers, name="feature")


cfgs = {
    'vgg11': [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
    'vgg13': [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
    'vgg16': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'],
    'vgg19': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512, 'M'],
}


def vgg(model_name="vgg16", im_height=224, im_width=224, num_classes=1000):
    assert model_name in cfgs.keys(), "not support model {}".format(model_name)
    cfg = cfgs[model_name]
    model = VGG(make_feature(cfg), im_height=im_height, im_width=im_width, num_classes=num_classes)
    return model
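Likewise, assuming this TensorFlow version is saved as model.py, the vgg16 variant can be sanity-checked with a random batch:

import tensorflow as tf
from model import vgg

model = vgg(model_name='vgg16', im_height=224, im_width=224, num_classes=5)
x = tf.random.normal([1, 224, 224, 3])  # NHWC: one random 224x224 RGB image
y = model(x, training=False)
print(y.shape)  # (1, 5)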

Training script

import matplotlib.pyplot as plt
from model import vgg
import tensorflow as tf
import json
import os
import time
import glob
import random
from utils import read_split_data

# index GPUs in PCI_BUS_ID order
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
# make only GPU 0 visible
os.environ["CUDA_VISIBLE_DEVICES"] = "0"


def main():
    gpus = tf.config.experimental.list_physical_devices("GPU")
    if gpus:
        try:
            for gpu in gpus:
                # allocate GPU memory only as needed
                tf.config.experimental.set_memory_growth(gpu, True)
        except RuntimeError as e:
            print(e)
            exit(-1)
    image_path: str = r"D:\卷积神经网络PPT\AlexNet\Alex_Torch\data\flower_photos"  # dataset root path
    train_images_path, train_images_label, val_images_path, val_images_label = read_split_data(image_path)
    train_num = len(train_images_path)
    val_num = len(val_images_path)
    if not os.path.exists('save_weights'):  # directory where the weights are saved
        os.mkdir('save_weights')

    im_height = 224  # image height
    im_width = 224  # image width
    batch_size = 32  # batch size
    epochs = 20  # number of epochs

    print('using {} images for training, {} images for validation.'.format(train_num, val_num))

    # preprocess a single image
    # @tf.function
    def process_path(img_path, label):
        label = tf.one_hot(label, depth=5)
        image = tf.io.read_file(img_path)
        image = tf.image.decode_jpeg(image)
        image = tf.image.convert_image_dtype(image, tf.float32)
        image = tf.image.resize(image, [im_height, im_width])
        return image, label

    # let tf.data tune pipeline parameters automatically
    AUTOTUNE = tf.data.experimental.AUTOTUNE
    # build the training dataset: shuffle it, map process_path over it, then batch and prefetch to speed up reading
    train_dataset = tf.data.Dataset.from_tensor_slices((train_images_path, train_images_label))
    train_dataset = train_dataset.shuffle(buffer_size=train_num) \
        .map(process_path, num_parallel_calls=AUTOTUNE) \
        .repeat().batch(batch_size).prefetch(AUTOTUNE)

    val_dataset = tf.data.Dataset.from_tensor_slices((val_images_path, val_images_label))
    val_dataset = val_dataset.map(process_path, num_parallel_calls=AUTOTUNE) \
        .repeat().batch(batch_size)

    # instantiate the model
    model = vgg('vgg16', im_height=im_height, im_width=im_width, num_classes=5)
    model.summary()
    # loss function
    loss_object = tf.keras.losses.CategoricalCrossentropy(from_logits=False)
    # optimizer
    optimizer = tf.keras.optimizers.Adam(lr=1e-4)

    # metric tracking the training loss
    train_loss = tf.keras.metrics.Mean(name='train_loss')
    # metric tracking the training accuracy
    train_accuracy = tf.keras.metrics.CategoricalAccuracy(name='train_acc')

    valid_loss = tf.keras.metrics.Mean(name='valid_loss')
    val_accuracy = tf.keras.metrics.CategoricalAccuracy(name='valid_acc')

    # decorate with @tf.function to run the function in graph mode
    @tf.function
    def train_step(images, labels):
        with tf.GradientTape() as tape:
            predictions = model(images, training=True)
            loss = loss_object(labels, predictions)
        gradients = tape.gradient(loss, model.trainable_variables)
        optimizer.apply_gradients(zip(gradients, model.trainable_variables))

        train_loss(loss)
        train_accuracy(labels, predictions)

    @tf.function
    def valid_step(images, labels):
        predictions = model(images, training=False)
        t_loss = loss_object(labels, predictions)

        valid_loss(t_loss)
        val_accuracy(labels, predictions)

    best_valid_loss = float('inf')  # initialize the best validation loss to infinity
    train_step_num = train_num // batch_size
    val_step_num = val_num // batch_size
    for epoch in range(1, epochs + 1):
        train_loss.reset_states()  # reset the metrics
        train_accuracy.reset_states()
        valid_loss.reset_states()
        val_accuracy.reset_states()

        # t1 = time.perf_counter()  # record the elapsed time
        for index, (images, labels) in enumerate(train_dataset):
            train_step(images, labels)
            if index + 1 == train_step_num:  # stop the epoch once the number of steps per epoch is reached
                break
        # print(time.perf_counter() - t1)
        for index, (images, labels) in enumerate(val_dataset):
            valid_step(images, labels)
            if index + 1 == val_step_num:
                break
        template = 'Epoch {}, Loss: {}, Accuracy: {:.3f}, Test Loss: {}, Test Accuracy: {:.3f}'
        print(template.format(epoch,
                              train_loss.result(),
                              train_accuracy.result() * 100,
                              valid_loss.result(),
                              val_accuracy.result() * 100))
        if valid_loss.result() < best_valid_loss:
            best_valid_loss = valid_loss.result()
            model.save_weights("./save_weights/Alex-{}-{:.3f}.ckpt".format(epoch, best_valid_loss), save_format='tf')


if __name__ == '__main__':
    main()

Training output:

Epoch 5, Loss: 0.8564379215240479, Accuracy: 67.273, Test Loss: 0.8504697680473328, Test Accuracy: 68.182
Epoch 6, Loss: 0.8076342344284058, Accuracy: 68.510, Test Loss: 0.8520256280899048, Test Accuracy: 68.324
Epoch 7, Loss: 0.7501738667488098, Accuracy: 71.223, Test Loss: 0.8046029806137085, Test Accuracy: 70.455
Epoch 8, Loss: 0.7029500007629395, Accuracy: 73.661, Test Loss: 0.8594173789024353, Test Accuracy: 69.886
Epoch 9, Loss: 0.63121098279953, Accuracy: 76.511, Test Loss: 0.7451947927474976, Test Accuracy: 71.733
Epoch 10, Loss: 0.614281952381134, Accuracy: 77.473, Test Loss: 0.7642716765403748, Test Accuracy: 74.006

Prediction script

import os
import json
from PIL import Image
import numpy as np
import matplotlib.pyplot as plt
from model import vgg


def main():
    im_height = 224
    im_width = 224
    # read the image and preprocess it
    img_path = r'D:/卷积神经网络PPT/AlexNet/Alex_Torch/img.png'
    img = Image.open(img_path).convert("RGB")

    img = img.resize((im_height, im_width))
    plt.imshow(img)

    img = np.asarray(img) / 255.
    img = np.expand_dims(img, 0)

    # read the index-to-class mapping
    json_path = './class_indices.json'
    with open(json_path, 'r') as f:
        class_dict = json.load(f)
    # instantiate the model and load the weights
    model = vgg(model_name='vgg16', num_classes=5)
    weights_path = './save_weights/VGG16-9-0.791.ckpt'
    model.load_weights(weights_path)

    # squeeze out the batch dimension
    result = np.squeeze(model.predict(img))
    predict_class = np.argmax(result)  # index with the highest probability
    # print the predicted class and its probability
    print_res = "class: {}   prob: {:.3}".format(class_dict[str(predict_class)],
                                                 result[predict_class])
    plt.title(print_res)
    for i in range(len(result)):
        print("class: {:10}   prob: {:.3}".format(class_dict[str(i)],
                                                  result[i]))
    plt.show()


if __name__ == '__main__':
    main()

