AlexNet网络

AlexNet简介

AlexNet是2012年ILSVRC 2012(ImageNet Large Scale Visual Recognition
Challenge)竞赛的冠军网络,分类准确率由传统的 70%+提升到 80%+。
它是由Hinton和他的学生Alex Krizhevsky设计的。也是在那年之后,深
度学习开始迅速发展。

AlexNet网络的优点

  1. 首次利用GPU进行网络加速训练。
  2. 使用了ReLU激活函数,而不是传统的Sigmoid激活函数以及Tanh激活函数。
  3. 使用了LRN局部响应归一化。
  4. 在全连接层的前两层中使用了Dropout随机失活神经元操作,以减少过拟合。

过拟合:
根本原因是特征维度过多,模型假设过于复杂,参数过多,训练数据过少,噪声较多,导致拟合的函数完美的预测训练集,但对新数据的测试集预测结果差。过度地拟合了训练数据,而没有考虑到泛化能力。
Dropout方法:
使用Dropout的方法在网络正向传播过程中随机失活一部分神经元。
image.png
未使用Dropout的正向传播 使用Dropout后的正向传播
卷积后图像大小:
经过卷积后矩阵尺寸大小计算公式为:
\(N=(W-F+2P)/S+1\)

  1. 输入图片大小\(W\times W\)
  2. Filter大小为\(F\times F\)
  3. 步长为\(S\)
  4. padding的像素数\(P\)

AlexNet网络架构

image.png

layer_name kernel_size kernel_num padding stride
Conv1 11 96 [1,2] 4
Maxpool1 3 None 0 2
Conv2 5 256 [2,2] 1
Maxpool2 3 None 0 2
Conv3 3 384 [1,1] 1
Conv4 3 384 [1,1] 1
Conv5 3 256 [1,1] 1
Maxpool3 3 None 0 2
FC1 2048 None None None
FC2 2048 None None None
FC3 1000 None None None

使用PyTorch搭建AlexNet并训练花分类数据集

数据集链接:https://storage.googleapis.com/download.tensorflow.org/example_images/flower_photos.tgz

构建模型类

模型特征提取层

# Feature-extraction stage. Channel counts are half of the original AlexNet
# table above (48 vs 96, 128 vs 256, ...) — single-branch variant.
self.features = nn.Sequential(
            nn.Conv2d(in_channels=3, out_channels=48, kernel_size=11, stride=4, padding=2), # [3,224,224]->[48,55,55]
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2), # [48,55,55]->[48,27,27]
            nn.Conv2d(in_channels=48, out_channels=128, kernel_size=5, padding=2), # [48,27,27]->[128,27,27]
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2), # [128,27,27]->[128,13,13]
            nn.Conv2d(in_channels=128, out_channels=192, kernel_size=3, padding=1), # [128,13,13]->[192,13,13]
            nn.ReLU(inplace=True),
            nn.Conv2d(in_channels=192, out_channels=192, kernel_size=3, padding=1), # [192,13,13]->[192,13,13]
            nn.ReLU(inplace=True),
            nn.Conv2d(in_channels=192, out_channels=128, kernel_size=3, padding=1), # [192,13,13]->[128,13,13]
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2) # [128,13,13]->[128,6,6]
        )

模型分类层

# Classifier head: dropout-regularized 3-layer MLP over the flattened feature map.
self.classifier = nn.Sequential(
            nn.Dropout(p=0.5), # randomly zero 50% of activations to reduce overfitting
            nn.Linear(128 * 6 * 6, 2048), # fully connected layer on the flattened [128,6,6] features
            nn.ReLU(inplace=True),
            nn.Dropout(p=0.5),
            nn.Linear(2048, 2048),
            nn.ReLU(inplace=True),
            nn.Linear(2048, num_classes) # raw logits, one per class
        )

卷积层和线性层权重初始化

def _initialize_weights(self):
        """Initialize Conv2d and Linear weights explicitly; biases are zeroed."""
        for m in self.modules(): # iterate over every submodule of the model
            if isinstance(m, nn.Conv2d): # convolution layers
                # Kaiming (He) normal init, suited to ReLU activations
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.Linear): # fully connected layers
                nn.init.normal_(m.weight, 0, 0.01) # small Gaussian init
                nn.init.constant_(m.bias, 0)

模型的正向传播

def forward(self, x):
        """Run feature extraction, flatten, then classify; returns raw logits."""
        x = self.features(x)
        # Flatten from dim 1 onward, keeping the batch dim: [N,128,6,6] -> [N,4608]
        x = torch.flatten(x, start_dim=1)
        x = self.classifier(x)
        return x

模型全部代码

import torch
import torch.nn as nn


class AlexNet(nn.Module):
    """AlexNet variant with halved channel counts (single-branch version).

    Args:
        num_classes: size of the output logit vector.
        init_weights: when True, apply explicit Kaiming/Gaussian initialization.
    """

    def __init__(self, num_classes=1000, init_weights=False):
        super(AlexNet, self).__init__()
        # Convolutional feature extractor: [N,3,224,224] -> [N,128,6,6].
        self.features = nn.Sequential(
            nn.Conv2d(3, 48, kernel_size=11, stride=4, padding=2),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2),
            nn.Conv2d(48, 128, kernel_size=5, padding=2),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2),
            nn.Conv2d(128, 192, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(192, 192, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(192, 128, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2),
        )
        # Classifier head: dropout-regularized MLP producing raw logits.
        self.classifier = nn.Sequential(
            nn.Dropout(p=0.5),
            nn.Linear(128 * 6 * 6, 2048),
            nn.ReLU(inplace=True),
            nn.Dropout(p=0.5),
            nn.Linear(2048, 2048),
            nn.ReLU(inplace=True),
            nn.Linear(2048, num_classes),
        )
        if init_weights:
            self._initialize_weights()

    def forward(self, x):
        """Return class logits for a batch of 3x224x224 images."""
        feature_map = self.features(x)
        flat = torch.flatten(feature_map, start_dim=1)
        return self.classifier(flat)

    def _initialize_weights(self):
        """Kaiming-normal init for conv layers, N(0, 0.01) for linear layers."""
        for module in self.modules():
            if isinstance(module, nn.Conv2d):
                nn.init.kaiming_normal_(module.weight, mode='fan_out', nonlinearity='relu')
                if module.bias is not None:
                    nn.init.constant_(module.bias, 0)
            elif isinstance(module, nn.Linear):
                nn.init.normal_(module.weight, 0, 0.01)
                nn.init.constant_(module.bias, 0)

划分数据集

import os
from shutil import copyfile
import random
from PIL import Image


def split_dataset(source_dir, destination_dir):
    """Split an ImageFolder-style dataset into train/val subsets (80/20).

    Every class sub-folder of ``source_dir`` is scanned; images that cannot
    be decoded are reported and skipped, the rest are shuffled and copied
    into ``destination_dir/train/<class>`` and ``destination_dir/val/<class>``.
    """
    # Fixed seed so the split is reproducible across runs.
    random.seed(42)
    # makedirs(exist_ok=True) lets the script be re-run without crashing.
    os.makedirs(os.path.join(destination_dir, 'train'), exist_ok=True)
    os.makedirs(os.path.join(destination_dir, 'val'), exist_ok=True)
    for folder in os.listdir(source_dir):  # one sub-folder per class
        files = os.listdir(os.path.join(source_dir, folder))
        images = []  # file names that decoded successfully
        for f in files:
            try:
                # Context manager releases the file handle (plain Image.open
                # in the original leaked one descriptor per image).
                with Image.open(os.path.join(source_dir, folder, f)) as img:
                    img.convert("RGB")
                images.append(f)
            except IOError:  # report and skip unreadable/corrupt files
                print(f'fail on {f}')

        random.shuffle(images)  # shuffle before splitting
        count = len(images)
        split = int(0.8 * count)  # first 80% -> train, remainder -> val
        os.makedirs(os.path.join(destination_dir, 'train', folder), exist_ok=True)
        os.makedirs(os.path.join(destination_dir, 'val', folder), exist_ok=True)

        for c in range(split):
            source_file = os.path.join(source_dir, folder, images[c])
            destination = os.path.join(destination_dir, 'train', folder, images[c])
            copyfile(source_file, destination)
        for c in range(split, count):
            source_file = os.path.join(source_dir, folder, images[c])
            destination = os.path.join(destination_dir, 'val', folder, images[c])
            copyfile(source_file, destination)
    print('split finishing')


if __name__ == '__main__':
    # Source folder of the extracted flower_photos archive (machine-specific path).
    input_data_dir = r'C:\Users\reion\Desktop\卷积神经网络PPT\Alex_Torch\data\flower_photos'
    destination_dir = r'./data'
    split_dataset(input_data_dir, destination_dir)

训练模型脚本

import json
import os
import sys

import torch
import torch.nn as nn
import torch.optim as optim
from torchvision import transforms, datasets
from tqdm import tqdm

from model import AlexNet


def main():
    """Train AlexNet on the flower dataset and keep the best checkpoint."""
    # Select GPU 0 when available, otherwise fall back to the CPU.
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    # Report which device will be used
    print("using {} device.".format(device))
    # Augmentation/normalization pipelines for the training and validation splits
    data_transform = {
        "train": transforms.Compose([transforms.RandomResizedCrop(224),  # random crop, rescaled to 224x224
                                     transforms.RandomHorizontalFlip(),  # random horizontal flip
                                     transforms.ToTensor(),  # to tensor, pixel values scaled into [0,1]
                                     transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]),
        "val": transforms.Compose([transforms.Resize((224, 224)),  # Resize needs the (224, 224) tuple, not a bare 224
                                   transforms.ToTensor(),
                                   transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])}

    image_path = os.path.join('./data')  # parent directory of the class folders

    assert os.path.exists(image_path), "{} path does not exist.".format(image_path)
    train_dataset = datasets.ImageFolder(root=os.path.join(image_path, "train"),
                                         transform=data_transform["train"])
    train_num = len(train_dataset)  # number of training samples

    # Mapping from class name to index
    flower_list = train_dataset.class_to_idx
    # Invert it to index -> class name
    cla_dict = dict((val, key) for key, val in flower_list.items())
    # Persist the index-to-class mapping as JSON for the prediction script
    json_str = json.dumps(cla_dict, indent=4)
    with open('class_indices.json', 'w') as json_file:
        json_file.write(json_str)
    # Batch size
    batch_size = 256
    # Number of dataloader worker processes (capped at 8)
    nw = min([os.cpu_count(), batch_size if batch_size > 1 else 0, 8])
    print('using {} dataloader workers every process'.format(nw))
    # Wrap the dataset in a DataLoader
    train_loader = torch.utils.data.DataLoader(train_dataset,
                                               batch_size=batch_size,
                                               shuffle=True,
                                               num_workers=nw,
                                               pin_memory=True)

    validate_dataset = datasets.ImageFolder(root=os.path.join(image_path, "val"),
                                            transform=data_transform["val"])
    # Number of validation samples
    val_num = len(validate_dataset)
    validate_loader = torch.utils.data.DataLoader(validate_dataset,
                                                  batch_size=128,
                                                  shuffle=False,
                                                  num_workers=nw,
                                                  pin_memory=True)

    print("using {} images for training, {} images for validation.".format(train_num,
                                                                           val_num))
    # Instantiate the model (5 flower classes)
    net = AlexNet(num_classes=5, init_weights=True)
    # Move the model to the training device
    net.to(device)
    # Loss function
    loss_function = nn.CrossEntropyLoss()
    # Only optimize parameters that require gradients
    pg = [p for p in net.parameters() if p.requires_grad]
    # Optimizer
    optimizer = optim.AdamW(pg, lr=1e-3)
    # Number of training epochs
    epochs = 10
    # Checkpoint save path
    save_path = './AlexNet.pth'
    # Best validation accuracy seen so far
    best_acc = 0.0
    # Batches per epoch
    train_steps = len(train_loader)
    for epoch in range(epochs):
        # Training mode (enables dropout)
        net.train()
        running_loss = 0.0
        # Progress bar over the training loader
        train_bar = tqdm(train_loader, file=sys.stdout)
        for step, data in enumerate(train_bar):
            images, labels = data
            # Zero accumulated gradients
            optimizer.zero_grad()
            # Forward pass
            outputs = net(images.to(device))
            # Compute the loss
            loss = loss_function(outputs, labels.to(device))
            # Backpropagate
            loss.backward()
            # Update the parameters
            optimizer.step()

            # Accumulate the epoch loss
            running_loss += loss.item()
            # Show training status on the progress bar
            train_bar.desc = "train epoch[{}/{}] loss:{:.3f}".format(epoch + 1,
                                                                     epochs,
                                                                     loss)

        # Evaluation mode (disables dropout)
        net.eval()
        # Count of correct predictions
        acc = 0.0
        # Disable gradient tracking during validation
        with torch.no_grad():
            val_bar = tqdm(validate_loader, file=sys.stdout)
            for val_data in val_bar:
                val_images, val_labels = val_data
                outputs = net(val_images.to(device))
                predict_y = torch.argmax(outputs, dim=1)
                acc += torch.eq(predict_y, val_labels.to(device)).sum().item()

        val_accurate = acc / val_num
        print('[epoch %d] train_loss: %.3f  val_accuracy: %.3f' %
              (epoch + 1, running_loss / train_steps, val_accurate))
        # Save the checkpoint with the best validation accuracy
        if val_accurate > best_acc:
            print('accuracy from {:.3f} to {:.3f}, saving model...'.format(best_acc, val_accurate))
            best_acc = val_accurate
            torch.save(net.state_dict(), save_path)

    print('Finished Training')


if __name__ == '__main__':
    main()

预测脚本

import os
import json
import torch
from PIL import Image
from torchvision import transforms
import matplotlib.pyplot as plt
from model import AlexNet


def main():
    """Classify a single image with a trained AlexNet and plot the result."""
    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
    data_transform = transforms.Compose([
        transforms.Resize((224, 224)),
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    ])
    img_path = './img.png'
    assert os.path.exists(img_path), "file: '{}' does not exist.".format(img_path)
    # Force 3 channels: PNG files are often RGBA, which would break the
    # 3-channel Normalize above and the Conv2d(in_channels=3) stem.
    img = Image.open(img_path).convert('RGB')
    plt.imshow(img)
    img = data_transform(img)
    # Add a batch dimension: [C,H,W] -> [1,C,H,W]
    img = torch.unsqueeze(img, dim=0)  # [batch_size,channel,height,width]
    # Load the class-index-to-name mapping written at training time
    json_path = './class_indices.json'
    assert os.path.exists(json_path), "file: '{}' does not exist.".format(json_path)
    with open(json_path, 'r') as f:
        class_indict = json.load(f)
    # Rebuild the model architecture
    model = AlexNet(num_classes=5).to(device)
    # Load the trained weights
    weights_path = './AlexNet.pth'
    assert os.path.exists(weights_path), "file: '{}' does not exist.".format(weights_path)
    # map_location keeps this working when the checkpoint was saved on a
    # different device (e.g. trained on GPU, predicted on CPU).
    model.load_state_dict(torch.load(weights_path, map_location=device))

    # Evaluation mode (disables dropout)
    model.eval()
    # No gradient tracking needed for inference
    with torch.no_grad():
        outputs = torch.squeeze(model(img.to(device))).cpu()
        predict = torch.softmax(outputs, dim=0)
        predict_cla = torch.argmax(predict).numpy()
    print_res = "class: {} prob:{:.3}".format(class_indict[str(predict_cla)],
                                              predict[predict_cla].numpy())
    plt.grid(False)
    plt.axis(False)
    plt.title(print_res)
    for i in range(len(predict)):
        print("class: {:10}   prob: {:.3}".format(class_indict[str(i)],
                                                  predict[i].numpy()))
    plt.show()


if __name__ == '__main__':
    main()

预测结果:

image.png

使用TensorFlow2.x搭建AlexNet

创建模型

from model import AlexNet_v1
import tensorflow as tf
from tensorflow.keras.preprocessing.image import ImageDataGenerator
import json
import os
import matplotlib.pyplot as plt

# Train AlexNet_v1 with the Keras high-level API on the flower dataset.
image_path = r'./data'  # dataset root directory
train_dir = os.path.join(image_path, 'train')  # training split
valid_dir = os.path.join(image_path, 'val')  # validation split

if not os.path.exists('weights'):  # directory for saved weights
    os.mkdir('weights')
im_height = 224  # input image height
im_width = 224  # input image width
batch_size = 32  # batch size
epochs = 10  # number of epochs
# Pixel rescaling plus light augmentation for the training split
train_image_gen = ImageDataGenerator(rescale=1. / 255,  # scale pixels into [0, 1]
                                     horizontal_flip=True)  # random horizontal flip
valid_image_gen = ImageDataGenerator(rescale=1. / 255)
# Training generator
train_dataset = train_image_gen.flow_from_directory(directory=train_dir,  # training directory
                                                    batch_size=batch_size,
                                                    shuffle=True,  # shuffle each epoch
                                                    target_size=(im_height, im_width),
                                                    class_mode='categorical')  # one-hot labels
# Number of training samples
total_train = train_dataset.n
# Validation generator
val_dataset = valid_image_gen.flow_from_directory(directory=valid_dir,
                                                  batch_size=batch_size,
                                                  shuffle=False,
                                                  target_size=(im_height, im_width),
                                                  class_mode='categorical')
# Number of validation samples
total_valid = val_dataset.n
# class name -> index
class_indices = train_dataset.class_indices
# index -> class name
inverse_dict = dict((val, key) for key, val in class_indices.items())
# Persist the mapping for the prediction script
json_str = json.dumps(inverse_dict, indent=4)
with open('class_indices.json', 'w') as json_file:
    json_file.write(json_str)
# Build the model
model = AlexNet_v1(im_height=im_height, im_width=im_width, num_classes=5)
# Print the model summary
model.summary()
# Compile: 'learning_rate' is the supported keyword ('lr' is a removed alias)
model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=5e-4),
              loss=tf.keras.losses.CategoricalCrossentropy(from_logits=False),  # labels are one-hot
              metrics=['accuracy'])
# Checkpoint callback keeping only the best weights by validation loss.
# The keyword is 'monitor' — the original 'mointer' typo is rejected by
# ModelCheckpoint as an unrecognized argument.
call_backs = [tf.keras.callbacks.ModelCheckpoint(filepath='./weights/AlexNet.h5',
                                                 save_best_only=True,
                                                 save_weights_only=True,
                                                 monitor='val_loss')]
# Train the model
history = model.fit(train_dataset,
                    steps_per_epoch=total_train // batch_size,  # batches per epoch
                    epochs=epochs,
                    validation_data=val_dataset,
                    validation_steps=total_valid // batch_size,
                    callbacks=call_backs)
# Plot loss/accuracy curves from the training history
history_dict = history.history
train_loss = history_dict['loss']
train_acc = history_dict['accuracy']
val_loss = history_dict['val_loss']
val_acc = history_dict['val_accuracy']

plt.figure()
plt.plot(range(epochs), train_loss, label='train_loss')
plt.plot(range(epochs), val_loss, label='val_loss')
plt.legend()
plt.xlabel('epochs')
plt.ylabel('loss')

plt.figure()
plt.plot(range(epochs), train_acc, label='train_accuracy')
plt.plot(range(epochs), val_acc, label='val_accuracy')
plt.legend()
plt.xlabel('epochs')
plt.ylabel('accuracy')
plt.show()

训练模型

使用高层API训练模型

from model import AlexNet_v1
import tensorflow as tf
from tensorflow.keras.preprocessing.image import ImageDataGenerator
import json
import os
import matplotlib.pyplot as plt

# Train AlexNet_v1 with the Keras high-level API on the flower dataset.
image_path = r'./data'  # dataset root directory
train_dir = os.path.join(image_path, 'train')  # training split
valid_dir = os.path.join(image_path, 'val')  # validation split

if not os.path.exists('weights'):  # directory for saved weights
    os.mkdir('weights')
im_height = 224  # input image height
im_width = 224  # input image width
batch_size = 32  # batch size
epochs = 10  # number of epochs
# Pixel rescaling plus light augmentation for the training split
train_image_gen = ImageDataGenerator(rescale=1. / 255,  # scale pixels into [0, 1]
                                     horizontal_flip=True)  # random horizontal flip
valid_image_gen = ImageDataGenerator(rescale=1. / 255)
# Training generator
train_dataset = train_image_gen.flow_from_directory(directory=train_dir,  # training directory
                                                    batch_size=batch_size,
                                                    shuffle=True,  # shuffle each epoch
                                                    target_size=(im_height, im_width),
                                                    class_mode='categorical')  # one-hot labels
# Number of training samples
total_train = train_dataset.n
# Validation generator
val_dataset = valid_image_gen.flow_from_directory(directory=valid_dir,
                                                  batch_size=batch_size,
                                                  shuffle=False,
                                                  target_size=(im_height, im_width),
                                                  class_mode='categorical')
# Number of validation samples
total_valid = val_dataset.n
# class name -> index
class_indices = train_dataset.class_indices
# index -> class name
inverse_dict = dict((val, key) for key, val in class_indices.items())
# Persist the mapping for the prediction script
json_str = json.dumps(inverse_dict, indent=4)
with open('class_indices.json', 'w') as json_file:
    json_file.write(json_str)
# Build the model
model = AlexNet_v1(im_height=im_height, im_width=im_width, num_classes=5)
# Print the model summary
model.summary()
# Compile: 'learning_rate' is the supported keyword ('lr' is a removed alias)
model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=5e-4),
              loss=tf.keras.losses.CategoricalCrossentropy(from_logits=False),  # labels are one-hot
              metrics=['accuracy'])
# Checkpoint callback keeping only the best weights by validation loss.
# The keyword is 'monitor' — the original 'mointer' typo is rejected by
# ModelCheckpoint as an unrecognized argument.
call_backs = [tf.keras.callbacks.ModelCheckpoint(filepath='./weights/AlexNet.h5',
                                                 save_best_only=True,
                                                 save_weights_only=True,
                                                 monitor='val_loss')]
# Train the model
history = model.fit(train_dataset,
                    steps_per_epoch=total_train // batch_size,  # batches per epoch
                    epochs=epochs,
                    validation_data=val_dataset,
                    validation_steps=total_valid // batch_size,
                    callbacks=call_backs)
# Plot loss/accuracy curves from the training history
history_dict = history.history
train_loss = history_dict['loss']
train_acc = history_dict['accuracy']
val_loss = history_dict['val_loss']
val_acc = history_dict['val_accuracy']

plt.figure()
plt.plot(range(epochs), train_loss, label='train_loss')
plt.plot(range(epochs), val_loss, label='val_loss')
plt.legend()
plt.xlabel('epochs')
plt.ylabel('loss')

plt.figure()
plt.plot(range(epochs), train_acc, label='train_accuracy')
plt.plot(range(epochs), val_acc, label='val_accuracy')
plt.legend()
plt.xlabel('epochs')
plt.ylabel('accuracy')
plt.show()
Epoch 8/10
91/91 [==============================] - 7s 80ms/step - loss: 0.8124 - accuracy: 0.6919 - val_loss: 0.7893 - val_accuracy: 0.6902
Epoch 9/10
91/91 [==============================] - 7s 82ms/step - loss: 0.7598 - accuracy: 0.7212 - val_loss: 0.7687 - val_accuracy: 0.6984
Epoch 10/10
91/91 [==============================] - 8s 84ms/step - loss: 0.6937 - accuracy: 0.7350 - val_loss: 0.7726 - val_accuracy: 0.7065

image.png

使用底层API训练模型

import matplotlib.pyplot as plt
from model import AlexNet_v1, AlexNet_v2
import tensorflow as tf
import json
import os
import time
import glob
import random

# Make CUDA device indices follow the PCI bus order
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
# Expose only GPU 0 to TensorFlow
os.environ["CUDA_VISIBLE_DEVICES"] = "0"


def main():
    """Train AlexNet on the flower dataset with a custom (low-level) TF loop."""
    gpus = tf.config.experimental.list_physical_devices("GPU")
    if gpus:
        try:
            for gpu in gpus:
                # Allocate GPU memory on demand instead of reserving it all
                tf.config.experimental.set_memory_growth(gpu, True)
        except RuntimeError as e:
            print(e)
            exit(-1)
    image_path = r'./data'  # dataset root directory
    train_dir = os.path.join(image_path, 'train')  # training split
    valid_dir = os.path.join(image_path, 'val')  # validation split

    if not os.path.exists('weights'):  # directory for saved weights
        os.mkdir('weights')

    im_height = 224  # input image height
    im_width = 224  # input image width
    batch_size = 64  # batch size
    epochs = 10  # number of epochs

    data_class = [cla for cla in os.listdir(train_dir) if os.path.isdir(os.path.join(train_dir, cla))]  # class names
    class_num = len(data_class)  # number of classes
    class_dict = dict((val, key) for key, val in enumerate(data_class))  # class name -> index
    # index -> class name
    inverse_dict = dict((val, key) for key, val in class_dict.items())
    # Persist the mapping for the prediction script
    json_str = json.dumps(inverse_dict, indent=4)
    with open('class_indices.json', 'w') as json_file:
        json_file.write(json_str)
    # Collect every training image path via a glob pattern
    train_image_list = glob.glob(train_dir + '/*/*.jpg')
    random.shuffle(train_image_list)  # shuffle the path list
    train_num = len(train_image_list)  # number of training images
    assert train_num > 0, 'cannot find any .jpg file in {}'.format(train_dir)
    # The second-to-last path component is the class folder name
    train_label_list = [class_dict[path.split(os.path.sep)[-2]] for path in train_image_list]

    # Same for the validation split
    val_image_list = glob.glob(valid_dir + '/*/*.jpg')
    random.shuffle(val_image_list)
    val_num = len(val_image_list)
    assert val_num > 0, 'cannot find any .jpg file in {}'.format(valid_dir)
    val_label_list = [class_dict[path.split(os.path.sep)[-2]] for path in val_image_list]

    print('using {} images for training, {} images for validation.'.format(train_num, val_num))

    # Decode one image file and one-hot encode its label
    def process_path(img_path, label):
        """Map (path, int label) -> (float32 HxWx3 image, one-hot label)."""
        label = tf.one_hot(label, depth=class_num)
        image = tf.io.read_file(img_path)
        image = tf.image.decode_jpeg(image)
        image = tf.image.convert_image_dtype(image, tf.float32)  # also scales pixels into [0,1]
        image = tf.image.resize(image, [im_height, im_width])
        return image, label

    # Let tf.data tune parallelism/prefetch parameters automatically
    AUTOTUNE = tf.data.experimental.AUTOTUNE
    # Training pipeline: shuffle, decode in parallel, repeat, batch, prefetch
    train_dataset = tf.data.Dataset.from_tensor_slices((train_image_list, train_label_list))
    train_dataset = train_dataset.shuffle(buffer_size=train_num) \
        .map(process_path, num_parallel_calls=AUTOTUNE) \
        .repeat().batch(batch_size).prefetch(AUTOTUNE)

    val_dataset = tf.data.Dataset.from_tensor_slices((val_image_list, val_label_list))
    val_dataset = val_dataset.map(process_path, num_parallel_calls=AUTOTUNE) \
        .repeat().batch(batch_size)

    # Build the model
    model = AlexNet_v1(im_height=im_height, im_width=im_width, num_classes=5)
    model.summary()
    # Loss: labels are one-hot and the model outputs probabilities
    loss_object = tf.keras.losses.CategoricalCrossentropy(from_logits=False)
    # NOTE(review): 'lr' is a deprecated alias removed in newer Keras;
    # prefer learning_rate=5e-4 when upgrading TensorFlow.
    optimizer = tf.keras.optimizers.Adam(lr=5e-4)

    # Running mean of the training loss
    train_loss = tf.keras.metrics.Mean(name='train_loss')
    # Training accuracy over one epoch
    train_accuracy = tf.keras.metrics.CategoricalAccuracy(name='train_acc')

    valid_loss = tf.keras.metrics.Mean(name='valid_loss')
    val_accuracy = tf.keras.metrics.CategoricalAccuracy(name='valid_acc')

    # @tf.function compiles the step into a TF graph for speed
    @tf.function
    def train_step(images, labels):
        """One optimization step: forward, loss, backward, metric update."""
        with tf.GradientTape() as tape:
            predictions = model(images, training=True)
            loss = loss_object(labels, predictions)
        gradients = tape.gradient(loss, model.trainable_variables)
        optimizer.apply_gradients(zip(gradients, model.trainable_variables))

        train_loss(loss)
        train_accuracy(labels, predictions)

    @tf.function
    def valid_step(images, labels):
        """One validation step: forward pass and metric update only."""
        predictions = model(images, training=False)
        t_loss = loss_object(labels, predictions)

        valid_loss(t_loss)
        val_accuracy(labels, predictions)

    best_valid_loss = float('inf')  # lowest validation loss seen so far
    train_step_num = train_num // batch_size
    val_step_num = val_num // batch_size
    for epoch in range(1, epochs + 1):
        train_loss.reset_states()  # reset metrics at the start of each epoch
        train_accuracy.reset_states()
        valid_loss.reset_states()
        val_accuracy.reset_states()

        t1 = time.perf_counter()  # time the training phase of this epoch
        for index, (images, labels) in enumerate(train_dataset):
            train_step(images, labels)
            if index + 1 == train_step_num:  # the dataset repeats, so stop after one epoch's batches
                break
        print(time.perf_counter() - t1)
        for index, (images, labels) in enumerate(val_dataset):
            valid_step(images, labels)
            if index + 1 == val_step_num:
                break
        template = 'Epoch {}, Loss: {}, Accuracy: {:.3f}, Test Loss: {}, Test Accuracy: {:.3f}'
        print(template.format(epoch,
                              train_loss.result(),
                              train_accuracy.result() * 100,
                              valid_loss.result(),
                              val_accuracy.result() * 100))
        # Save weights whenever the validation loss improves
        if valid_loss.result() < best_valid_loss:
            best_valid_loss = valid_loss.result()
            model.save_weights("./save_weights/Alex-{}-{:.3f}.ckpt".format(epoch, best_valid_loss), save_format='tf')


if __name__ == '__main__':
    main()

Epoch 8, Loss: 0.7458328008651733, Accuracy: 71.701, Test Loss: 0.7609016299247742, Test Accuracy: 69.176
2.3032854999999977
Epoch 9, Loss: 0.6685250997543335, Accuracy: 73.542, Test Loss: 0.7595872282981873, Test Accuracy: 71.733
2.2823238000000003
Epoch 10, Loss: 0.6252275109291077, Accuracy: 76.215, Test Loss: 0.803882360458374, Test Accuracy: 69.602

对图像进行预测

import os
import json
from PIL import Image
import numpy as np
import matplotlib.pyplot as plt
from model import AlexNet_v1


def main():
    """Classify a single image with trained AlexNet_v1 weights and plot it."""
    im_height = 224
    im_width = 224
    # Load the image and force 3 channels: PNGs are often RGBA (or grayscale),
    # which would not match the model's 3-channel input after np.array().
    img_path = '../Alex_Torch/img.png'
    img = Image.open(img_path).convert('RGB')

    img = img.resize((im_height, im_width))
    plt.imshow(img)

    # Scale pixels into [0, 1] and add a batch dimension: [H,W,3] -> [1,H,W,3]
    img = np.array(img) / 255.
    img = np.expand_dims(img, 0)

    # Load the index-to-class mapping written at training time
    json_path = './class_indices.json'
    with open(json_path, 'r') as f:
        class_dict = json.load(f)
    # Rebuild the model and load the trained weights
    model = AlexNet_v1(num_classes=5)
    weights_path = './weights/AlexNet.h5'
    model.load_weights(weights_path)

    # Drop the batch dimension from the prediction
    result = np.squeeze(model.predict(img))
    predict_class = np.argmax(result)  # index of the most probable class
    # Show the predicted class and its probability in the figure title
    print_res = "class: {}   prob: {:.3}".format(class_dict[str(predict_class)],
                                                 result[predict_class])
    plt.title(print_res)
    for i in range(len(result)):
        print("class: {:10}   prob: {:.3}".format(class_dict[str(i)],
                                                  result[i]))
    plt.show()


if __name__ == '__main__':
    main()

image.png

posted @ 2022-07-19 16:12  里列昂遗失的记事本  阅读(220)  评论(0编辑  收藏  举报