How the Official PyTorch Implementation of VGG Works
PyTorch's torchvision package already implements a number of classic network models, and VGG is one of them.
Where is the VGG code?
You can find the file at a path such as:
D:\Python\Anaconda3\envs\torch\lib\site-packages\torchvision\models\vgg.py
The part of the path before envs depends on where you installed your environment.
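If you are unsure where your environment lives, a small sketch like this prints the actual location of vgg.py on your machine (it relies only on the module's standard __file__ attribute):
import torchvision.models.vgg as vgg_module
# Print where the currently active environment stores vgg.py
print(vgg_module.__file__)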
It is called like this:
import torchvision.models as models
vgg16 = models.vgg16(pretrained=True)  # VGG16 with pretrained weights
You can also hover over the vgg16 identifier, hold Ctrl, and click it to jump straight to that file.
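As a quick sanity check (my own minimal sketch, not part of the original file), you can feed a dummy batch through the loaded model and confirm the output has one logit per ImageNet class:
import torch
import torchvision.models as models

vgg16 = models.vgg16(pretrained=False)  # set pretrained=True to download the ImageNet weights
vgg16.eval()
x = torch.randn(1, 3, 224, 224)  # a dummy batch: 1 image, 3 channels, 224×224
with torch.no_grad():
    out = vgg16(x)
print(out.shape)  # torch.Size([1, 1000])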
Complete code
I have annotated the file; the complete code is as follows:
import torch
import torch.nn as nn
from .utils import load_state_dict_from_url
# ------------------------------------------------------------------------------
# Public interface exposed by this module
__all__ = [
'VGG', 'vgg11', 'vgg11_bn', 'vgg13', 'vgg13_bn', 'vgg16', 'vgg16_bn',
'vgg19_bn', 'vgg19',
]
# ------------------------------------------------------------------------------
# Download URLs for the pretrained weights
model_urls = {
'vgg11': 'https://download.pytorch.org/models/vgg11-bbd30ac9.pth',
'vgg13': 'https://download.pytorch.org/models/vgg13-c768596a.pth',
'vgg16': 'https://download.pytorch.org/models/vgg16-397923af.pth',
'vgg19': 'https://download.pytorch.org/models/vgg19-dcbb9e9d.pth',
'vgg11_bn': 'https://download.pytorch.org/models/vgg11_bn-6002323d.pth',
'vgg13_bn': 'https://download.pytorch.org/models/vgg13_bn-abd245e5.pth',
'vgg16_bn': 'https://download.pytorch.org/models/vgg16_bn-6c64b313.pth',
'vgg19_bn': 'https://download.pytorch.org/models/vgg19_bn-c79401a0.pth',
}
# ------------------------------------------------------------------------------
class VGG(nn.Module):
'''
Generic VGG network model.
The features argument is the layer stack for the feature-extraction part of the network;
the number of classes is 1000.
'''
def __init__(self, features, num_classes=1000, init_weights=True):
super(VGG, self).__init__()
# Feature-extraction part
self.features = features
# Adaptive average pooling: pools the feature maps to 7×7
self.avgpool = nn.AdaptiveAvgPool2d((7, 7))
# Classification part
self.classifier = nn.Sequential(
nn.Linear(512 * 7 * 7, 4096), # 512*7*7 --> 4096
nn.ReLU(True),
nn.Dropout(),
nn.Linear(4096, 4096), # 4096 --> 4096
nn.ReLU(True),
nn.Dropout(),
nn.Linear(4096, num_classes), # 4096 --> 1000
)
# Weight initialization
if init_weights:
self._initialize_weights()
def forward(self, x):
# Feature extraction
x = self.features(x)
# Adaptive average pooling
x = self.avgpool(x)
# Flatten the feature maps into a vector
x = torch.flatten(x, 1)
# Classifier output
x = self.classifier(x)
return x
def _initialize_weights(self):
'''
Weight initialization.
'''
for m in self.modules():
if isinstance(m, nn.Conv2d):
# Kaiming initialization for convolutional layers
nn.init.kaiming_normal_(
m.weight, mode='fan_out', nonlinearity='relu')
# Biases initialized to 0
if m.bias is not None:
nn.init.constant_(m.bias, 0)
# Batch-norm weights initialized to 1, biases to 0
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
# Fully connected layers: normal-distribution weight initialization
elif isinstance(m, nn.Linear):
nn.init.normal_(m.weight, 0, 0.01)
nn.init.constant_(m.bias, 0)
# ------------------------------------------------------------------------------
def make_layers(cfg, batch_norm=False):
'''
Build the layer list from a configuration table and return it as an nn.Sequential.
'''
layers = []  # initialize the layer list
in_channels = 3  # the input is a 3-channel image
# iterate over the configuration list
for v in cfg:
if v == 'M': # add a max-pooling layer
layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
else: # add a convolutional layer
# 3×3 convolution
conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1)
# convolution --> batch norm (optional) --> ReLU activation
if batch_norm:
layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)]
else:
layers += [conv2d, nn.ReLU(inplace=True)]
# the next layer's input channels equal this layer's output channels
in_channels = v
# return the layer list wrapped in an nn.Sequential
return nn.Sequential(*layers)
# Network configuration tables
'''
A number is an output channel count, e.g. 64 means a 64-channel feature map, matching Conv3-64 in the paper;
'M' is a max-pooling operation, matching maxpool in the paper.
A-LRN uses local response normalization and configuration C contains 1×1 convolutions;
these two are special cases and are therefore left out of the tables.
'''
cfgs = {
'A': [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
'B': [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
'D': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'],
'E': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512, 'M'],
}
# ------------------------------------------------------------------------------
def _vgg(arch, cfg, batch_norm, pretrained, progress, **kwargs):
'''
Generic network builder: creates the model and, if requested, loads pretrained weights.
'''
if pretrained:
kwargs['init_weights'] = False
model = VGG(make_layers(cfgs[cfg], batch_norm=batch_norm), **kwargs)
if pretrained:
state_dict = load_state_dict_from_url(model_urls[arch],
progress=progress)
model.load_state_dict(state_dict)
return model
# ------------------------------------------------------------------------------
def vgg11(pretrained=False, progress=True, **kwargs):
r"""VGG 11-layer model (configuration "A") from
`"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _vgg('vgg11', 'A', False, pretrained, progress, **kwargs)
def vgg11_bn(pretrained=False, progress=True, **kwargs):
r"""VGG 11-layer model (configuration "A") with batch normalization
`"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _vgg('vgg11_bn', 'A', True, pretrained, progress, **kwargs)
def vgg13(pretrained=False, progress=True, **kwargs):
r"""VGG 13-layer model (configuration "B")
`"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _vgg('vgg13', 'B', False, pretrained, progress, **kwargs)
def vgg13_bn(pretrained=False, progress=True, **kwargs):
r"""VGG 13-layer model (configuration "B") with batch normalization
`"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _vgg('vgg13_bn', 'B', True, pretrained, progress, **kwargs)
def vgg16(pretrained=False, progress=True, **kwargs):
r"""VGG 16-layer model (configuration "D")
`"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _vgg('vgg16', 'D', False, pretrained, progress, **kwargs)
def vgg16_bn(pretrained=False, progress=True, **kwargs):
r"""VGG 16-layer model (configuration "D") with batch normalization
`"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _vgg('vgg16_bn', 'D', True, pretrained, progress, **kwargs)
def vgg19(pretrained=False, progress=True, **kwargs):
r"""VGG 19-layer model (configuration "E")
`"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _vgg('vgg19', 'E', False, pretrained, progress, **kwargs)
def vgg19_bn(pretrained=False, progress=True, **kwargs):
r"""VGG 19-layer model (configuration 'E') with batch normalization
`"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _vgg('vgg19_bn', 'E', True, pretrained, progress, **kwargs)
How it works
Below is a brief analysis of how this single file implements the VGG11 through VGG19 networks from the VGG paper.
Overall diagram
First, look at the diagram below; it shows how the pieces of the code relate to each other:
Revisiting the paper
Looking at the per-network parameters in the paper, two configurations stand out: A-LRN uses local response normalization and C contains 1×1 convolutions. The remaining configurations are built entirely from common components such as convolution, pooling and fully connected layers. The PyTorch team therefore exposes only A, B, D and E as the networks reachable through the VGG entry points.
Walking through the implementation
Following the network structures and parameter settings, a cfgs dictionary is built to hold the structural parameters:
cfgs = {
'A': [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
'B': [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
'D': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'],
'E': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512, 'M'],
}
Here, each number is the output channel count of a convolutional layer, and 'M' stands for a max-pooling layer.
From the cfgs configuration, the make_layers function automatically assembles the layer list into an nn.Sequential:
def make_layers(cfg, batch_norm=False):
'''
Build the layer list from a configuration table and return it as an nn.Sequential.
'''
layers = []  # initialize the layer list
in_channels = 3  # the input is a 3-channel image
# iterate over the configuration list
for v in cfg:
if v == 'M': # add a max-pooling layer
layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
else: # add a convolutional layer
# 3×3 convolution
conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1)
# convolution --> batch norm (optional) --> ReLU activation
if batch_norm:
layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)]
else:
layers += [conv2d, nn.ReLU(inplace=True)]
# the next layer's input channels equal this layer's output channels
in_channels = v
# return the layer list wrapped in an nn.Sequential
return nn.Sequential(*layers)
All convolutions here use 3×3 kernels, and batch normalization is optional.
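To see what this produces in practice, here is a small sketch (assuming make_layers, cfgs and the imports above are available in the current session) that builds the configuration-'A' feature extractor and runs a dummy input through it:
# Build the VGG11 ("A") feature extractor: Conv2d/ReLU blocks interleaved with 5 max-pooling layers
features = make_layers(cfgs['A'], batch_norm=False)
print(features)

# The five poolings downsample a 224×224 input to 7×7 spatially, with 512 channels
x = torch.randn(1, 3, 224, 224)
print(features(x).shape)  # torch.Size([1, 512, 7, 7])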
The Sequential returned by make_layers is exactly the feature-extraction part of the network. Building on it, the VGG class constructs a complete, generic VGG model.
class VGG(nn.Module):
'''
Generic VGG network model.
The features argument is the layer stack for the feature-extraction part of the network;
the number of classes is 1000.
'''
def __init__(self, features, num_classes=1000, init_weights=True):
super(VGG, self).__init__()
# Feature-extraction part
self.features = features
# Adaptive average pooling: pools the feature maps to 7×7
self.avgpool = nn.AdaptiveAvgPool2d((7, 7))
# Classification part
self.classifier = nn.Sequential(
nn.Linear(512 * 7 * 7, 4096), # 512*7*7 --> 4096
nn.ReLU(True),
nn.Dropout(),
nn.Linear(4096, 4096), # 4096 --> 4096
nn.ReLU(True),
nn.Dropout(),
nn.Linear(4096, num_classes), # 4096 --> 1000
)
# Weight initialization
if init_weights:
self._initialize_weights()
def forward(self, x):
# Feature extraction
x = self.features(x)
# Adaptive average pooling
x = self.avgpool(x)
# Flatten the feature maps into a vector
x = torch.flatten(x, 1)
# Classifier output
x = self.classifier(x)
return x
def _initialize_weights(self):
'''
Weight initialization.
'''
for m in self.modules():
if isinstance(m, nn.Conv2d):
# Kaiming initialization for convolutional layers
nn.init.kaiming_normal_(
m.weight, mode='fan_out', nonlinearity='relu')
# Biases initialized to 0
if m.bias is not None:
nn.init.constant_(m.bias, 0)
# Batch-norm weights initialized to 1, biases to 0
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
# Fully connected layers: normal-distribution weight initialization
elif isinstance(m, nn.Linear):
nn.init.normal_(m.weight, 0, 0.01)
nn.init.constant_(m.bias, 0)
The PyTorch implementation makes one small change to the paper: after the feature-extraction stage, an adaptive pooling layer reduces the feature maps to a width and height of 7×7. This guarantees that the subsequent flatten step always produces a vector of fixed length, so the network can accept images of arbitrary size (the paper uses 224×224 inputs). The flattened vector is then fed into a classifier made of three fully connected layers, with dropout applied twice. The class also defines how the weights are initialized.
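A minimal sketch of this behaviour (the non-224 input size is my own arbitrary choice): thanks to the adaptive pooling, inputs of different spatial sizes all reach the classifier as 512×7×7 tensors and produce the same output shape:
import torch
import torchvision.models as models

model = models.vgg16(pretrained=False)
model.eval()
with torch.no_grad():
    for size in (224, 320):
        x = torch.randn(1, 3, size, size)
        print(size, model(x).shape)  # both print torch.Size([1, 1000])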
To cover all the variants from VGG11 to VGG19, a single _vgg helper generates the model and, when requested, loads the pretrained weights.
def _vgg(arch, cfg, batch_norm, pretrained, progress, **kwargs):
'''
Generic network builder: creates the model and, if requested, loads pretrained weights.
'''
if pretrained:
kwargs['init_weights'] = False
model = VGG(make_layers(cfgs[cfg], batch_norm=batch_norm), **kwargs)
if pretrained:
state_dict = load_state_dict_from_url(model_urls[arch],
progress=progress)
model.load_state_dict(state_dict)
return model
The download URLs for the pretrained weights are defined in model_urls. If your connection is slow, the download triggered by the call will also be slow. If you have already downloaded the corresponding weight file, you can rewrite this path, or load the file yourself as in the sketch after the dict below, to avoid downloading it again.
model_urls = {
'vgg11': 'https://download.pytorch.org/models/vgg11-bbd30ac9.pth',
'vgg13': 'https://download.pytorch.org/models/vgg13-c768596a.pth',
'vgg16': 'https://download.pytorch.org/models/vgg16-397923af.pth',
'vgg19': 'https://download.pytorch.org/models/vgg19-dcbb9e9d.pth',
'vgg11_bn': 'https://download.pytorch.org/models/vgg11_bn-6002323d.pth',
'vgg13_bn': 'https://download.pytorch.org/models/vgg13_bn-abd245e5.pth',
'vgg16_bn': 'https://download.pytorch.org/models/vgg16_bn-6c64b313.pth',
'vgg19_bn': 'https://download.pytorch.org/models/vgg19_bn-c79401a0.pth',
}
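For example, if you have already downloaded vgg16-397923af.pth by hand, a sketch like the following (the local path is hypothetical) loads it without any network access:
import torch
import torchvision.models as models

model = models.vgg16(pretrained=False)           # build the architecture only
state_dict = torch.load('./vgg16-397923af.pth')  # path to your local copy (hypothetical)
model.load_state_dict(state_dict)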
Finally, each variant gets its own wrapper function that builds the network, for example vgg16 with and without batch-norm layers:
def vgg16(pretrained=False, progress=True, **kwargs):
r"""VGG 16-layer model (configuration "D")
`"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _vgg('vgg16', 'D', False, pretrained, progress, **kwargs)
def vgg16_bn(pretrained=False, progress=True, **kwargs):
r"""VGG 16-layer model (configuration "D") with batch normalization
`"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _vgg('vgg16_bn', 'D', True, pretrained, progress, **kwargs)
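Because **kwargs is forwarded through _vgg into the VGG constructor, these wrappers also let you change constructor arguments such as the number of classes when training from scratch; a sketch (num_classes=10 is just an arbitrary example):
import torchvision.models as models

# num_classes travels through _vgg(**kwargs) into VGG.__init__
model = models.vgg16_bn(pretrained=False, num_classes=10)
print(model.classifier[-1])  # Linear(in_features=4096, out_features=10, bias=True)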
Takeaways
- nn.Sequential makes it quick and convenient to define custom architectures, but it only suits networks that are a plain linear stack of layers (see the sketch after this list).
- Defining a network's skeleton through a configuration table of structural parameters is a clean modelling pattern worth borrowing.
- Learning to spot the similarities between network structures and distilling them into a shared constructor saves a great deal of development time, especially when you need to design many comparison networks.
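To make the first point concrete, here is my own minimal sketch (not from the torchvision source): a residual-style skip connection cannot be expressed as a single nn.Sequential, because its forward pass is not a straight line through the layers:
import torch
import torch.nn as nn

# A plain stack of layers: nn.Sequential is enough
stack = nn.Sequential(nn.Conv2d(3, 16, 3, padding=1), nn.ReLU(inplace=True))

# A skip connection needs a custom forward, so we subclass nn.Module instead
class ResidualBlock(nn.Module):
    def __init__(self, channels):
        super().__init__()
        self.conv = nn.Conv2d(channels, channels, 3, padding=1)

    def forward(self, x):
        return torch.relu(self.conv(x) + x)  # add the input back in: not a linear stack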