VGG19网络结构
VGG的创新点包括使用3x3小核替换传统的5x5或7x7大核卷积,其整个网络除全连接层外,均采用步长为 1 填充为 1 的 3x3 卷积 + ReLU 和步长为 2 填充为 0 的 2x2 最大池化,网络结构非常简洁。VGG网络的输入维度为 3x224x224,在全连接层之前没有使用全局平均值池化压缩特征图的空间维度,而是直接将特征图的后 3 维 Flatten 后输入到全连接层中,这样导致第一个全连接层有较多的参数,且VGG包括 3 个全连接层,在训练时需要使用Dropout等正则化方法防止过拟合。VGG19的网络结构如下图所示:
PyTorch复现代码
# VGG19.py
import torch
import torch.nn as nn
class Conv(nn.Module):
    """Basic conv block: Conv2d followed by an optional ReLU activation.

    When ``padding`` is not given, it defaults to ``kernel_size // 2``,
    which keeps the spatial size unchanged for odd kernels at stride 1
    ("same" padding) — exactly what VGG's 3x3 convolutions need.
    """

    def __init__(self, in_channels, out_channels, kernel_size=1, stride=1,
                 padding=None, groups=1, activation=True):
        super(Conv, self).__init__()
        # Derive "same" padding unless the caller pinned an explicit value.
        if padding is None:
            padding = kernel_size // 2
        self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, stride,
                              padding, groups=groups, bias=True)
        # Identity stands in for "no activation" so forward() stays branch-free.
        self.act = nn.ReLU(inplace=True) if activation else nn.Identity()

    def forward(self, x):
        return self.act(self.conv(x))
class VGG19(nn.Module):
    """VGG-19: five conv stages (2+2+4+4+4 conv layers) + 3-layer classifier.

    Input is expected to be ``(N, 3, 224, 224)``; after five 2x2 max-pools
    the feature map is ``512 x 7 x 7``, which is flattened into the head.
    """

    def __init__(self, num_classes):
        super(VGG19, self).__init__()
        # (in_channels, out_channels, number of 3x3 conv blocks) per stage.
        stage_cfg = [
            (3, 64, 2),
            (64, 128, 2),
            (128, 256, 4),
            (256, 512, 4),
            (512, 512, 4),
        ]
        self.stages = nn.Sequential(
            *(self._make_stage(c_in, c_out, num_blocks=n, max_pooling=True)
              for c_in, c_out, n in stage_cfg)
        )
        # Classifier head: flatten 512x7x7 features into two 4096-d FC layers.
        self.head = nn.Sequential(
            nn.Flatten(start_dim=1, end_dim=-1),
            nn.Linear(512 * 7 * 7, 4096),
            nn.ReLU(inplace=True),
            nn.Linear(4096, 4096),
            nn.ReLU(inplace=True),
            nn.Linear(4096, num_classes),
        )

    @staticmethod
    def _make_stage(in_channels, out_channels, num_blocks, max_pooling):
        """Stack ``num_blocks`` 3x3 stride-1 convs, optionally + 2x2 max-pool."""
        layers = []
        for i in range(num_blocks):
            # Only the first conv of a stage changes the channel count.
            src = in_channels if i == 0 else out_channels
            layers.append(Conv(src, out_channels, kernel_size=3, stride=1))
        if max_pooling:
            layers.append(nn.MaxPool2d(kernel_size=2, stride=2, padding=0))
        return nn.Sequential(*layers)

    def forward(self, x):
        features = self.stages(x)
        return self.head(features)
if __name__ == "__main__":
    # Smoke test: one forward pass through VGG19.
    # Fix: the original unconditionally called .cuda(), which raises on any
    # CPU-only machine; auto-detect the device instead (identical behavior
    # on CUDA hosts, graceful fallback elsewhere).
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    inputs = torch.rand((8, 3, 224, 224), device=device)
    model = VGG19(num_classes=1000).to(device).train()
    outputs = model(inputs)
    print(outputs.shape)  # expected: torch.Size([8, 1000])
更多推荐
简单易懂的PyTorch版VGG19复现代码
发布评论