Introduction
The official ResNet code for PyTorch is published here.
The official library implements the following ResNet family of models (a minimal loading example follows the list):
- resnet18
- resnet34
- resnet50
- resnet101
- resnet152
- resnext50_32x4d
- resnext101_32x8d
- wide_resnet50_2
- wide_resnet101_2
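For reference, here is a minimal sketch of instantiating one of these official variants through torchvision.models (weights are left random here; the argument for loading pretrained weights differs between torchvision versions):

import torchvision.models as models

# Instantiate one of the officially implemented variants with random weights.
model = models.resnet18()
print(model)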
In addition, the original paper (https://arxiv.org/pdf/1512.03385.pdf) also uses ResNet20, in its CIFAR-10 experiments.
Purpose of this article
As the list above shows, ResNet20 is not provided in the official library, so this article introduces and explains a PyTorch implementation written by someone else.
Note: this article also doubles as a personal memo, so please bear with me.
Structure of ResNet20
Source paper for the structure figure: (https://www.researchgate.net/publication/332522209_Gotta_Catch_%27Em_All_Using_Concealed_Trapdoors_to_Detect_Adversarial_Attacks_on_Neural_Networks)
The name comes from the 20 weight layers: one stem convolution, three stages of three residual blocks (two 3x3 convolutions each, 18 in total), and a final fully connected layer (1 + 18 + 1 = 20).
ResNet20 code
Residual block
resnet20.py
import torch.nn as nn


class resblock(nn.Module):
    def __init__(self, in_channels, out_channels, return_before_act):
        super(resblock, self).__init__()
        self.return_before_act = return_before_act
        # When the channel count changes, the spatial resolution is halved and
        # the shortcut needs a 1x1 projection to match shapes.
        self.downsample = (in_channels != out_channels)
        if self.downsample:
            self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=2, padding=1, bias=False)
            self.ds = nn.Sequential(*[
                nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=2, bias=False),
                nn.BatchNorm2d(out_channels)
            ])
        else:
            self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=1, padding=1, bias=False)
            self.ds = None
        self.bn1 = nn.BatchNorm2d(out_channels)
        self.relu = nn.ReLU()
        self.conv2 = nn.Conv2d(out_channels, out_channels, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(out_channels)

    def forward(self, x):
        residual = x

        pout = self.conv1(x)  # pout: pre-activation output
        pout = self.bn1(pout)
        pout = self.relu(pout)

        pout = self.conv2(pout)
        pout = self.bn2(pout)

        # Project the shortcut when the block downsamples, then add it.
        if self.downsample:
            residual = self.ds(x)

        pout += residual
        out = self.relu(pout)

        if not self.return_before_act:
            return out
        else:
            return pout, out
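As a quick check (not part of the original article), the following hypothetical snippet feeds a feature map through a downsampling block; the input size and the 16 to 32 channel change are assumptions chosen to match the stage transition in resnet20 below.

import torch

block = resblock(in_channels=16, out_channels=32, return_before_act=True)
x = torch.randn(1, 16, 32, 32)  # (batch, channels, height, width)
pout, out = block(x)            # return_before_act=True -> returns both tensors
print(pout.shape, out.shape)    # both torch.Size([1, 32, 16, 16])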
ResNet20
resnet20.py
class resnet20(nn.Module):
    def __init__(self, num_class):
        super(resnet20, self).__init__()
        # Stem: a 3x3 convolution mapping the RGB input to 16 channels (32x32 resolution is kept).
        self.conv1 = nn.Conv2d(3, 16, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(16)
        self.relu = nn.ReLU()

        # Three stages of three residual blocks each; the channel count doubles
        # and the resolution halves at the start of stages 2 and 3.
        self.res1 = self.make_layer(resblock, 3, 16, 16)
        self.res2 = self.make_layer(resblock, 3, 16, 32)
        self.res3 = self.make_layer(resblock, 3, 32, 64)

        self.avgpool = nn.AvgPool2d(8)
        self.fc = nn.Linear(64, num_class)

        # He initialization for convolutions, constant initialization for BatchNorm.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)

        self.num_class = num_class

    def make_layer(self, block, num, in_channels, out_channels):  # num must be >= 2
        layers = [block(in_channels, out_channels, False)]
        for i in range(num - 2):
            layers.append(block(out_channels, out_channels, False))
        # Only the last block of each stage returns (pre-activation, output).
        layers.append(block(out_channels, out_channels, True))
        return nn.Sequential(*layers)

    def forward(self, x):
        pstem = self.conv1(x)  # pstem: stem output before activation
        pstem = self.bn1(pstem)
        stem = self.relu(pstem)
        stem = (pstem, stem)

        # Each stage returns (pre-activation, output); the activated output is fed forward.
        rb1 = self.res1(stem[1])
        rb2 = self.res2(rb1[1])
        rb3 = self.res3(rb2[1])

        feat = self.avgpool(rb3[1])
        feat = feat.view(feat.size(0), -1)
        out = self.fc(feat)

        # The intermediate features are returned alongside the logits.
        return stem, rb1, rb2, rb3, feat, out

    def get_channel_num(self):
        return [16, 16, 32, 64, 64, self.num_class]

    def get_chw_num(self):
        return [(16, 32, 32),
                (16, 32, 32),
                (32, 16, 16),
                (64, 8, 8),
                (64,),
                (self.num_class,)]
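Finally, a hypothetical usage sketch (not in the original article): instantiate the model, run a CIFAR-10-sized batch through it, and confirm that the returned shapes match get_chw_num(). The batch size and class count here are arbitrary assumptions.

import torch

model = resnet20(num_class=10)
x = torch.randn(4, 3, 32, 32)  # CIFAR-10 images: 3x32x32
stem, rb1, rb2, rb3, feat, out = model(x)

print(stem[1].shape)  # torch.Size([4, 16, 32, 32])
print(rb3[1].shape)   # torch.Size([4, 64, 8, 8])
print(feat.shape)     # torch.Size([4, 64])
print(out.shape)      # torch.Size([4, 10]), matching get_chw_num()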