import torch
import time
from torch import nn
# 初始的卷积层,对输入的图片进行处理成feature map
class Conv1(nn.Module):
    """Stem block: turn the input image into the first feature map.

    7x7 stride-2 conv + BN + ReLU followed by a 3x3 stride-2 max-pool,
    so the spatial resolution is reduced to 1/4 of the input.
    """

    def __init__(self, inp_channels, out_channels, stride=2):
        super(Conv1, self).__init__()
        # Per the conv size formula (i - k + 2p)/s + 1: the conv halves
        # H and W, and the max-pool halves them again.
        self.net = nn.Sequential(
            nn.Conv2d(inp_channels, out_channels, kernel_size=7,
                      stride=stride, padding=3, bias=False),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2, padding=1),
        )

    def forward(self, x):
        y = self.net(x)
        return y
class Simple_Res_Block(nn.Module):
    """Basic residual block (two 3x3 convs), as used by ResNet-18/34.

    Args:
        inp_channels: channels of the incoming feature map.
        out_channels: base channel count of this block.
        stride: stride of the first conv; 2 halves the spatial size.
        downsample: if True, project the identity branch with a 1x1 conv
            so it matches the main branch in channels and resolution.
        expansion_: if True, widen the output by 4x (bottleneck
            convention); otherwise the output keeps out_channels.
    """

    def __init__(self, inp_channels, out_channels, stride=1, downsample=False, expansion_=False):
        super(Simple_Res_Block, self).__init__()
        self.downsample = downsample
        # Output-width multiplier: 4 when expansion is requested, else 1.
        self.expansion = 4 if expansion_ else 1
        self.conv = nn.Sequential(
            nn.Conv2d(inp_channels, out_channels, kernel_size=3,
                      stride=stride, padding=1),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(inplace=True),
            nn.Conv2d(out_channels, out_channels * self.expansion,
                      kernel_size=3, padding=1),
            nn.BatchNorm2d(out_channels * self.expansion),
        )
        if self.downsample:
            # 1x1 projection so the shortcut matches the main branch
            # in both channel count and spatial size.
            self.downsample_layer = nn.Sequential(
                nn.Conv2d(inp_channels, out_channels * self.expansion,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(out_channels * self.expansion),
            )
        self.relu = nn.ReLU(inplace=True)

    def forward(self, input):
        residual = input
        x = self.conv(input)
        if self.downsample:
            residual = self.downsample_layer(residual)  # match x's shape
        out = residual + x  # residual addition
        out = self.relu(out)
        return out
class Residual_Block(nn.Module):
    """Bottleneck residual block (1x1 reduce -> 3x3 -> 1x1 expand), ResNet-50+.

    Args:
        inp_channels: channels of the incoming feature map.
        out_channels: bottleneck width; the block outputs
            out_channels * expansion channels.
        stride: stride of the middle 3x3 conv; 2 halves the spatial size.
        downsample: if True, project the shortcut with a 1x1 conv so it
            matches the block's output in channels and resolution.
        expansion_: if True, expand the output channels by 4x.
    """

    def __init__(self, inp_channels, out_channels, stride=1, downsample=False, expansion_=False):
        super(Residual_Block, self).__init__()
        # Whether the shortcut needs a projection to match the output.
        self.downsample = downsample
        self.expansion = 4 if expansion_ else 1
        # 1x1 conv: pure channel mapping, spatial size unchanged.
        self.conv1 = nn.Conv2d(inp_channels, out_channels, kernel_size=1,
                               stride=1, bias=False)
        self.dropout = nn.Dropout(0.5)  # NOTE(review): defined but never applied in forward()
        self.BN1 = nn.BatchNorm2d(out_channels)
        # 3x3 conv: kernel/padding keep the size; the stride alone decides
        # whether the feature map is halved.
        self.conv2 = nn.Conv2d(out_channels, out_channels, kernel_size=3,
                               stride=stride, padding=1, bias=False)
        self.BN2 = nn.BatchNorm2d(out_channels)
        # 1x1 conv: expand the channel count by the expansion factor.
        self.conv3 = nn.Conv2d(out_channels, out_channels * self.expansion,
                               kernel_size=1, stride=1, bias=False)
        self.BN3 = nn.BatchNorm2d(out_channels * self.expansion)
        self.relu = nn.ReLU(inplace=True)
        if self.downsample:
            self.downsample_layer = nn.Sequential(
                nn.Conv2d(inp_channels, out_channels * self.expansion,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(out_channels * self.expansion),
            )

    def forward(self, input):
        residual = input
        x = self.relu(self.BN1(self.conv1(input)))
        x = self.relu(self.BN2(self.conv2(x)))
        h = self.BN3(self.conv3(x))
        if self.downsample:
            residual = self.downsample_layer(residual)  # match h's shape
        out = h + residual  # residual connection
        out = self.relu(out)
        return out
class Resnet(nn.Module):
    """Generic ResNet: stem conv, four residual stages, avg-pool, FC classifier.

    Expects 3-channel input of spatial size 224x224 (so that stage 4
    emits 7x7 maps for the 7x7 average pool).

    Args:
        net_block: residual block class (Simple_Res_Block or Residual_Block).
        block: list of four ints — number of residual blocks per stage.
        num_class: size of the classification output.
        expansion_: True for bottleneck nets (per-stage output channels x4).
    """

    def __init__(self, net_block, block, num_class=1000, expansion_=False):
        super(Resnet, self).__init__()
        self.expansion_ = expansion_
        self.expansion = 4 if expansion_ else 1
        # Stem: (3, 224, 224) -> (64, 56, 56).
        self.conv1 = Conv1(3, 64)
        # Stage 1 keeps the resolution (stride 1); stages 2-4 halve it.
        # e.g. with expansion: (64,56,56)->(256,56,56)->(512,28,28)
        #      ->(1024,14,14)->(2048,7,7)
        self.block1 = self.make_layer(net_block, block[0], 64, 64,
                                      expansion_=self.expansion_, stride=1)
        self.block2 = self.make_layer(net_block, block[1], 64 * self.expansion, 128,
                                      expansion_=self.expansion_, stride=2)
        self.block3 = self.make_layer(net_block, block[2], 128 * self.expansion, 256,
                                      expansion_=self.expansion_, stride=2)
        self.block4 = self.make_layer(net_block, block[3], 256 * self.expansion, 512,
                                      expansion_=self.expansion_, stride=2)
        # (C, 7, 7) -> (C, 1, 1): average every channel's pixels.
        self.avgpool = nn.AvgPool2d(7, stride=1)
        # Classifier input width is 512 * expansion.
        length = 2048 if expansion_ else 512
        self.fc = nn.Linear(length, num_class)
        # Standard ResNet weight initialization.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)

    def make_layer(self, net_block, layers, inp_channels, out_channels,
                   expansion_=False, stride=1):
        """Build one stage: a projecting block followed by layers-1 plain blocks."""
        block = []
        # The first block adapts the previous stage's channel count (and,
        # when stride=2, halves the resolution), so it always projects.
        block.append(net_block(inp_channels, out_channels, stride=stride,
                               downsample=True, expansion_=expansion_))
        expansion = 4 if expansion_ else 1
        for _ in range(1, layers):
            block.append(net_block(out_channels * expansion, out_channels,
                                   expansion_=expansion_))
        return nn.Sequential(*block)

    def forward(self, x):
        x = self.conv1(x)
        x = self.block1(x)
        x = self.block2(x)
        x = self.block3(x)
        x = self.block4(x)
        # The average pool is required: fc expects 512*expansion features,
        # not 512*expansion*7*7. (It was commented out in the original,
        # which would make the flatten/fc shapes mismatch.)
        x = self.avgpool(x)
        x = x.view(x.size(0), -1)  # flatten to (batch, features)
        x = self.fc(x)
        return x
def Resnet18():
    """18-layer ResNet: four stages of two basic blocks each, 10 classes."""
    stage_sizes = [2, 2, 2, 2]
    return Resnet(Simple_Res_Block, stage_sizes, num_class=10, expansion_=False)
def Resnet34():
    """34-layer ResNet: basic blocks stacked 3-4-6-3, 10 classes."""
    stage_sizes = [3, 4, 6, 3]
    return Resnet(Simple_Res_Block, stage_sizes, num_class=10, expansion_=False)
def Resnet50():
    """50-layer ResNet: 16 bottleneck blocks (3-4-6-3) plus stem and fc."""
    stage_sizes = [3, 4, 6, 3]
    return Resnet(Residual_Block, stage_sizes, expansion_=True)
def Resnet101():
    """101-layer ResNet: bottleneck blocks stacked 3-4-23-3."""
    stage_sizes = [3, 4, 23, 3]
    return Resnet(Residual_Block, stage_sizes, expansion_=True)
def Resnet152():
    """152-layer ResNet: bottleneck blocks stacked 3-8-36-3."""
    stage_sizes = [3, 8, 36, 3]
    return Resnet(Residual_Block, stage_sizes, expansion_=True)