This is an unofficial PyTorch implementation by Ignacio Oguiza (oguiza@gmail.com), based on:
class ResBlock [source]

ResBlock(expansion, ni, nf, stride=1, groups=1, reduction=None, nh1=None, nh2=None, dw=False, g2=1, sa=False, sym=False, norm_type=<NormType.Batch: 1>, act_cls=ReLU, ndim=2, ks=3, pool=AvgPool, pool_first=True, padding=None, bias=None, bn_1st=True, transpose=False, init='auto', xtra=None, bias_std=0.01, dilation: Union[int, Tuple[int, int]] = 1, padding_mode: str = 'zeros') :: Module
ResNet block from `ni` to `nh` with `stride`.
class ResNet [source]

ResNet(c_in, c_out) :: Module
Same as nn.Module, but subclasses do not need to call super().__init__().
xb = torch.rand(2, 3, 4)
test_eq(ResNet(3,2)(xb).shape, [xb.shape[0], 2])
test_eq(total_params(ResNet(3, 2))[0], 479490) # for (3,2)
ResNet(3,2)
ResNet( (resblock1): ResBlock( (convblock1): ConvBlock( (0): Conv1d(3, 64, kernel_size=(7,), stride=(1,), padding=(3,), bias=False) (1): BatchNorm1d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (2): ReLU() ) (convblock2): ConvBlock( (0): Conv1d(64, 64, kernel_size=(5,), stride=(1,), padding=(2,), bias=False) (1): BatchNorm1d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (2): ReLU() ) (convblock3): ConvBlock( (0): Conv1d(64, 64, kernel_size=(3,), stride=(1,), padding=(1,), bias=False) (1): BatchNorm1d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) ) (shortcut): ConvBlock( (0): Conv1d(3, 64, kernel_size=(1,), stride=(1,), bias=False) (1): BatchNorm1d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) ) (add): Add() (act): ReLU() ) (resblock2): ResBlock( (convblock1): ConvBlock( (0): Conv1d(64, 128, kernel_size=(7,), stride=(1,), padding=(3,), bias=False) (1): BatchNorm1d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (2): ReLU() ) (convblock2): ConvBlock( (0): Conv1d(128, 128, kernel_size=(5,), stride=(1,), padding=(2,), bias=False) (1): BatchNorm1d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (2): ReLU() ) (convblock3): ConvBlock( (0): Conv1d(128, 128, kernel_size=(3,), stride=(1,), padding=(1,), bias=False) (1): BatchNorm1d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) ) (shortcut): ConvBlock( (0): Conv1d(64, 128, kernel_size=(1,), stride=(1,), bias=False) (1): BatchNorm1d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) ) (add): Add() (act): ReLU() ) (resblock3): ResBlock( (convblock1): ConvBlock( (0): Conv1d(128, 128, kernel_size=(7,), stride=(1,), padding=(3,), bias=False) (1): BatchNorm1d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (2): ReLU() ) (convblock2): ConvBlock( (0): Conv1d(128, 128, kernel_size=(5,), stride=(1,), padding=(2,), bias=False) (1): 
BatchNorm1d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (2): ReLU() ) (convblock3): ConvBlock( (0): Conv1d(128, 128, kernel_size=(3,), stride=(1,), padding=(1,), bias=False) (1): BatchNorm1d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) ) (shortcut): BatchNorm1d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (add): Add() (act): ReLU() ) (gap): AdaptiveAvgPool1d(output_size=1) (squeeze): Squeeze() (fc): Linear(in_features=128, out_features=2, bias=True) )