This is an unofficial PyTorch implementation by Ignacio Oguiza - oguiza@gmail.com based on: Wang, J., Wang, Z., Li, J., & Wu, J. (2018). Multilevel wavelet decomposition network for interpretable time series analysis. In Proceedings of the 24th ACM SIGKDD International Conference on Knowledge Discovery & Data Mining (pp. 2437-2446).
class WaveBlock [source]

WaveBlock(c_in, c_out, seq_len, wavelet=None) :: Module

Same as nn.Module, but no need for subclasses to call super().__init__
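
As the printed module structure below shows, each WaveBlock pairs two seq_len-to-seq_len linear layers (a high-frequency and a low-frequency branch, initialized from wavelet filter coefficients) with a sigmoid and an AvgPool1d of stride 2 that halves the sequence length. A minimal standalone sketch of one decomposition level, with hypothetical names, just to illustrate the shapes (not the library's exact forward):

import torch
import torch.nn as nn

# Hypothetical sketch of a single decomposition level, inferred from the
# printed module structure: Linear -> Sigmoid -> AvgPool1d(2) per branch.
seq_len = 12
mWDN_H = nn.Linear(seq_len, seq_len)  # high-frequency (detail) branch
mWDN_L = nn.Linear(seq_len, seq_len)  # low-frequency (approximation) branch
sigmoid = nn.Sigmoid()
pool = nn.AvgPool1d(2)                # halves the sequence length

x = torch.rand(16, 3, seq_len)        # (bs, c_in, seq_len)
high = pool(sigmoid(mWDN_H(x)))       # (16, 3, 6)
low = pool(sigmoid(mWDN_L(x)))        # (16, 3, 6), input to the next level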
class mWDN [source]

mWDN(c_in, c_out, seq_len, levels=3, wavelet=None, arch=InceptionTime, arch_kwargs={}) :: Module

Same as nn.Module, but no need for subclasses to call super().__init__
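
mWDN stacks levels WaveBlocks, each operating on half the previous sequence length (12, 6 and 3 for seq_len=12 and levels=3, matching the three blocks printed below), and hands the multilevel outputs to the arch classifier, InceptionTime by default. A hedged usage sketch (the import paths assume the tsai package layout):

from tsai.models.mWDN import mWDN
from tsai.models.InceptionTime import InceptionTime

# levels controls how many WaveBlocks are stacked; each one halves seq_len.
# wavelet=None keeps the default filter initialization.
model = mWDN(c_in=3, c_out=2, seq_len=12, levels=3,
             arch=InceptionTime, arch_kwargs={})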
bs = 16
c_in = 3        # number of input channels (variables)
seq_len = 12    # number of time steps
c_out = 2       # number of classes
xb = torch.rand(bs, c_in, seq_len)
m = mWDN(c_in, c_out, seq_len)
test_eq(m(xb).shape, [bs, c_out])
m
mWDN(
  (blocks): ModuleList(
    (0): WaveBlock(
      (mWDN_H): Linear(in_features=12, out_features=12, bias=True)
      (mWDN_L): Linear(in_features=12, out_features=12, bias=True)
      (sigmoid): Sigmoid()
      (pool): AvgPool1d(kernel_size=(2,), stride=(2,), padding=(0,))
    )
    (1): WaveBlock(
      (mWDN_H): Linear(in_features=6, out_features=6, bias=True)
      (mWDN_L): Linear(in_features=6, out_features=6, bias=True)
      (sigmoid): Sigmoid()
      (pool): AvgPool1d(kernel_size=(2,), stride=(2,), padding=(0,))
    )
    (2): WaveBlock(
      (mWDN_H): Linear(in_features=3, out_features=3, bias=True)
      (mWDN_L): Linear(in_features=3, out_features=3, bias=True)
      (sigmoid): Sigmoid()
      (pool): AvgPool1d(kernel_size=(2,), stride=(2,), padding=(0,))
    )
  )
  (classifier): InceptionTime(
    (inceptionblock): InceptionBlock(
      (inception): ModuleList(
        (0): InceptionModule(
          (bottleneck): Conv1d(3, 32, kernel_size=(1,), stride=(1,), bias=False)
          (convs): ModuleList(
            (0): Conv1d(32, 32, kernel_size=(39,), stride=(1,), padding=(19,), bias=False)
            (1): Conv1d(32, 32, kernel_size=(19,), stride=(1,), padding=(9,), bias=False)
            (2): Conv1d(32, 32, kernel_size=(9,), stride=(1,), padding=(4,), bias=False)
          )
          (maxconvpool): Sequential(
            (0): MaxPool1d(kernel_size=3, stride=1, padding=1, dilation=1, ceil_mode=False)
            (1): Conv1d(3, 32, kernel_size=(1,), stride=(1,), bias=False)
          )
          (concat): Concat()
          (bn): BatchNorm1d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
          (act): ReLU()
        )
        (1): InceptionModule(
          (bottleneck): Conv1d(128, 32, kernel_size=(1,), stride=(1,), bias=False)
          (convs): ModuleList(
            (0): Conv1d(32, 32, kernel_size=(39,), stride=(1,), padding=(19,), bias=False)
            (1): Conv1d(32, 32, kernel_size=(19,), stride=(1,), padding=(9,), bias=False)
            (2): Conv1d(32, 32, kernel_size=(9,), stride=(1,), padding=(4,), bias=False)
          )
          (maxconvpool): Sequential(
            (0): MaxPool1d(kernel_size=3, stride=1, padding=1, dilation=1, ceil_mode=False)
            (1): Conv1d(128, 32, kernel_size=(1,), stride=(1,), bias=False)
          )
          (concat): Concat()
          (bn): BatchNorm1d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
          (act): ReLU()
        )
        (2): InceptionModule(
          (bottleneck): Conv1d(128, 32, kernel_size=(1,), stride=(1,), bias=False)
          (convs): ModuleList(
            (0): Conv1d(32, 32, kernel_size=(39,), stride=(1,), padding=(19,), bias=False)
            (1): Conv1d(32, 32, kernel_size=(19,), stride=(1,), padding=(9,), bias=False)
            (2): Conv1d(32, 32, kernel_size=(9,), stride=(1,), padding=(4,), bias=False)
          )
          (maxconvpool): Sequential(
            (0): MaxPool1d(kernel_size=3, stride=1, padding=1, dilation=1, ceil_mode=False)
            (1): Conv1d(128, 32, kernel_size=(1,), stride=(1,), bias=False)
          )
          (concat): Concat()
          (bn): BatchNorm1d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
          (act): ReLU()
        )
        (3): InceptionModule(
          (bottleneck): Conv1d(128, 32, kernel_size=(1,), stride=(1,), bias=False)
          (convs): ModuleList(
            (0): Conv1d(32, 32, kernel_size=(39,), stride=(1,), padding=(19,), bias=False)
            (1): Conv1d(32, 32, kernel_size=(19,), stride=(1,), padding=(9,), bias=False)
            (2): Conv1d(32, 32, kernel_size=(9,), stride=(1,), padding=(4,), bias=False)
          )
          (maxconvpool): Sequential(
            (0): MaxPool1d(kernel_size=3, stride=1, padding=1, dilation=1, ceil_mode=False)
            (1): Conv1d(128, 32, kernel_size=(1,), stride=(1,), bias=False)
          )
          (concat): Concat()
          (bn): BatchNorm1d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
          (act): ReLU()
        )
        (4): InceptionModule(
          (bottleneck): Conv1d(128, 32, kernel_size=(1,), stride=(1,), bias=False)
          (convs): ModuleList(
            (0): Conv1d(32, 32, kernel_size=(39,), stride=(1,), padding=(19,), bias=False)
            (1): Conv1d(32, 32, kernel_size=(19,), stride=(1,), padding=(9,), bias=False)
            (2): Conv1d(32, 32, kernel_size=(9,), stride=(1,), padding=(4,), bias=False)
          )
          (maxconvpool): Sequential(
            (0): MaxPool1d(kernel_size=3, stride=1, padding=1, dilation=1, ceil_mode=False)
            (1): Conv1d(128, 32, kernel_size=(1,), stride=(1,), bias=False)
          )
          (concat): Concat()
          (bn): BatchNorm1d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
          (act): ReLU()
        )
        (5): InceptionModule(
          (bottleneck): Conv1d(128, 32, kernel_size=(1,), stride=(1,), bias=False)
          (convs): ModuleList(
            (0): Conv1d(32, 32, kernel_size=(39,), stride=(1,), padding=(19,), bias=False)
            (1): Conv1d(32, 32, kernel_size=(19,), stride=(1,), padding=(9,), bias=False)
            (2): Conv1d(32, 32, kernel_size=(9,), stride=(1,), padding=(4,), bias=False)
          )
          (maxconvpool): Sequential(
            (0): MaxPool1d(kernel_size=3, stride=1, padding=1, dilation=1, ceil_mode=False)
            (1): Conv1d(128, 32, kernel_size=(1,), stride=(1,), bias=False)
          )
          (concat): Concat()
          (bn): BatchNorm1d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
          (act): ReLU()
        )
      )
      (shortcut): ModuleList(
        (0): ConvBlock(
          (0): Conv1d(3, 128, kernel_size=(1,), stride=(1,), bias=False)
          (1): BatchNorm1d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        )
        (1): BatchNorm1d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      )
      (add): Add()
      (act): ReLU()
    )
    (gap): AdaptiveAvgPool1d(output_size=1)
    (squeeze): Squeeze()
    (fc): Linear(in_features=128, out_features=2, bias=True)
  )
)
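
As a quick sanity check that the whole model is trainable end to end, here is a hypothetical one-step smoke test on the random batch above (the labels and loss are illustrative, not part of the library's API):

import torch
import torch.nn.functional as F

# Hypothetical smoke test: one forward/backward pass on random data.
yb = torch.randint(0, c_out, (bs,))  # random integer class labels
logits = m(xb)                       # (bs, c_out) = (16, 2)
loss = F.cross_entropy(logits, yb)
loss.backward()                      # gradients flow through the WaveBlocks and the classifier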