LSTM_FCN(
  (rnn): LSTM(2, 100, batch_first=True)
  (rnn_dropout): Dropout(p=0.8, inplace=False)
  (convblock1): ConvBlock(
    (0): Conv1d(3, 128, kernel_size=(7,), stride=(1,), padding=(3,), bias=False)
    (1): BatchNorm1d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
    (2): ReLU()
  )
  (se1): SqueezeExciteBlock(
    (avg_pool): GAP1d(
      (gap): AdaptiveAvgPool1d(output_size=1)
      (flatten): Flatten(full=False)
    )
    (fc): Sequential(
      (0): Linear(in_features=128, out_features=16, bias=False)
      (1): ReLU()
      (2): Linear(in_features=16, out_features=128, bias=False)
      (3): Sigmoid()
    )
  )
  (convblock2): ConvBlock(
    (0): Conv1d(128, 256, kernel_size=(5,), stride=(1,), padding=(2,), bias=False)
    (1): BatchNorm1d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
    (2): ReLU()
  )
  (se2): SqueezeExciteBlock(
    (avg_pool): GAP1d(
      (gap): AdaptiveAvgPool1d(output_size=1)
      (flatten): Flatten(full=False)
    )
    (fc): Sequential(
      (0): Linear(in_features=256, out_features=32, bias=False)
      (1): ReLU()
      (2): Linear(in_features=32, out_features=256, bias=False)
      (3): Sigmoid()
    )
  )
  (convblock3): ConvBlock(
    (0): Conv1d(256, 128, kernel_size=(3,), stride=(1,), padding=(1,), bias=False)
    (1): BatchNorm1d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
    (2): ReLU()
  )
  (gap): GAP1d(
    (gap): AdaptiveAvgPool1d(output_size=1)
    (flatten): Flatten(full=False)
  )
  (concat): Concat(1)
  (fc): Linear(in_features=228, out_features=12, bias=True)
)
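
The module names in this summary (ConvBlock, SqueezeExciteBlock, GAP1d, Concat) match tsai's LSTM_FCN, and the in_features=228 of the final Linear layer is simply the 100-dim LSTM output concatenated with the 128 global-average-pooled convolutional features. The sketch below is a plain-PyTorch reconstruction of the same data flow, not the library's actual implementation; the class and argument names (LSTMFCN, SqueezeExcite1d, conv_block, se_reduction) are placeholders, and the LSTM reading the input as (batch, channels, seq_len) is an assumption inferred from its input size of 2 in the printout above.

import torch
import torch.nn as nn


class SqueezeExcite1d(nn.Module):
    """Channel gating: average over time, two linear layers, sigmoid rescale."""

    def __init__(self, channels: int, reduction: int = 8):
        super().__init__()
        self.fc = nn.Sequential(
            nn.Linear(channels, channels // reduction, bias=False),
            nn.ReLU(),
            nn.Linear(channels // reduction, channels, bias=False),
            nn.Sigmoid(),
        )

    def forward(self, x):                        # x: (batch, channels, seq_len)
        w = self.fc(x.mean(dim=-1))               # (batch, channels)
        return x * w.unsqueeze(-1)                # rescale each channel


def conv_block(c_in: int, c_out: int, ks: int) -> nn.Sequential:
    """Conv1d -> BatchNorm1d -> ReLU with the same padding as the printout."""
    return nn.Sequential(
        nn.Conv1d(c_in, c_out, ks, padding=ks // 2, bias=False),
        nn.BatchNorm1d(c_out),
        nn.ReLU(),
    )


class LSTMFCN(nn.Module):
    """Hypothetical reconstruction of the architecture shown above."""

    def __init__(self, c_in=3, c_out=12, seq_len=2, hidden_size=100,
                 rnn_dropout=0.8, se_reduction=8):
        super().__init__()
        # RNN branch: the LSTM reads the raw (batch, c_in, seq_len) tensor,
        # so its input size equals seq_len (2 in the summary above).
        self.rnn = nn.LSTM(seq_len, hidden_size, batch_first=True)
        self.rnn_dropout = nn.Dropout(rnn_dropout)
        # FCN branch: 128 -> 256 -> 128 filters with kernel sizes 7, 5, 3.
        self.convblock1 = conv_block(c_in, 128, 7)
        self.se1 = SqueezeExcite1d(128, se_reduction)
        self.convblock2 = conv_block(128, 256, 5)
        self.se2 = SqueezeExcite1d(256, se_reduction)
        self.convblock3 = conv_block(256, 128, 3)
        self.fc = nn.Linear(hidden_size + 128, c_out)     # 100 + 128 = 228

    def forward(self, x):                         # x: (batch, c_in, seq_len)
        out, _ = self.rnn(x)                      # (batch, c_in, hidden_size)
        rnn_feat = self.rnn_dropout(out[:, -1])   # last step: (batch, 100)
        y = self.convblock1(x)
        y = self.se1(y)
        y = self.convblock2(y)
        y = self.se2(y)
        y = self.convblock3(y)
        conv_feat = y.mean(dim=-1)                # global average pool: (batch, 128)
        return self.fc(torch.cat([rnn_feat, conv_feat], dim=1))  # (batch, 12)


x = torch.randn(8, 3, 2)                          # (batch, channels, seq_len)
print(LSTMFCN()(x).shape)                         # torch.Size([8, 12])

The shape check at the bottom should print torch.Size([8, 12]), matching the 12 output classes of the fc layer in the summary.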