This is an unofficial PyTorch implementation by Ignacio Oguiza (oguiza@gmail.com) based on Fauvel, K., Lin, T., Masson, V., Fromont, É., & Termier, A. (2021), "XCM: An Explainable Convolutional Neural Network for Multivariate Time Series Classification".

class XCMPlus[source]

XCMPlus(c_in:int, c_out:int, seq_len:Optional[int]=None, nf:int=128, window_perc:float=1.0, flatten:bool=False, custom_head:callable=None, concat_pool:bool=False, fc_dropout:float=0.0, bn:bool=False, y_range:tuple=None, **kwargs) :: Sequential
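XCMPlus combines a 2D convolutional branch that extracts features per variable with a parallel 1D convolutional branch that extracts temporal features; both outputs are concatenated and passed through a final 1D convolution before the head (see the printed model below).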

A sequential container. Modules will be added to it in the order they are passed in the constructor. Alternatively, an ordered dict of modules can also be passed in.

To make it easier to understand, here is a small example:

import torch.nn as nn
from collections import OrderedDict

# Example of using Sequential
model = nn.Sequential(
          nn.Conv2d(1, 20, 5),
          nn.ReLU(),
          nn.Conv2d(20, 64, 5),
          nn.ReLU()
        )

# Example of using Sequential with OrderedDict
model = nn.Sequential(OrderedDict([
          ('conv1', nn.Conv2d(1, 20, 5)),
          ('relu1', nn.ReLU()),
          ('conv2', nn.Conv2d(20, 64, 5)),
          ('relu2', nn.ReLU())
        ]))
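The cells below build an XCMPlus model on the UCR NATOPS dataset and check the output shapes for several configurations: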
from tsai.data.all import *
from tsai.models.XCM import *

dsid = 'NATOPS'
X, y, splits = get_UCR_data(dsid, split_data=False)
tfms = [None, Categorize()]
dls = get_ts_dls(X, y, splits=splits, tfms=tfms)
model = XCMPlus(dls.vars, dls.c, dls.len)
learn = Learner(dls, model, metrics=accuracy)
xb, yb = dls.one_batch()

bs, c_in, seq_len = xb.shape
c_out = len(np.unique(yb))

model = XCMPlus(c_in, c_out, seq_len, fc_dropout=.5)
test_eq(model(xb).shape, (bs, c_out))
model = XCMPlus(c_in, c_out, seq_len, concat_pool=True)
test_eq(model(xb).shape, (bs, c_out))
model = XCMPlus(c_in, c_out, seq_len)
test_eq(model(xb).shape, (bs, c_out))
test_eq(count_parameters(XCMPlus(c_in, c_out, seq_len)), count_parameters(XCM(c_in, c_out, seq_len)))
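With the Learner created above, a short training run is a one-liner (a minimal sketch; the epoch count and learning rate are illustrative, not tuned values):

learn.fit_one_cycle(10, lr_max=1e-3)  # illustrative hyperparameters, not tuned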
model
XCMPlus(
  (backbone): _XCMPlus_Backbone(
    (conv2dblock): Sequential(
      (0): Unsqueeze(dim=1)
      (1): Conv2dSame(
        (conv2d_same): Conv2d(1, 128, kernel_size=(1, 51), stride=(1, 1))
      )
      (2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (3): ReLU()
    )
    (conv2d1x1block): Sequential(
      (0): Conv2d(128, 1, kernel_size=(1, 1), stride=(1, 1))
      (1): ReLU()
      (2): Squeeze(dim=1)
    )
    (conv1dblock): Sequential(
      (0): Conv1d(24, 128, kernel_size=(51,), stride=(1,), padding=(25,))
      (1): BatchNorm1d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (2): ReLU()
    )
    (conv1d1x1block): Sequential(
      (0): Conv1d(128, 1, kernel_size=(1,), stride=(1,))
      (1): ReLU()
    )
    (concat): Concat(dim=1)
    (conv1d): Sequential(
      (0): Conv1d(25, 128, kernel_size=(51,), stride=(1,), padding=(25,))
      (1): BatchNorm1d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (2): ReLU()
    )
  )
  (head): Sequential(
    (0): GAP1d(
      (gap): AdaptiveAvgPool1d(output_size=1)
      (flatten): Flatten(full=False)
    )
    (1): LinBnDrop(
      (0): Linear(in_features=128, out_features=6, bias=True)
    )
  )
)
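XCM was designed with explainability in mind: Grad-CAM attribution maps can be extracted from both the 2D (per-variable) and 1D (temporal) branches. show_gradcam plots them for a single sample and its label: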
model.show_gradcam(xb[0], yb[0])
bs = 16
n_vars = 3
seq_len = 12
c_out = 10
xb = torch.rand(bs, n_vars, seq_len)
new_head = partial(conv_lin_3d_head, d=(5, 2))
net = XCMPlus(n_vars, c_out, seq_len, custom_head=new_head)
print(net(xb).shape)
net.head
torch.Size([16, 5, 2])
create_conv_lin_3d_head(
  (0): BatchNorm1d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
  (1): Conv1d(128, 5, kernel_size=(1,), stride=(1,), bias=False)
  (2): Transpose(-1, -2)
  (3): BatchNorm1d(12, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
  (4): Transpose(-1, -2)
  (5): Linear(in_features=12, out_features=2, bias=False)
)
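As the printed head confirms, custom_head is called with (nf, c_out, seq_len), so any callable with that signature works. A hand-rolled head follows the same pattern (a minimal sketch assuming that calling convention; my_head is a hypothetical helper, not part of tsai):

import torch.nn as nn
from tsai.models.layers import GAP1d

def my_head(nf, c_out, seq_len):  # hypothetical: global average pooling + linear
    return nn.Sequential(GAP1d(1), nn.Linear(nf, c_out))

net = XCMPlus(n_vars, c_out, seq_len, custom_head=my_head)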
bs = 16
n_vars = 3
seq_len = 12
c_out = 2
xb = torch.rand(bs, n_vars, seq_len)
net = XCMPlus(n_vars, c_out, seq_len)
change_model_head(net, create_pool_plus_head, concat_pool=False)
print(net(xb).shape)
net.head
torch.Size([16, 2])
Sequential(
  (0): AdaptiveAvgPool1d(output_size=1)
  (1): Flatten(full=False)
  (2): BatchNorm1d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
  (3): Linear(in_features=128, out_features=512, bias=False)
  (4): ReLU(inplace=True)
  (5): BatchNorm1d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
  (6): Linear(in_features=512, out_features=2, bias=False)
)
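change_model_head replaces net.head in place, forwarding any extra keyword arguments (concat_pool=False here) to the head constructor, so the backbone weights are kept while only the head changes.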