This module contains extensions to the fastai `Learner` class for time-series tasks.

Learner.show_batch[source]

Learner.show_batch(**kwargs)

Learner.one_batch[source]

Learner.one_batch(i, b)

Learner.save_all[source]

Learner.save_all(path='export', dls_fname='dls', model_fname='model', learner_fname='learner', verbose=False)

load_all[source]

load_all(path='export', dls_fname='dls', model_fname='model', learner_fname='learner', device=None, pickle_module=pickle, verbose=False)

Recorder.plot_metrics[source]

Recorder.plot_metrics(nrows=None, ncols=None, figsize=None, final_losses=True, perc=0.5, imsize=3, suptitle=None, sharex=False, sharey=False, squeeze=True, subplot_kw=None, gridspec_kw=None)

Learner.plot_metrics[source]

Learner.plot_metrics(nrows=1, ncols=1, figsize=None, imsize=3, suptitle=None, sharex=False, sharey=False, squeeze=True, subplot_kw=None, gridspec_kw=None)

Learner.show_probas[source]

Learner.show_probas(figsize=(6, 6), ds_idx=1, dl=None, one_batch=False, max_n=None, nrows=1, ncols=1, imsize=3, suptitle=None, sharex=False, sharey=False, squeeze=True, subplot_kw=None, gridspec_kw=None)

ts_learner[source]

ts_learner(dls, arch=None, c_in=None, c_out=None, seq_len=None, d=None, splitter=trainable_params, loss_func=None, opt_func=Adam, lr=0.001, cbs=None, metrics=None, path=None, model_dir='models', wd=None, wd_bn_bias=False, train_bn=True, moms=(0.95, 0.85, 0.95), device=None, verbose=False, pretrained=False, weights_path=None, exclude_head=True, cut=-1, init=None)

tsimage_learner[source]

tsimage_learner(dls, arch=None, pretrained=False, loss_func=None, opt_func=Adam, lr=0.001, cbs=None, metrics=None, path=None, model_dir='models', wd=None, wd_bn_bias=False, train_bn=True, moms=(0.95, 0.85, 0.95), c_in=None, c_out=None, device=None, verbose=False, init=None, p=0.0, n_out=1000, stem_szs=(32, 32, 64), widen=1.0, sa=False, act_cls=ReLU, ndim=2, ks=3, stride=2, groups=1, reduction=None, nh1=None, nh2=None, dw=False, g2=1, sym=False, norm_type=<NormType.Batch: 1>, pool=AvgPool, pool_first=True, padding=None, bias=None, bn_1st=True, transpose=False, xtra=None, bias_std=0.01, dilation:Union[int, Tuple[int, int]]=1, padding_mode:str='zeros')

Learner.decoder[source]

Learner.decoder(o)

Learner.get_X_preds[source]

Learner.get_X_preds(X, y=None, with_input=False, with_decoded=True, with_loss=False, save_preds=None, save_targs=None, concat_dim=0)

from tsai.data.all import *
from tsai.data.core import *
from tsai.models.FCNPlus import *
dsid = 'OliveOil'
X, y, splits = get_UCR_data(dsid, verbose=True, split_data=False)
tfms  = [None, [Categorize()]]
dls = get_ts_dls(X, y, splits=splits, tfms=tfms)
learn = ts_learner(dls, FCNPlus)
for p in learn.model.parameters():
    p.requires_grad=False
test_eq(count_parameters(learn.model), 0)
learn.freeze()
test_eq(count_parameters(learn.model), 1540)
learn.unfreeze()
test_eq(count_parameters(learn.model), 264580)
Dataset: OliveOil
X      : (60, 1, 570)
y      : (60,)
splits : (#30) [0,1,2,3,4,5,6,7,8,9...] (#30) [30,31,32,33,34,35,36,37,38,39...] 

learn.show_batch();
learn.fit_one_cycle(2, lr_max=1e-3)
epoch train_loss valid_loss time
0 1.376175 1.302689 00:04
1 1.373613 1.304578 00:03
dsid = 'OliveOil'
X, y, splits = get_UCR_data(dsid, split_data=False)
tfms  = [None, [Categorize()]]
dls = get_ts_dls(X, y, tfms=tfms, splits=splits)
learn = ts_learner(dls, FCNPlus, metrics=accuracy)
learn.fit_one_cycle(2)
learn.plot_metrics()
learn.show_probas()
epoch train_loss valid_loss accuracy time
0 1.411673 1.390990 0.166667 00:03
1 1.408559 1.363262 0.400000 00:04
learn.save_all()
del learn
learn = load_all()
test_probas, test_targets, test_preds = learn.get_X_preds(X[0:10], with_decoded=True)
test_probas, test_targets, test_preds
(tensor([[0.2530, 0.2430, 0.2254, 0.2785],
         [0.2530, 0.2430, 0.2254, 0.2786],
         [0.2530, 0.2431, 0.2254, 0.2786],
         [0.2530, 0.2431, 0.2254, 0.2785],
         [0.2530, 0.2431, 0.2254, 0.2785],
         [0.2529, 0.2430, 0.2254, 0.2786],
         [0.2529, 0.2430, 0.2254, 0.2786],
         [0.2529, 0.2430, 0.2254, 0.2787],
         [0.2529, 0.2431, 0.2254, 0.2786],
         [0.2529, 0.2430, 0.2254, 0.2786]]),
 None,
 tensor([3, 3, 3, 3, 3, 3, 3, 3, 3, 3]))
learn.fit_one_cycle(1, lr_max=1e-3)
epoch train_loss valid_loss accuracy time
0 1.365514 1.360358 0.400000 00:03