---
title: Common components for models
keywords: fastai
sidebar: home_sidebar
summary: "Common functions."
description: "Common functions."
nb_path: "nbs/models_components__common.ipynb"
---
{% raw %}
{% endraw %} {% raw %}
{% endraw %}

Chomp1d

{% raw %}

class Chomp1d[source]

Chomp1d(chomp_size) :: Module

Receives an input x of dim [N,C,T] and trims it so that only 'time available' information is used. Used for one-dimensional causal convolutions.

Parameters

chomp_size: int, length of outsample values to skip.

{% endraw %} {% raw %}
{% endraw %}
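A minimal usage sketch, assuming Chomp1d simply drops the last chomp_size time steps (as in standard temporal convolutional networks); tensor sizes are illustrative only:

{% raw %}
import torch

# Chomp1d assumed in scope, as in this notebook
x = torch.randn(16, 8, 24)    # [N, C, T]
chomp = Chomp1d(chomp_size=2)
y = chomp(x)                  # trailing 'outsample' steps removed along T
print(y.shape)                # expected: torch.Size([16, 8, 22])
{% endraw %}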

CausalConv1d

{% raw %}

class CausalConv1d[source]

CausalConv1d(in_channels, out_channels, kernel_size, padding, dilation, activation, stride:int=1, with_weight_norm:bool=False) :: Module

Receives x input of dim [N,C,T], computes a unidimensional causal convolution.

Parameters

in_channels: int
out_channels: int
kernel_size: int
padding: int
dilation: int
activation: str, activation name (see https://discuss.pytorch.org/t/call-activation-function-from-string)
stride: int, default 1
with_weight_norm: bool, default False

Returns: x, a torch tensor of dim [N,C,T], activation(conv1d(inputs, kernel) + bias).

{% endraw %} {% raw %}
{% endraw %}
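A hedged sketch of how this layer might be configured. The shapes, the 'ReLU' activation string, and the assumption that the layer trims its trailing padding (e.g. via Chomp1d) are illustrative; for a causal convolution the padding is typically set to (kernel_size - 1) * dilation so that the output keeps the input length.

{% raw %}
import torch

# CausalConv1d assumed in scope, as in this notebook
kernel_size, dilation = 3, 2
conv = CausalConv1d(in_channels=8, out_channels=16,
                    kernel_size=kernel_size,
                    padding=(kernel_size - 1) * dilation,  # standard causal padding
                    dilation=dilation,
                    activation='ReLU')
x = torch.randn(16, 8, 24)    # [N, C, T]
y = conv(x)                   # [N, C_out, T] if trailing padding is chomped
print(y.shape)
{% endraw %}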

TimeDistributed

{% raw %}

class TimeDistributed2d[source]

TimeDistributed2d(module) :: Module

Receives an input x of dim [N,C,T] and reshapes it to [T,N,C]. Collapses the [T,N,C] input to [TxN,C] and applies module to the C dimension. Finally reshapes the result to [N,C_out,T]. Allows handling of variable sequence lengths and minibatch sizes.

Parameters

module: Module to apply to the input.

{% endraw %} {% raw %}
{% endraw %} {% raw %}

class TimeDistributed3d[source]

TimeDistributed3d(module) :: Module

Receives an input x of dim [N,L,C,T] and reshapes it to [T,N,L,C]. Collapses the [T,N,L,C] input to [TxNxL,C] and applies module to the C dimension. Finally reshapes the result to [N,L,C_out,T]. Allows handling of variable sequence lengths and minibatch sizes.

Parameters

module: Module to apply to the input.

{% endraw %} {% raw %}
{% endraw %}
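A short sketch of wrapping a pointwise module with TimeDistributed2d (the nn.Linear module and all sizes are illustrative assumptions): the wrapped module only sees a 2d [TxN,C] view, so an ordinary feed-forward layer is applied independently at every time step.

{% raw %}
import torch
from torch import nn

# TimeDistributed2d assumed in scope, as in this notebook
td = TimeDistributed2d(nn.Linear(in_features=8, out_features=4))
x = torch.randn(16, 8, 24)    # [N, C, T]
y = td(x)                     # linear layer applied at every time step
print(y.shape)                # expected: torch.Size([16, 4, 24])
{% endraw %}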

RepeatVector

{% raw %}

class RepeatVector[source]

RepeatVector(repeats) :: Module

Receives an input x of dim [N,C] and repeats the vector to create a tensor of shape [N,C,K], where K = repeats.

Parameters

repeats: int, the number of repetitions for the vector.

{% endraw %} {% raw %}
{% endraw %}
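A quick usage sketch (sizes are illustrative); this is handy, for example, for tiling a static encoding across a forecast horizon.

{% raw %}
import torch

# RepeatVector assumed in scope, as in this notebook
rv = RepeatVector(repeats=12)
x = torch.randn(16, 8)        # [N, C]
y = rv(x)                     # vector tiled along a new last axis
print(y.shape)                # expected: torch.Size([16, 8, 12])
{% endraw %}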

L1Regularizer

{% raw %}

class L1Regularizer[source]

L1Regularizer(in_features, l1_lambda) :: Module

Layer meant to apply elementwise L1 regularization to a dimension. Receives an input x of dim [N,C] and returns an output of dim [N,C]; its regularization() method returns the L1 penalty, to be added to the training loss (as in the example below).

{% endraw %} {% raw %}
{% endraw %} {% raw %}
import numpy as np
from sklearn import linear_model

np.random.seed(1)

X1  = np.random.normal(0, 1, (1000,1))
X   = np.random.normal(0, 1, (1000, 99))
X   = np.concatenate([X1, X], axis=1)
eps = np.random.normal(0, 0.1, (1000))
beta = np.array([1] + [0]*99)
Y =  X @ beta.T + eps
Y = np.expand_dims(Y, 1)
print("X.shape", X.shape)
print("beta.shape", beta.shape)
print("Y.shape", Y.shape)

# model = linear_model.Lasso(alpha=0.1)
# model.fit(X, Y)
# print("model.coef_.shape", model.coef_.shape)
# model.coef_
X.shape (1000, 100)
beta.shape (100,)
Y.shape (1000, 1)
{% endraw %} {% raw %}
from torch import nn, optim
from torch.utils.data import DataLoader, Dataset

import numpy as np
import time
from scipy.stats import hmean
import matplotlib.pyplot as plt
%matplotlib inline
{% endraw %} {% raw %}
class _Model(nn.Module):  

    def __init__(self, in_features, l1_lambda):
        super(_Model, self).__init__()
        self.l1 = L1Regularizer(in_features, l1_lambda)
        self.linear_layer = nn.Linear(in_features=in_features, 
                                      out_features=1, 
                                      bias=False)

    def forward(self, x):
        x = self.l1(x.float())
        y_hat = self.linear_layer(x)
        return y_hat
    
class Data(Dataset):
    
    # Constructor
    def __init__(self, Y, X):
        self.X = X
        self.Y = Y
        self.len = Y.shape[0]

    # Getter
    def __getitem__(self, index):          
        return self.X[index], self.Y[index]
    
    # Get Length
    def __len__(self):
        return self.len
{% endraw %} {% raw %}
model = _Model(in_features=X.shape[1], l1_lambda=0.07)
dataloader = DataLoader(dataset=Data(X=X, Y=Y), batch_size=512)
optimizer = optim.Adam(model.parameters(), lr=0.001)

print(model)

def train_model(model, epochs, print_progress=False):
    start = time.time()
    step = 0 
    training_trajectory = {'epoch': [],
                           'train_loss': []}
    
    criterion = nn.MSELoss()
    
    for epoch in range(epochs):
        for x, y in dataloader:
            x, y = x.float(), y.float() # Type compatibility
            
            step += 1
            y_hat = model(x)
            
            training_loss = criterion(y, y_hat) + model.l1.regularization()
            
            optimizer.zero_grad()
            training_loss.backward()
            optimizer.step()
            
        if epoch % 100 == 0: 
            training_trajectory['epoch'].append(epoch)
            train_loss = training_loss.detach().numpy()
            training_trajectory['train_loss'].append(train_loss)

            
            display_str = f'epoch: {epoch} step: {step} time: {time.time()-start:03.3f} ** '
            display_str += f'train_loss: {train_loss:.4f}'
            print(display_str)
            
    return model, training_trajectory
_Model(
  (l1): L1Regularizer()
  (linear_layer): Linear(in_features=100, out_features=1, bias=False)
)
{% endraw %} {% raw %}
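# Hypothetical training run (the epoch count is an illustrative assumption);
# it returns the training_trajectory used by the plot below.
# model, training_trajectory = train_model(model, epochs=1000)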
# plt.plot(training_trajectory['epoch'], training_trajectory['train_loss'])
# plt.xlabel('Epochs')
# plt.ylabel('MSE + L1 Loss')
# plt.grid()
# plt.show()
# model.l1.weight
{% endraw %}