[DOCS] Docstrings (#1279)
elephaint authored Feb 27, 2025
1 parent bb315be commit 0adcb58
Showing 66 changed files with 640 additions and 457 deletions.
13 changes: 8 additions & 5 deletions nbs/models.autoformer.ipynb
@@ -445,8 +445,9 @@
"\t`activation`: str=`GELU`, activation from ['ReLU', 'Softplus', 'Tanh', 'SELU', 'LeakyReLU', 'PReLU', 'Sigmoid', 'GELU'].<br>\n",
" `encoder_layers`: int=2, number of layers for the TCN encoder.<br>\n",
" `decoder_layers`: int=1, number of layers for the MLP decoder.<br>\n",
" `distil`: bool = True, wether the Autoformer decoder uses bottlenecks.<br>\n",
" `MovingAvg_window`: int=25, window size for the moving average filter.<br>\n",
" `loss`: PyTorch module, instantiated train loss class from [losses collection](https://nixtla.github.io/neuralforecast/losses.pytorch.html).<br>\n",
" `valid_loss`: PyTorch module, instantiated validation loss class from [losses collection](https://nixtla.github.io/neuralforecast/losses.pytorch.html).<br>\n",
" `max_steps`: int=1000, maximum number of training steps.<br>\n",
" `learning_rate`: float=1e-3, Learning rate between (0, 1).<br>\n",
" `num_lr_decays`: int=-1, Number of learning rate decays, evenly distributed across max_steps.<br>\n",
Expand All @@ -460,7 +461,7 @@
" `scaler_type`: str='robust', type of scaler for temporal inputs normalization see [temporal scalers](https://nixtla.github.io/neuralforecast/common.scalers.html).<br>\n",
" `random_seed`: int=1, random_seed for pytorch initializer and numpy generators.<br>\n",
" `drop_last_loader`: bool=False, if True `TimeSeriesDataLoader` drops last non-full batch.<br>\n",
" `alias`: str, optional, Custom name of the model.<br>\n",
" `alias`: str, optional, Custom name of the model.<br>\n",
" `optimizer`: Subclass of 'torch.optim.Optimizer', optional, user specified optimizer instead of the default choice (Adam).<br>\n",
" `optimizer_kwargs`: dict, optional, list of parameters used by the user specified `optimizer`.<br>\n",
" `lr_scheduler`: Subclass of 'torch.optim.lr_scheduler.LRScheduler', optional, user specified lr_scheduler instead of the default choice (StepLR).<br>\n",
@@ -511,6 +512,7 @@
" scaler_type: str = 'identity',\n",
" random_seed: int = 1,\n",
" drop_last_loader: bool = False,\n",
" alias: Optional[str] = None,\n",
" optimizer = None,\n",
" optimizer_kwargs = None,\n",
" lr_scheduler = None,\n",
@@ -519,8 +521,8 @@
" **trainer_kwargs):\n",
" super(Autoformer, self).__init__(h=h,\n",
" input_size=input_size,\n",
" hist_exog_list=hist_exog_list,\n",
" stat_exog_list=stat_exog_list,\n",
" hist_exog_list=hist_exog_list,\n",
" futr_exog_list = futr_exog_list,\n",
" exclude_insample_y = exclude_insample_y,\n",
" loss=loss,\n",
@@ -531,14 +533,15 @@
" early_stop_patience_steps=early_stop_patience_steps,\n",
" val_check_steps=val_check_steps,\n",
" batch_size=batch_size,\n",
" windows_batch_size=windows_batch_size,\n",
" valid_batch_size=valid_batch_size,\n",
" windows_batch_size=windows_batch_size,\n",
" inference_windows_batch_size=inference_windows_batch_size,\n",
" start_padding_enabled = start_padding_enabled,\n",
" step_size=step_size,\n",
" scaler_type=scaler_type,\n",
" drop_last_loader=drop_last_loader,\n",
" random_seed=random_seed,\n",
" drop_last_loader=drop_last_loader,\n",
" alias=alias,\n",
" optimizer=optimizer,\n",
" optimizer_kwargs=optimizer_kwargs,\n",
" lr_scheduler=lr_scheduler,\n",
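
For context, a minimal usage sketch (not taken from this commit) of how the `alias` argument now accepted by Autoformer might be passed; the horizon, input_size, max_steps, and alias values below are illustrative assumptions, not library defaults.

from neuralforecast import NeuralForecast
from neuralforecast.models import Autoformer
from neuralforecast.utils import AirPassengersDF

# `alias` renames the model, so its forecasts appear under that column name.
model = Autoformer(h=12, input_size=24, max_steps=5, alias='Autoformer_docs')
nf = NeuralForecast(models=[model], freq='M')
nf.fit(df=AirPassengersDF)
forecasts = nf.predict()  # predictions in the 'Autoformer_docs' column
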
2 changes: 2 additions & 0 deletions nbs/models.bitcn.ipynb
@@ -223,6 +223,7 @@
" scaler_type: str = 'identity',\n",
" random_seed: int = 1,\n",
" drop_last_loader: bool = False,\n",
" alias: Optional[str] = None,\n",
" optimizer = None,\n",
" optimizer_kwargs = None,\n",
" lr_scheduler = None,\n",
@@ -252,6 +253,7 @@
" scaler_type=scaler_type,\n",
" random_seed=random_seed,\n",
" drop_last_loader=drop_last_loader,\n",
" alias=alias,\n",
" optimizer=optimizer,\n",
" optimizer_kwargs=optimizer_kwargs,\n",
" lr_scheduler=lr_scheduler,\n",
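
A similar hedged sketch for BiTCN, assuming the same public API: distinct `alias` values let two configurations of the same architecture coexist in one NeuralForecast run with distinguishable output columns. All hyperparameter values are illustrative.

from neuralforecast import NeuralForecast
from neuralforecast.models import BiTCN
from neuralforecast.utils import AirPassengersDF

# Two configurations of the same architecture, disambiguated by `alias`.
models = [
    BiTCN(h=12, input_size=24, hidden_size=16, max_steps=5, alias='BiTCN_small'),
    BiTCN(h=12, input_size=48, hidden_size=32, max_steps=5, alias='BiTCN_large'),
]
nf = NeuralForecast(models=models, freq='M')
nf.fit(df=AirPassengersDF)
forecasts = nf.predict()  # columns 'BiTCN_small' and 'BiTCN_large'
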
16 changes: 9 additions & 7 deletions nbs/models.deepar.ipynb
@@ -153,7 +153,7 @@
"\n",
" **Parameters:**<br>\n",
" `h`: int, Forecast horizon. <br>\n",
" `input_size`: int, autorregresive inputs size, y=[1,2,3,4] input_size=2 -> y_[t-2:t]=[1,2].<br>\n",
" `input_size`: int, maximum sequence length for truncated train backpropagation. Default -1 uses 3 * horizon <br>\n",
" `lstm_n_layers`: int=2, number of LSTM layers.<br>\n",
" `lstm_hidden_size`: int=128, LSTM hidden size.<br>\n",
" `lstm_dropout`: float=0.1, LSTM dropout.<br>\n",
@@ -209,9 +209,9 @@
" decoder_hidden_layers: int = 0,\n",
" decoder_hidden_size: int = 0,\n",
" trajectory_samples: int = 100,\n",
" futr_exog_list = None,\n",
" hist_exog_list = None,\n",
" stat_exog_list = None,\n",
" hist_exog_list = None,\n",
" futr_exog_list = None,\n",
" exclude_insample_y = False,\n",
" loss = DistributionLoss(distribution='StudentT', level=[80, 90], return_params=False),\n",
" valid_loss = MAE(),\n",
@@ -229,6 +229,7 @@
" scaler_type: str = 'identity',\n",
" random_seed: int = 1,\n",
" drop_last_loader = False,\n",
" alias: Optional[str] = None,\n",
" optimizer = None,\n",
" optimizer_kwargs = None,\n",
" lr_scheduler = None,\n",
@@ -242,9 +243,9 @@
" # Inherit BaseWindows class\n",
" super(DeepAR, self).__init__(h=h,\n",
" input_size=input_size,\n",
" futr_exog_list=futr_exog_list,\n",
" hist_exog_list=hist_exog_list,\n",
" stat_exog_list=stat_exog_list,\n",
" hist_exog_list=hist_exog_list,\n",
" futr_exog_list=futr_exog_list,\n",
" exclude_insample_y = exclude_insample_y,\n",
" loss=loss,\n",
" valid_loss=valid_loss,\n",
@@ -254,14 +255,15 @@
" early_stop_patience_steps=early_stop_patience_steps,\n",
" val_check_steps=val_check_steps,\n",
" batch_size=batch_size,\n",
" windows_batch_size=windows_batch_size,\n",
" valid_batch_size=valid_batch_size,\n",
" windows_batch_size=windows_batch_size,\n",
" inference_windows_batch_size=inference_windows_batch_size,\n",
" start_padding_enabled=start_padding_enabled,\n",
" step_size=step_size,\n",
" scaler_type=scaler_type,\n",
" drop_last_loader=drop_last_loader,\n",
" random_seed=random_seed,\n",
" drop_last_loader=drop_last_loader,\n",
" alias=alias,\n",
" optimizer=optimizer,\n",
" optimizer_kwargs=optimizer_kwargs,\n",
" lr_scheduler=lr_scheduler,\n",
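
A hedged sketch of DeepAR usage consistent with the constructor shown above; the DistributionLoss settings mirror the default in the diff, while h, input_size, max_steps, and the alias string are assumptions.

from neuralforecast import NeuralForecast
from neuralforecast.models import DeepAR
from neuralforecast.losses.pytorch import DistributionLoss
from neuralforecast.utils import AirPassengersDF

# Probabilistic forecast: level=[80, 90] adds -lo-/-hi- interval columns
# next to the point forecast named after the alias.
model = DeepAR(h=12, input_size=24,
               lstm_n_layers=2, lstm_hidden_size=64,
               trajectory_samples=100,
               loss=DistributionLoss(distribution='StudentT', level=[80, 90]),
               max_steps=5, alias='DeepAR_studentt')
nf = NeuralForecast(models=[model], freq='M')
nf.fit(df=AirPassengersDF)
forecasts = nf.predict()
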
16 changes: 9 additions & 7 deletions nbs/models.deepnpts.ipynb
@@ -142,14 +142,14 @@
" \n",
" def __init__(self,\n",
" h,\n",
" input_size: int = -1,\n",
" input_size: int,\n",
" hidden_size: int = 32,\n",
" batch_norm: bool = True,\n",
" dropout: float = 0.1,\n",
" n_layers: int = 2,\n",
" futr_exog_list = None,\n",
" hist_exog_list = None,\n",
" stat_exog_list = None,\n",
" hist_exog_list = None,\n",
" futr_exog_list = None,\n",
" exclude_insample_y = False,\n",
" loss = MAE(),\n",
" valid_loss = MAE(),\n",
@@ -167,6 +167,7 @@
" scaler_type: str = 'standard',\n",
" random_seed: int = 1,\n",
" drop_last_loader = False,\n",
" alias: Optional[str] = None,\n",
" optimizer = None,\n",
" optimizer_kwargs = None,\n",
" lr_scheduler = None,\n",
@@ -186,9 +187,9 @@
" # Inherit BaseWindows class\n",
" super(DeepNPTS, self).__init__(h=h,\n",
" input_size=input_size,\n",
" futr_exog_list=futr_exog_list,\n",
" hist_exog_list=hist_exog_list,\n",
" stat_exog_list=stat_exog_list,\n",
" hist_exog_list=hist_exog_list,\n",
" futr_exog_list=futr_exog_list,\n",
" exclude_insample_y = exclude_insample_y,\n",
" loss=loss,\n",
" valid_loss=valid_loss,\n",
@@ -198,14 +199,15 @@
" early_stop_patience_steps=early_stop_patience_steps,\n",
" val_check_steps=val_check_steps,\n",
" batch_size=batch_size,\n",
" windows_batch_size=windows_batch_size,\n",
" valid_batch_size=valid_batch_size,\n",
" windows_batch_size=windows_batch_size,\n",
" inference_windows_batch_size=inference_windows_batch_size,\n",
" start_padding_enabled=start_padding_enabled,\n",
" step_size=step_size,\n",
" scaler_type=scaler_type,\n",
" drop_last_loader=drop_last_loader,\n",
" random_seed=random_seed,\n",
" drop_last_loader=drop_last_loader,\n",
" alias=alias,\n",
" optimizer=optimizer,\n",
" optimizer_kwargs=optimizer_kwargs,\n",
" lr_scheduler=lr_scheduler,\n",
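
A hedged DeepNPTS sketch, passing input_size explicitly to match the signature shown in the diff; all hyperparameter values are illustrative assumptions.

from neuralforecast import NeuralForecast
from neuralforecast.models import DeepNPTS
from neuralforecast.utils import AirPassengersDF

# input_size passed explicitly; hidden_size and n_layers as in the docstring defaults.
model = DeepNPTS(h=12, input_size=24, hidden_size=32, n_layers=2,
                 max_steps=5, alias='DeepNPTS_base')
nf = NeuralForecast(models=[model], freq='M')
nf.fit(df=AirPassengersDF)
forecasts = nf.predict()
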
15 changes: 11 additions & 4 deletions nbs/models.dilated_rnn.ipynb
@@ -376,8 +376,8 @@
"\n",
" **Parameters:**<br>\n",
" `h`: int, forecast horizon.<br>\n",
" `input_size`: int, maximum sequence length for truncated train backpropagation. Default -1 uses all history.<br>\n",
" `inference_input_size`: int, maximum sequence length for truncated inference. Default -1 uses all history.<br>\n",
" `input_size`: int, maximum sequence length for truncated train backpropagation. Default -1 uses 3 * horizon <br>\n",
" `inference_input_size`: int, maximum sequence length for truncated inference. Default None uses input_size history.<br>\n",
" `cell_type`: str, type of RNN cell to use. Options: 'GRU', 'RNN', 'LSTM', 'ResLSTM', 'AttentiveLSTM'.<br>\n",
" `dilations`: int list, dilations betweem layers.<br>\n",
" `encoder_hidden_size`: int=200, units for the RNN's hidden state size.<br>\n",
@@ -387,6 +387,7 @@
" `futr_exog_list`: str list, future exogenous columns.<br>\n",
" `hist_exog_list`: str list, historic exogenous columns.<br>\n",
" `stat_exog_list`: str list, static exogenous columns.<br>\n",
" `exclude_insample_y`: bool=False, the model skips the autoregressive features y[t-input_size:t] if True.<br>\n",
" `loss`: PyTorch module, instantiated train loss class from [losses collection](https://nixtla.github.io/neuralforecast/losses.pytorch.html).<br>\n",
" `valid_loss`: PyTorch module=`loss`, instantiated valid loss class from [losses collection](https://nixtla.github.io/neuralforecast/losses.pytorch.html).<br>\n",
" `max_steps`: int, maximum number of training steps.<br>\n",
@@ -396,6 +397,9 @@
" `val_check_steps`: int, Number of training steps between every validation loss check.<br>\n",
" `batch_size`: int=32, number of different series in each batch.<br>\n",
" `valid_batch_size`: int=None, number of different series in each validation and test batch.<br>\n",
" `windows_batch_size`: int=128, number of windows to sample in each training batch, default uses all.<br>\n",
" `inference_windows_batch_size`: int=1024, number of windows to sample in each inference batch, -1 uses all.<br>\n",
" `start_padding_enabled`: bool=False, if True, the model will pad the time series with zeros at the beginning, by input size.<br> \n",
" `step_size`: int=1, step size between each window of temporal data.<br>\n",
" `scaler_type`: str='robust', type of scaler for temporal inputs normalization see [temporal scalers](https://nixtla.github.io/neuralforecast/common.scalers.html).<br>\n",
" `random_seed`: int=1, random_seed for pytorch initializer and numpy generators.<br>\n",
@@ -417,8 +421,8 @@
"\n",
" def __init__(self,\n",
" h: int,\n",
" input_size: int,\n",
" inference_input_size: int = -1,\n",
" input_size: int = -1,\n",
" inference_input_size: Optional[int] = None,\n",
" cell_type: str = 'LSTM',\n",
" dilations: List[List[int]] = [[1, 2], [4, 8]],\n",
" encoder_hidden_size: int = 128,\n",
@@ -445,6 +449,7 @@
" scaler_type: str = 'robust',\n",
" random_seed: int = 1,\n",
" drop_last_loader: bool = False,\n",
" alias: Optional[str] = None,\n",
" optimizer = None,\n",
" optimizer_kwargs = None,\n",
" lr_scheduler = None,\n",
@@ -454,6 +459,7 @@
" super(DilatedRNN, self).__init__(\n",
" h=h,\n",
" input_size=input_size,\n",
" inference_input_size=inference_input_size,\n",
" futr_exog_list=futr_exog_list,\n",
" hist_exog_list=hist_exog_list,\n",
" stat_exog_list=stat_exog_list,\n",
@@ -474,6 +480,7 @@
" scaler_type=scaler_type,\n",
" random_seed=random_seed,\n",
" drop_last_loader=drop_last_loader,\n",
" alias=alias,\n",
" optimizer=optimizer,\n",
" optimizer_kwargs=optimizer_kwargs,\n",
" lr_scheduler=lr_scheduler,\n",
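
A hedged DilatedRNN sketch relying on the updated docstring defaults (input_size=-1 meaning 3 * horizon, inference_input_size=None falling back to input_size); the remaining hyperparameters are illustrative.

from neuralforecast import NeuralForecast
from neuralforecast.models import DilatedRNN
from neuralforecast.utils import AirPassengersDF

# input_size=-1 uses the documented default of 3 * h;
# inference_input_size is left at None to fall back to input_size.
model = DilatedRNN(h=12, input_size=-1,
                   cell_type='LSTM', dilations=[[1, 2], [4, 8]],
                   encoder_hidden_size=128,
                   max_steps=5, alias='DilatedRNN_lstm')
nf = NeuralForecast(models=[model], freq='M')
nf.fit(df=AirPassengersDF)
forecasts = nf.predict()
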
10 changes: 7 additions & 3 deletions nbs/models.dlinear.ipynb
@@ -143,13 +143,14 @@
"\n",
" *Parameters:*<br>\n",
" `h`: int, forecast horizon.<br>\n",
" `input_size`: int, maximum sequence length for truncated train backpropagation. Default -1 uses all history.<br>\n",
" `futr_exog_list`: str list, future exogenous columns.<br>\n",
" `hist_exog_list`: str list, historic exogenous columns.<br>\n",
" `input_size`: int, maximum sequence length for truncated train backpropagation. <br>\n",
" `stat_exog_list`: str list, static exogenous columns.<br>\n",
" `hist_exog_list`: str list, historic exogenous columns.<br>\n",
" `futr_exog_list`: str list, future exogenous columns.<br>\n",
" `exclude_insample_y`: bool=False, the model skips the autoregressive features y[t-input_size:t] if True.<br>\n",
" `moving_avg_window`: int=25, window size for trend-seasonality decomposition. Should be uneven.<br>\n",
" `loss`: PyTorch module, instantiated train loss class from [losses collection](https://nixtla.github.io/neuralforecast/losses.pytorch.html).<br>\n",
" `valid_loss`: PyTorch module=`loss`, instantiated valid loss class from [losses collection](https://nixtla.github.io/neuralforecast/losses.pytorch.html).<br>\n",
" `max_steps`: int=1000, maximum number of training steps.<br>\n",
" `learning_rate`: float=1e-3, Learning rate between (0, 1).<br>\n",
" `num_lr_decays`: int=-1, Number of learning rate decays, evenly distributed across max_steps.<br>\n",
@@ -160,6 +161,7 @@
" `windows_batch_size`: int=1024, number of windows to sample in each training batch, default uses all.<br>\n",
" `inference_windows_batch_size`: int=1024, number of windows to sample in each inference batch.<br>\n",
" `start_padding_enabled`: bool=False, if True, the model will pad the time series with zeros at the beginning, by input size.<br>\n",
" `step_size`: int=1, step size between each window of temporal data.<br>\n",
" `scaler_type`: str='robust', type of scaler for temporal inputs normalization see [temporal scalers](https://nixtla.github.io/neuralforecast/common.scalers.html).<br>\n",
" `random_seed`: int=1, random_seed for pytorch initializer and numpy generators.<br>\n",
" `drop_last_loader`: bool=False, if True `TimeSeriesDataLoader` drops last non-full batch.<br>\n",
@@ -205,6 +207,7 @@
" scaler_type: str = 'identity',\n",
" random_seed: int = 1,\n",
" drop_last_loader: bool = False,\n",
" alias: Optional[str] = None,\n",
" optimizer = None,\n",
" optimizer_kwargs = None,\n",
" lr_scheduler = None,\n",
@@ -232,6 +235,7 @@
" step_size=step_size,\n",
" scaler_type=scaler_type,\n",
" drop_last_loader=drop_last_loader,\n",
" alias=alias,\n",
" random_seed=random_seed,\n",
" optimizer=optimizer,\n",
" optimizer_kwargs=optimizer_kwargs,\n",
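
A hedged DLinear sketch: moving_avg_window=25 follows the documented default and, per the docstring, should be an odd number; the other values are assumptions.

from neuralforecast import NeuralForecast
from neuralforecast.models import DLinear
from neuralforecast.utils import AirPassengersDF

# Odd moving_avg_window keeps the trend filter centered.
model = DLinear(h=12, input_size=24, moving_avg_window=25,
                max_steps=5, alias='DLinear_ma25')
nf = NeuralForecast(models=[model], freq='M')
nf.fit(df=AirPassengersDF)
forecasts = nf.predict()
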
