Merge pull request #31 from WenjieDu/fix_config_err_for_NNI_tuning
Fixing errors in config files for NNI tuning
WenjieDu authored Nov 30, 2023
2 parents 21c80b2 + d130043 commit c64cec1
Showing 4 changed files with 26 additions and 20 deletions.
NNI_tuning/BRITS/BRITS_basic_config.ini (10 changes: 6 additions & 4 deletions)
@@ -42,16 +42,18 @@ model_type = BRITS
rnn_hidden_size = 1024

[training]
; whether to have masked imputation task in training
masked_imputation_task = False
; whether to have Masked Imputation Task (MIT) in training
MIT = False
; whether to have Observed Reconstruction Task (ORT) in training
ORT = True
; max num of training epochs
epochs = 100
; which device for training, cpu/cuda
device = cuda
; learning rate
lr = 0.001
; weight for regression loss
regression_loss_weight = 1
; weight for reconstruction loss
reconstruction_loss_weight = 1
; weight for imputation loss
imputation_loss_weight = 1
; weight for consistency loss, here we use to adjust the importance of consistency loss
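
For context on how these [training] options are consumed, below is a minimal sketch of reading the renamed keys with Python's standard-library configparser; the loading code is illustrative only and is not the repository's actual training script.

# Minimal sketch: read the renamed [training] options with configparser.
# Key names follow the diff above; the path and the way the values are used
# downstream are illustrative assumptions.
from configparser import ConfigParser

cfg = ConfigParser()
cfg.read("NNI_tuning/BRITS/BRITS_basic_config.ini")

training = cfg["training"]
MIT = training.getboolean("MIT")    # renamed from masked_imputation_task
ORT = training.getboolean("ORT")    # newly added flag
epochs = training.getint("epochs")
lr = training.getfloat("lr")
# renamed from regression_loss_weight in this commit
reconstruction_loss_weight = training.getfloat("reconstruction_loss_weight")
imputation_loss_weight = training.getfloat("imputation_loss_weight")

print(MIT, ORT, epochs, lr, reconstruction_loss_weight, imputation_loss_weight)
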
NNI_tuning/MRNN/MRNN_basic_config.ini (12 changes: 6 additions & 6 deletions)
@@ -42,20 +42,20 @@ model_type = MRNN
rnn_hidden_size = 1024

[training]
; whether to have masked imputation task in training
masked_imputation_task = False
; whether to have Masked Imputation Task (MIT) in training
MIT = False
; whether to have Observed Reconstruction Task (ORT) in training
ORT = True
; max num of training epochs
epochs = 100
; which device for training, cpu/cuda
device = cuda
; learning rate
lr = 0.001
; weight for regression loss
regression_loss_weight = 1
; weight for reconstruction loss
reconstruction_loss_weight = 1
; weight for imputation loss
imputation_loss_weight = 1
; weight for consistency loss, here we use to adjust the importance of consistency loss
consistency_loss_weight = 1
; patience of early stopping, -1 means not applied (current early stopping is based on total loss)
early_stop_patience = 30
; what type of optimizer to use, adam/adamw
NNI_tuning/SAITS/SAITS_basic_config.ini (12 changes: 7 additions & 5 deletions)
@@ -62,17 +62,19 @@ dropout = 0.1
diagonal_attention_mask = True

[training]
; whether to have masked imputation task in training
masked_imputation_task = True
; whether to have Masked Imputation Task (MIT) in training
MIT = True
; whether to have Observed Reconstruction Task (ORT) in training
ORT = True
; max num of training epochs
epochs = 100
; which device for training, cpu/cuda
device = cuda
; learning rate
lr = 0.001
; weight for regression loss, 0.3 is the default one from original paper
regression_loss_weight = 1
; weight for imputation loss, 0.3 is the default one from original paper
; weight for reconstruction loss
reconstruction_loss_weight = 1
; weight for imputation loss
imputation_loss_weight = 1
; patience of early stopping, -1 means not applied (current early stopping is based on total loss)
early_stop_patience = -1
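
Because these files are the base configs for NNI hyperparameter tuning, a rough sketch of how a trial script could overlay NNI-suggested values on top of SAITS_basic_config.ini follows; the merging logic and the example parameter are assumptions, not taken from this commit.

# Illustrative sketch: merge NNI-suggested hyperparameters into the base
# .ini config for one trial. Not the repository's actual tuning script.
from configparser import ConfigParser
import nni

cfg = ConfigParser()
cfg.read("NNI_tuning/SAITS/SAITS_basic_config.ini")

# NNI hands each trial one hyperparameter combination, e.g. {"lr": 0.0005}
params = nni.get_next_parameter()
for key, value in params.items():
    # overwrite the matching option in whichever section defines it
    for section in cfg.sections():
        if cfg.has_option(section, key):
            cfg.set(section, key, str(value))

lr = cfg["training"].getfloat("lr")
# ... build and train the model with the merged settings, then report the
# tuning metric back to NNI, e.g. nni.report_final_result(best_val_MAE)
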
NNI_tuning/Transformer/Transformer_basic_config.ini (12 changes: 7 additions & 5 deletions)
@@ -62,17 +62,19 @@ dropout = 0.1
diagonal_attention_mask = False

[training]
; whether to have masked imputation task in training
masked_imputation_task = True
; whether to have Masked Imputation Task (MIT) in training
MIT = True
; whether to have Observed Reconstruction Task (ORT) in training
ORT = True
; max num of training epochs
epochs = 100
; which device for training, cpu/cuda
device = cuda
; learning rate
lr = 0.001
; weight for regression loss, 0.3 is the default one from original paper
regression_loss_weight = 1
; weight for imputation loss, 0.3 is the default one from original paper
; weight for reconstruction loss
reconstruction_loss_weight = 1
; weight for imputation loss
imputation_loss_weight = 1
; patience of early stopping, -1 means not applied (current early stopping is based on total loss)
early_stop_patience = -1
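
The MIT/ORT flags and loss weights in these [training] sections only name the components of the objective; one plausible, purely illustrative way such weights could combine into a joint loss is sketched below. The real composition lives in the model code, which this commit does not change.

# Illustrative only: a weighted joint objective built from the flags and
# weights defined in the configs above (BRITS/MRNN additionally define a
# consistency term, which is omitted here).
def joint_training_loss(reconstruction_loss, imputation_loss,
                        reconstruction_loss_weight=1.0,
                        imputation_loss_weight=1.0,
                        ORT=True, MIT=True):
    # ORT: Observed Reconstruction Task, a loss on observed values;
    # MIT: Masked Imputation Task, a loss on artificially masked values.
    loss = 0.0
    if ORT:
        loss += reconstruction_loss_weight * reconstruction_loss
    if MIT:
        loss += imputation_loss_weight * imputation_loss
    return loss
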
