; settings.ini
[DATA]
; ---> Select subject sex ("male" or "female") and dataset size: "small", "full", "tiny", "han" (13020 subjects), "everything" (17549 subjects), or "testAB"
dataset_sex = "female"
dataset_size = "testAB"
data_folder_name = "datasets"
; ---> Select one or more of "T1_nonlinear", "T1_linear", "T2_nonlinear", "tracts"...
modality_flag = ("T1_linear","T1_nonlinear")
num_workers = 8
; ---> Relevant databases when pre-processing everything on the fly
male_train = "male_train"
male_train_age = "male_train_age"
male_validation = "male_validation"
male_validation_age = "male_validation_age"
data_directory = "/well/win-biobank/projects/imaging/data/data3/subjectsAll/"
scaling_values = "datasets/scaling_values_simple.csv"
; ---> Apply data augmentation (True) or not (False); voxel shift only
shift_transformation = True
mirror_transformation = False
; ---> Is the data fused at input (True) or not (False) (REDUNDANT)
fused_data_flag = False
; ---> Temporary flag for the T1/T2 ratio: 0 disables it; 1 returns T1 + ratio; 2 returns T1 + T2 + ratio
t1t2ratio_flag = 0
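; ---> Usage sketch (illustrative only, not read by this file): configparser keeps every
;      value as a string, so the quoted, tuple- and boolean-valued options above need
;      explicit parsing. A minimal, hypothetical Python loader:
;          import ast, configparser
;          cfg = configparser.ConfigParser()
;          cfg.read("settings.ini")
;          data = cfg["DATA"]
;          modalities = ast.literal_eval(data["modality_flag"])  # ("T1_linear", "T1_nonlinear")
;          sex = ast.literal_eval(data["dataset_sex"])           # strips the quotes -> "female"
;          shift = data.getboolean("shift_transformation")       # True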
[NETWORK]
; ---> The network number (int) referring to the different networks under investigation (see notes!)
network_number = 5
number_of_classes = 1
network_2_modality_filter_outputs = 1
; ---> Which norm layer to use: 'batch', 'instance', 'instance_default', 'layer', 'group' (fixed 8 groups)
norm_flag = 'batch'
; ---> Which nonlinearity to use: 'relu', 'leakyrelu'
nonlin_flag = 'relu'
; ---> Dropout probability for each FC layer (e.g. 0.25), or 0 to disable
dropout_fc1_flag = 0
dropout_fc2_flag = 0
dropout_fc3_flag = 0
; ---> Flags for transfer-learning network
use_transfer_learning = False
previous_experiment_names = ('AM1-1', 'AM1-5')
freeze_pretrained_weights_flag = True
pretrained_model_directory = 'agemapper'
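; ---> Usage sketch (illustrative): one plausible mapping of norm_flag onto PyTorch
;      layers; the 3D variants are assumed here because the inputs are volumetric, and
;      the helper name is hypothetical:
;          import torch.nn as nn
;          def make_norm(norm_flag, channels):
;              if norm_flag == 'batch':
;                  return nn.BatchNorm3d(channels)
;              if norm_flag in ('instance', 'instance_default'):
;                  return nn.InstanceNorm3d(channels, affine=(norm_flag == 'instance'))
;              if norm_flag == 'group':
;                  return nn.GroupNorm(8, channels)  # fixed 8 groups, per the comment above
;              raise ValueError(f"unsupported norm_flag: {norm_flag}")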
[TRAINING]
; ---> Model Properties
experiment_name = "MM1-1"
training_batch_size = 12
validation_batch_size = 12
use_pre_trained = False
pre_trained_experiment_name = 'MM1-28'
learning_rate = 1e-2
optimizer_beta = (0.9, 0.999)
optimizer_epsilon = 1e-8
optimizer_weigth_decay = 0
number_of_epochs = 200
loss_log_period = 50
; ---> Learning rate scheduling
learning_rate_scheduler_step_size = 250
learning_rate_scheduler_gamma = 0.5
learning_rate_validation_scheduler = True
learning_rate_cyclical = False
learning_rate_scheduler_patience = 15
learning_rate_scheduler_threshold = 1e-7
learning_rate_scheduler_min_value = 1e-6
learning_rate_scheduler_max_value = 3e-5
learning_rate_scheduler_step_number = 13200
early_stopping_min_patience = 100
early_stopping_patience = 40
early_stopping_min_delta = 0
; ---> Additional properties
use_last_checkpoint = False
; ---> Select either 'adam' or 'adamW'
optimiser = 'adam'
; ---> Select either 'mse' or 'mae'
loss_function = 'mse'
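; ---> Usage sketch (illustrative): how the optimiser and the validation-driven scheduler
;      settings above could be wired up in PyTorch. model and validation_loss are
;      hypothetical; ReduceLROnPlateau is one scheduler consistent with
;      learning_rate_validation_scheduler = True:
;          import torch
;          opt = torch.optim.Adam(model.parameters(), lr=1e-2,
;                                 betas=(0.9, 0.999), eps=1e-8, weight_decay=0)
;          sched = torch.optim.lr_scheduler.ReduceLROnPlateau(
;              opt, factor=0.5, patience=15, threshold=1e-7, min_lr=1e-6)
;          sched.step(validation_loss)  # once per epoch, after validation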
[MISC]
save_model_directory = "saved_models"
logs_directory = "logs"
checkpoint_directory = "checkpoints"
best_checkpoint_directory = "best_checkpoint"
experiments_directory = "experiments"
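; ---> Usage sketch (illustrative): the [MISC] directories are most naturally combined
;      with experiment_name from [TRAINING]; the layout below is an assumption, not a
;      documented convention:
;          import os
;          ckpt_dir = os.path.join("checkpoints", "MM1-1")
;          best_dir = os.path.join("best_checkpoint", "MM1-1")
;          os.makedirs(ckpt_dir, exist_ok=True)
;          os.makedirs(best_dir, exist_ok=True)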