# config.ini
[run]
# Total batch size; must be divisible by the number of GPUs.
batch_size = 2
# Total number of iteration steps.
iter_steps = 500000
# The initial learning rate.
initial_learning_rate = 1e-4
# Interval for decaying the learning rate.
decay_steps = 5e4
# The decay rate.
decay_rate = 0.5
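# Assuming the usual staircase schedule, these defaults halve the learning rate every 50k steps:
# 1e-4 until step 50k, then 5e-5, then 2.5e-5, and so on.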
# Whether to scale optical flow during downsampling or upsampling.
is_scale = True
# Number of threads for loading input examples.
num_input_threads = 4
# 'beta1' for Adam optimizer: the exponential decay rate for the 1st moment estimates.
beta1 = 0.9
# Shuffle buffer size: number of elements from which the new dataset will sample.
buffer_size = 1000
# Number of GPUs to use.
num_gpus = 2
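# With the defaults above (batch_size = 2, num_gpus = 2), each GPU processes one example per step.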
# CPU device that coordinates multi-GPU training.
cpu_device = /cpu:0
# Interval (in steps) for saving checkpoints.
save_checkpoint_interval = 25000
# Interval (in steps) for writing summaries.
write_summary_interval = 200
# Interval (in steps) for displaying the log on the terminal.
display_log_interval = 100
# tf.ConfigProto parameters.
allow_soft_placement = True
log_device_placement = False
# L2 weight decay.
regularizer_scale = 1e-4
# Save directory for the model, summaries, samples and so on; it is best named after the dataset.
save_dir = KITTI
# Home directory for checkpoints, summaries and samples.
model_name = Fin_R1c
# Checkpoint directory; the resulting path is 'save_dir/model_name/checkpoint_dir'.
checkpoint_dir = checkpoints
# Summary directory; the resulting path is 'save_dir/model_name/summary_dir'.
summary_dir = summary
# Sample directory; the resulting path is 'save_dir/model_name/sample_dir'.
sample_dir = sample
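# With the values above, the resulting paths are, e.g., ./KITTI/Fin_R1c/checkpoints,
# ./KITTI/Fin_R1c/summary and ./KITTI/Fin_R1c/sample.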
# Mode, one of {train, test, generate_fake_flow_occlusion}.
mode = train
# Training mode, one of {no_self_supervision, self_supervision}.
training_mode = self_supervision
# Boolean: whether to restore the model from a checkpoint.
is_restore_model = True
# Restoration model name; if is_restore_model = True, this checkpoint is restored.
restore_model = ./KITTI/checkpoints/Fin_OCC/model-200000
[dataset]
# Cropping height for training.
crop_h = 320
# Cropping width for training.
crop_w = 896
# Image name list.
# For testing and supervised fine-tuning: 4 columns; the first 3 columns are the names of the three input images, and the last column is the name under which the result is saved.
# For unsupervised training: 6 columns; the first 5 columns are the five input images, and the last column is the saving name, used during self-supervised training to match flow and occlusion maps.
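# A 6-column line might look like this (hypothetical names; the exact naming and separator depend on the dataset):
#   frame_01.png frame_02.png frame_03.png frame_04.png frame_05.png sample_0001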
data_list_file = ./dataset/KITTI/train_raw_2015_with_id.txt
# Image storage directory.
img_dir = ../datasets/KITTI/training
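# Superpixel data directory.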
superpixel_dir = ../datasets/KITTI_superpix/training
# Whether to normalize images before they are used as input.
is_normalize_img = False
[self_supervision]
# Image patch height for self-supervised training.
target_h = 256
# Image patch width for self-supervised training.
target_w = 640
# Generated flow and occlusion map directory.
fake_flow_occ_dir = ./KITTI/sample/Fin
[test]
# Restoration model name.
restore_model = ./models/KITTI/
save_dir = ./images/test_images
[generate_fake_flow_occlusion]
# Restoration model name.
restore_model = ./KITTI/checkpoints/Fin_OCC/model-200000
save_dir = ./KITTI/sample/Fin