train_options.py
import argparse

from options.base_options import BaseOptions


class TrainOptions(BaseOptions):
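    """Command-line options used at training time, extending BaseOptions."""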
    def initialize(self, parser: argparse.ArgumentParser):
        parser = super().initialize(parser)
        # data
        parser.add_argument("--no_shuffle", action="store_true", help="don't shuffle input data")
        # checkpoints
        parser.add_argument(
            "--save_count",
            type=int,
            help="how often in steps to always save a checkpoint",
            default=10000,
        )
        parser.add_argument(
            "--val_check_interval",
            "--val_frequency",
            dest="val_check_interval",
            type=str,
            default="0.125",  # parsed later into int or float based on "."
            help="If float, validate (and checkpoint) after this many epochs. "
            "If int, validate after this many batches. If 0 or 0.0, validate "
            "every step.",
        )
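        # The default above is a string that is "parsed later" outside this
        # file. A minimal sketch of that parse, assuming only the "."
        # convention documented here (the helper name is hypothetical):
        #     def parse_interval(v: str):
        #         return float(v) if "." in v else int(v)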
        # optimization
        parser.add_argument(
            "--lr", type=float, default=1e-4, help="initial learning rate for Adam"
        )
        parser.add_argument(
            "--keep_epochs",
            type=int,
            help="number of epochs with the initial learning rate",
            default=5,
        )
        parser.add_argument(
            "--decay_epochs",
            type=int,
            help="number of epochs to linearly decay the learning rate",
            default=5,
        )
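        # --keep_epochs and --decay_epochs describe a hold-then-decay
        # schedule: the LR stays at --lr for keep_epochs, then falls linearly
        # to 0 over decay_epochs. A sketch as a torch LambdaLR multiplier
        # (the scheduler wiring lives outside this file; this is an
        # assumption, not the repo's code):
        #     def lr_lambda(epoch):
        #         if epoch < keep_epochs:
        #             return 1.0
        #         return max(0.0, 1.0 - (epoch - keep_epochs) / decay_epochs)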
        parser.add_argument(
            "--accumulated_batches",
            type=int,
            help="number of batch gradients to accumulate before calling optimizer.step()",
            default=1,
        )
        self.is_train = True
        return parser
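
The --accumulated_batches flag implies a gradient-accumulation training loop:
the per-batch loss is scaled by the accumulation count, and optimizer.step()
only fires every N batches. The sketch below shows that pattern in plain
PyTorch; the model, loader, and loss function are assumptions for
illustration, not code from this repository.

import torch

def train_one_epoch(model, loader, optimizer, accumulated_batches: int = 1):
    """Accumulate gradients over `accumulated_batches` before each step."""
    model.train()
    optimizer.zero_grad()
    for i, (inputs, targets) in enumerate(loader):
        loss = torch.nn.functional.mse_loss(model(inputs), targets)
        # Scale so the accumulated gradient matches a large-batch average.
        (loss / accumulated_batches).backward()
        if (i + 1) % accumulated_batches == 0:
            optimizer.step()
            optimizer.zero_grad()

Scaling the loss keeps gradient magnitudes comparable to a true batch of size
batch_size * accumulated_batches, which is the usual reason to accumulate on
memory-limited GPUs.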