#!/usr/bin/env python
#SBATCH --job-name=BSLPNN
#SBATCH --error=%x.%j.err
#SBATCH --output=%x.%j.out
#SBATCH --mail-user=[email protected]
#SBATCH --export=ALL
#SBATCH --time=48:00:00
#SBATCH --partition=sdil
#SBATCH --gres=gpu:1
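
# exp_baseline_pNN.py
# Baseline experiment: for 10 random seeds, build data loaders for the
# configured dataset, train a baseline pNN model, and save each finished run
# under args.savepath. Seeds that already have a saved model are skipped.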
import os
import sys
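# Make the project root and its utils/ folder importable as modules.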
sys.path.append(os.getcwd())
sys.path.append(os.path.join(os.getcwd(), 'utils'))
from configuration import *
import torch
import pprint
from utils import *
import BaselineModels as B
args = parser.parse_args()
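
# Repeat the whole experiment for 10 random seeds.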
for seed in range(10):
    args.SEED = seed
    args = FormulateArgs(args)

    print(f'Training network on device: {args.DEVICE}.')
    MakeFolder(args)
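
    # Data loaders for all three splits; datainfo describes the dataset
    # (name, number of features, number of classes).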
    train_loader, datainfo = GetDataLoader(args, 'train')
    valid_loader, datainfo = GetDataLoader(args, 'valid')
    test_loader, datainfo = GetDataLoader(args, 'test')
    pprint.pprint(datainfo)

    SetSeed(args.SEED)
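
    # Unique identifier for this dataset/seed combination; reused as the saved
    # model's file name, the logger name, and the UUID passed to training.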
    setup = f"baseline_model_pNN_data_{args.DATASET:02d}_{datainfo['dataname']}_seed_{args.SEED:02d}.model"
    print(f'Training setup: {setup}.')

    msglogger = GetMessageLogger(args, setup)
    msglogger.info(f'Training network on device: {args.DEVICE}.')
    msglogger.info(f'Training setup: {setup}.')
    msglogger.info(datainfo)
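
    # Skip this seed if a trained model has already been saved for it.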
    if os.path.isfile(f'{args.savepath}/{setup}'):
        print(f'{setup} exists, skipping this training.')
        msglogger.info('Training was already finished.')
    else:
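        # Network topology: input features, hidden layer widths from args, output classes.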
        topology = [datainfo['N_feature']] + args.hidden + [datainfo['N_class']]
        msglogger.info(f'Topology of the network: {topology}.')

        pnn = B.pNN(topology, args).to(args.DEVICE)

        msglogger.info(f'Number of parameters that could be learned: {len(dict(pnn.named_parameters()).keys())}.')
        msglogger.info(dict(pnn.named_parameters()).keys())
        msglogger.info(f'Number of parameters that are learned in this experiment: {len(pnn.GetParam())}.')
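
        # The optimizer updates only the parameters exposed by pnn.GetParam().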
        lossfunction = B.LossFN(args).to(args.DEVICE)
        optimizer = torch.optim.Adam(pnn.GetParam(), lr=args.LR)
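
        # Train with the progressive variant if requested, otherwise the standard routine.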
        if args.PROGRESSIVE:
            pnn, best = train_pnn_progressive(pnn, train_loader, valid_loader, lossfunction, optimizer, args, msglogger, UUID=setup)
        else:
            pnn, best = train_pnn(pnn, train_loader, valid_loader, lossfunction, optimizer, args, msglogger, UUID=setup)
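
        # Save the model only if training finished within the time budget;
        # otherwise flag that further training is needed.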
        if best:
            if not os.path.exists(f'{args.savepath}/'):
                os.makedirs(f'{args.savepath}/')
            torch.save(pnn, f'{args.savepath}/{setup}')
            msglogger.info('Training is finished.')
        else:
            msglogger.warning('Time out, further training is necessary.')
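
    # Close this seed's logger before starting the next run.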
    CloseLogger(msglogger)