train.py
"""train an agent for OpenAI gym environments
Author: Mohammadamin Barekatain
Affiliation: TUM & OSX
Small parts of this script has been copied from https://github.com/araffin/rl-baselines-zoo
"""
import argparse
import time
import random
import difflib
import os
from collections import OrderedDict
from pprint import pprint
import gym
import numpy as np
import yaml
from stable_baselines import logger
from stable_baselines.bench import Monitor
from stable_baselines.common import set_global_seeds
from stable_baselines.common.cmd_util import make_atari_env
from stable_baselines.common.vec_env import VecFrameStack, SubprocVecEnv, VecNormalize, DummyVecEnv
from stable_baselines.common.vec_env.vec_video_recorder import VecVideoRecorder
from stable_baselines.ddpg import AdaptiveParamNoiseSpec, NormalActionNoise, OrnsteinUhlenbeckActionNoise
from stable_baselines.ppo2.ppo2 import constfn
from utils import make_env, linear_schedule, get_latest_run_id, load_group_results, parse_unknown_args, create_test_env
from utils.callbacks import VideoRecorder, ModelCheckpoint
from utils.plot import plot_results
from utils.policies import ALGOS
from utils.wrappers import modify_env_params, RandomUniformEnvParams
parser = argparse.ArgumentParser(description='Any extra args will be used for modifying environment dynamics')
parser.add_argument('--env', type=str, required=True, help='environment ID')
parser.add_argument('--algo', help='RL Algorithm', type=str, required=True, choices=list(ALGOS.keys()))
parser.add_argument('--seed', help='Random generator seed', type=int, default=0)
parser.add_argument('--exp-name', help='(optional) experiment name, DO NOT USE _', type=str, default=None)
parser.add_argument('-n', '--n-timesteps', help='Overwrite the number of timesteps', default=-1, type=int)
parser.add_argument('--params-ranges', type=str, nargs='+', default=[], help='ranges of the change to the env dynamics')
parser.add_argument('--trained-agent', help='Path to a pretrained agent to continue training', default='', type=str)
parser.add_argument('--save_video_interval', help='Save video every x train steps (0 = disabled)', default=0, type=int)
parser.add_argument('--save_video_length', help='Length of recorded video. Default: 500', default=500, type=int)
parser.add_argument('--play', help='Length of gif of the final trained agent (-1 = disabled)', default=-1, type=int)
parser.add_argument('--log-outputs', help='Save the outputs instead of displaying them', action='store_true', default=False)
parser.add_argument('--log-interval', help='Override log interval (default: -1, no change)', default=-1, type=int)
parser.add_argument('-f', '--log-folder', help='Log folder', type=str, default='logs')
parser.add_argument('--no-monitor', help='do not monitor training', action='store_true', default=False)
parser.add_argument('--no-tensorboard', help='do not create tensorboard', action='store_true', default=False)
parser.add_argument('--no-plot', help='do not plot the results', action='store_true', default=False)
parser.add_argument('--checkpoint', help='save checkpoints', action='store_true', default=False)
# ToDo: support changing environments for Atari
args, env_params = parser.parse_known_args()
env_params = parse_unknown_args(env_params)
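# parse_known_args keeps the flags defined above and returns the rest untouched;
# parse_unknown_args (a utils helper, assumed here to return a {name: value}
# dict) turns those extras into env_params used to modify the environment dynamics.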
params_ranges = args.params_ranges
### Random start-up delay so parallel trainings launched together do not clash
time.sleep(random.random() * 5.0)
### Sanity check
assert not (len(params_ranges) > 0 and len(env_params) > 0), \
    'when params_ranges are provided, env_params for modifying the env must be empty. params_ranges: {}, env_params: {}'.format(
        params_ranges, env_params)
if args.trained_agent != "":
assert args.trained_agent.endswith('.pkl') and os.path.isfile(args.trained_agent), \
"The trained_agent must be a valid path to a .pkl file"
###
set_global_seeds(args.seed)
env_id = args.env
registered_envs = set(gym.envs.registry.env_specs.keys())
# If the environment is not found, suggest the closest match
if env_id not in registered_envs:
    try:
        closest_match = difflib.get_close_matches(env_id, registered_envs, n=1)[0]
    except IndexError:
        closest_match = "'no close match found'"
    raise ValueError('{} not found in gym registry, did you mean {}?'.format(env_id, closest_match))
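# Runs are stored under <log-folder>/<algo>/<env-id>[_<exp-name>]_<run-id>, where
# the run id is one greater than the latest existing run for this environment
# (and experiment name, if given).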
exp_name = args.exp_name
log_path = "{}/{}/".format(args.log_folder, args.algo)
if exp_name:
assert (not ('_' in exp_name)), 'experiment name should not include _'
save_path = os.path.join(log_path,
"{}_{}_{}".format(env_id, exp_name, get_latest_run_id(log_path, env_id, exp_name) + 1))
else:
save_path = os.path.join(log_path, "{}_{}".format(env_id, get_latest_run_id(log_path, env_id) + 1))
if args.log_outputs:
# Log the outputs
logger.configure(folder=save_path, format_strs=['log'])
params_path = "{}/{}".format(save_path, env_id)
os.makedirs(params_path, exist_ok=True)
tensorboard_log = None if args.no_tensorboard else save_path
monitor_log = None if args.no_monitor else save_path
is_atari = 'NoFrameskip' in env_id
print("=" * 10, env_id, "=" * 10)
# Load hyperparameters from yaml file
with open('hyperparams/{}.yml'.format(args.algo), 'r') as f:
    hyperparams_dict = yaml.safe_load(f)
if is_atari:
hyperparams = hyperparams_dict['atari']
elif env_id in list(hyperparams_dict.keys()):
hyperparams = hyperparams_dict[env_id]
else:
raise ValueError("Hyperparameters not found for {}-{}".format(args.algo, env_id))
# Should we overwrite the number of timesteps?
if args.n_timesteps > 0:
hyperparams['n_timesteps'] = args.n_timesteps
n_timesteps = int(hyperparams['n_timesteps'])
# Sort hyperparams that will be saved
saved_hyperparams = OrderedDict([(key, hyperparams[key]) for key in sorted(hyperparams.keys())])
pprint(saved_hyperparams)
if len(env_params):
print("environment parameters")
pprint(env_params)
elif len(params_ranges):
print("ranges for environment parameters")
pprint(params_ranges)
n_envs = hyperparams.get('n_envs', 1)
print("Using {} environments".format(n_envs))
# Create learning rate schedules for ppo2 and sac
if args.algo in ["ppo2", "sac"]:
for key in ['learning_rate', 'cliprange']:
if key not in hyperparams:
continue
if isinstance(hyperparams[key], str):
schedule, initial_value = hyperparams[key].split('_')
initial_value = float(initial_value)
hyperparams[key] = linear_schedule(initial_value)
elif isinstance(hyperparams[key], float):
hyperparams[key] = constfn(hyperparams[key])
else:
raise ValueError('Invalid value for {}: {}'.format(key, hyperparams[key]))
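# Note: linear_schedule is defined in utils; following the rl-baselines-zoo
# convention it presumably returns a callable mapping the remaining-progress
# fraction (annealed from 1 to 0) to progress * initial_value (assumption, not
# verified here).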
normalize = False
normalize_kwargs = {}
if 'normalize' in hyperparams.keys():
normalize = hyperparams['normalize']
if isinstance(normalize, str):
normalize_kwargs = eval(normalize)
normalize = True
del hyperparams['normalize']
# Delete keys so the dict can be passed to the model constructor
if 'n_envs' in hyperparams.keys():
del hyperparams['n_envs']
del hyperparams['n_timesteps']
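# Whatever remains in hyperparams after these deletions is passed verbatim as
# keyword arguments to the algorithm constructor further below.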
############### Create the environment and wrap it if necessary
if is_atari:
print("Using Atari wrapper")
env = make_atari_env(env_id, num_env=n_envs, seed=args.seed)
# Frame-stacking with 4 frames
env = VecFrameStack(env, n_stack=4)
if not args.no_monitor:
print("WARNING: monitor is not supported yet for atari env")
elif args.algo in ['dqn', 'ddpg']:
if hyperparams.get('normalize', False):
print("WARNING: normalization not supported yet for DDPG/DQN")
env = gym.make(env_id)
if len(env_params) > 0:
env = modify_env_params(env, params_path, **env_params)
elif len(params_ranges) > 0:
env = RandomUniformEnvParams(env, params_path, params_ranges)
env.seed(args.seed)
if not args.no_monitor:
env = Monitor(env, monitor_log, allow_early_resets=True)
else:
if n_envs == 1:
env = DummyVecEnv([make_env(env_id, 0, args.seed, monitor_log, env_params, params_path, params_ranges)])
else:
env = SubprocVecEnv([make_env(env_id, i, args.seed, monitor_log, env_params, params_path, params_ranges)
for i in range(n_envs)])
if normalize:
print("Normalizing input and return")
env = VecNormalize(env, **normalize_kwargs)
# Optional Frame-stacking
n_stack = 1
if hyperparams.get('frame_stack', False):
n_stack = hyperparams['frame_stack']
env = VecFrameStack(env, n_stack)
print("Stacking {} frames".format(n_stack))
del hyperparams['frame_stack']
###############
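# At this point env is the training environment, already vectorized and, when
# requested, normalized and/or frame-stacked.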
if args.save_video_interval > 0:
# ToDo: make interval a function of number of updates.
env_hyperparams = {'normalize': normalize, 'n_stack': n_stack, 'normalize_kwargs': normalize_kwargs}
callback = VideoRecorder(env_id, save_path, env_hyperparams, params_path,
args.save_video_length, interval=args.save_video_interval, env_params=env_params).callback
elif args.checkpoint:
if args.algo in ['multipolar-ppo2', 'ppo2']:
interval = n_timesteps / hyperparams['n_steps'] / n_envs / 10
elif args.algo in ['sac', 'multipolar-sac']:
interval = n_timesteps / 10
else:
raise NotImplementedError()
interval = int(interval)
callback = ModelCheckpoint(save_path, interval).callback
else:
callback = None
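# The selected callback (if any) is handed to model.learn below and is invoked
# by stable-baselines during training.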
# Parse noise string for DDPG
if args.algo == 'ddpg' and hyperparams.get('noise_type') is not None:
noise_type = hyperparams['noise_type'].strip()
noise_std = hyperparams['noise_std']
n_actions = env.action_space.shape[0]
if 'adaptive-param' in noise_type:
hyperparams['param_noise'] = AdaptiveParamNoiseSpec(initial_stddev=noise_std, desired_action_stddev=noise_std)
elif 'normal' in noise_type:
hyperparams['action_noise'] = NormalActionNoise(mean=np.zeros(n_actions), sigma=noise_std * np.ones(n_actions))
elif 'ornstein-uhlenbeck' in noise_type:
hyperparams['action_noise'] = OrnsteinUhlenbeckActionNoise(mean=np.zeros(n_actions),
sigma=noise_std * np.ones(n_actions))
else:
raise RuntimeError('Unknown noise type "{}"'.format(noise_type))
print("Applying {} noise with std {}".format(noise_type, noise_std))
del hyperparams['noise_type']
del hyperparams['noise_std']
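# Note: the same std is applied to every action dimension; adaptive parameter
# noise perturbs the policy parameters rather than the actions.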
if args.trained_agent.endswith('.pkl') and os.path.isfile(args.trained_agent):
# Continue training
print(">>> Loading pretrained agent")
# Policy should not be changed
del hyperparams['policy']
model = ALGOS[args.algo].load(args.trained_agent, env=env,
tensorboard_log=tensorboard_log, verbose=1, **hyperparams)
exp_folder = args.trained_agent.split('.pkl')[0]
if normalize:
print("Loading saved running average")
env.load_running_average(exp_folder)
else:
# Train an agent from scratch
model = ALGOS[args.algo](env=env, tensorboard_log=tensorboard_log, verbose=1, **hyperparams)
kwargs = {}
if args.log_interval > -1:
kwargs = {'log_interval': args.log_interval}
model.learn(n_timesteps, callback=callback, **kwargs)
# Save trained model
print("Saving to {}".format(save_path))
model.save("{}/{}".format(save_path, env_id))
# Save hyperparams
with open(os.path.join(params_path, 'config.yml'), 'w') as f:
saved_hyperparams.update(args.__dict__)
saved_hyperparams.update(env_params)
saved_hyperparams.update({'params_ranges': params_ranges})
yaml.dump(saved_hyperparams, f)
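# config.yml now holds the sorted hyperparameters together with all CLI
# arguments, env_params and params_ranges, which should be enough to reproduce
# the run.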
if normalize:
# Unwrap
if isinstance(env, VecFrameStack):
env = env.venv
# Important: save the running average, for testing the agent we need that normalization
env.save_running_average(params_path)
if not args.no_plot and n_timesteps > 1:
results = load_group_results(save_path, verbose=True)
f, _ = plot_results(results, average_group=False, shaded_std=False)
f.savefig(os.path.join(save_path, 'results.png'), bbox_inches='tight', format='png')
if args.play > 0:
test_path = os.path.join(save_path, 'test')
env_hyperparams = {'normalize': normalize, 'n_stack': n_stack, 'normalize_kwargs': normalize_kwargs}
env = create_test_env(env_id, n_envs=1, stats_path=params_path, log_dir=test_path,
hyperparams=env_hyperparams, env_params=env_params)
env.reset()
env = VecVideoRecorder(env, test_path, record_video_trigger=lambda x: x == 0, video_length=args.play,
name_prefix="{}-{}-{}-final".format(exp_name, args.algo, env_id))
obs = env.reset()
for _ in range(args.play + 1):
# action = [env.action_space.sample()]
action, _ = model.predict(obs, deterministic=True)
if isinstance(env.action_space, gym.spaces.Box):
action = np.clip(action, env.action_space.low, env.action_space.high)
obs, _, _, _ = env.step(action)
# Workaround for https://github.com/openai/gym/issues/893
if 'Bullet' not in env_id and not is_atari:
env = env.venv
# DummyVecEnv
while isinstance(env, VecNormalize) or isinstance(env, VecFrameStack):
env = env.venv
env.envs[0].env.close()
else:
# SubprocVecEnv
env.close()