Fixed F1 follow-line DDPG agent for simplified perception
rubenlucas93 committed Nov 6, 2023
1 parent a8f6577 commit 9083af2
Showing 18 changed files with 634 additions and 255 deletions.
4 changes: 2 additions & 2 deletions .gitignore
100644 → 100755
@@ -15,8 +15,8 @@ catkin_ws
 output
 recorders
 logs
-
-
+checkpoints
+metrics
 
 ### Images ###
 JPEG
Empty file modified rl_studio/agents/f1/__init__.py
100644 → 100755
Empty file.
23 changes: 17 additions & 6 deletions rl_studio/agents/f1/loaders.py
100644 → 100755
@@ -124,6 +124,8 @@ def __init__(self, config):
         self.metrics_graphics_dir = f"{config['settings']['metrics_dir']}/{config['settings']['mode']}/{config['settings']['task']}_{config['settings']['algorithm']}_{config['settings']['agent']}_{config['settings']['framework']}/graphics"
         self.recorders_carla_dir = f"{config['settings']['recorder_carla_dir']}/{config['settings']['mode']}/{config['settings']['task']}_{config['settings']['algorithm']}_{config['settings']['agent']}_{config['settings']['framework']}"
         self.training_time = config["settings"]["training_time"]
+        self.debug_stats = config["settings"]["debug_stats"]
+        self.show_monitoring = config["settings"]["show_monitoring"]
         ####### States
         self.states = config["settings"]["states"]
         self.states_set = config["states"][self.states]
@@ -132,6 +134,10 @@
         self.actions_set = config["actions"][self.actions]
         ####### Rewards
         self.rewards = config["settings"]["rewards"]
+        ###### Exploration
+        self.steps_to_decrease = config["settings"]["steps_to_decrease"]
+        self.decrease_substraction = config["settings"]["decrease_substraction"]
+        self.decrease_min = config["settings"]["decrease_min"]
 
 
 class LoadEnvVariablesDQNGazebo:
@@ -270,14 +276,19 @@ def __init__(self, config) -> None:
         self.environment["model_state_name"] = config[self.environment_set][self.env][
             "model_state_name"
         ]
+        self.environment["sleep"] = config[self.environment_set][self.env][
+            "sleep"
+        ]
+        self.environment["punish_ineffective_vel"] = config["settings"]["reward_params"]["punish_ineffective_vel"]
+        self.environment["punish_zig_zag_value"] = config["settings"]["reward_params"]["punish_zig_zag_value"]
+        self.environment["reward_function_tuning"] = config["settings"]["reward_params"]["function"]
+        self.environment["beta_1"] = config["settings"]["reward_params"]["beta_1"]
+
+
         # Training/inference
         self.environment["mode"] = config["settings"]["mode"]
-        self.environment["retrain_ddpg_tf_actor_model_name"] = config["retraining"][
-            "ddpg"
-        ]["retrain_ddpg_tf_actor_model_name"]
-        self.environment["retrain_ddpg_tf_critic_model_name"] = config["retraining"][
-            "ddpg"
-        ]["retrain_ddpg_tf_critic_model_name"]
+        self.environment["retrain_ddpg_tf_actor_model_name"] = f"{config['retraining']['ddpg']['retrain_ddpg_tf_model_name']}/ACTOR"
+        self.environment["retrain_ddpg_tf_critic_model_name"] = f"{config['retraining']['ddpg']['retrain_ddpg_tf_model_name']}/CRITIC"
         self.environment["inference_ddpg_tf_actor_model_name"] = config["inference"][
             "ddpg"
         ]["inference_ddpg_tf_actor_model_name"]
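Editor's note: the loader changes above assume matching keys in the training config. The sketch below is illustrative only and is not part of this commit; it shows the shape of config the updated loaders.py code would read (the debug/monitoring flags, the new exploration-decay settings, the reward_params block, and the single retrain_ddpg_tf_model_name entry from which the ACTOR and CRITIC sub-paths are now derived). Key names come from the diff; all values, the base model path, and the standalone variables are invented for the example.

# Illustrative sketch only: key names taken from the diff above; values,
# the base model path, and this standalone script are assumptions.
config = {
    "settings": {
        "debug_stats": True,           # new flag read in the first hunk
        "show_monitoring": True,       # new flag read in the first hunk
        "steps_to_decrease": 1000,     # new exploration-decay settings
        "decrease_substraction": 0.01,
        "decrease_min": 0.05,
        "mode": "retraining",
        "reward_params": {
            "punish_ineffective_vel": 0.5,
            "punish_zig_zag_value": 1.0,
            "function": "linear",
            "beta_1": 0.7,
        },
    },
    "retraining": {
        # One base name replaces the separate actor/critic entries.
        "ddpg": {"retrain_ddpg_tf_model_name": "checkpoints/ddpg_f1"},
    },
}

environment = {}
# Same derivation as the new loader lines: one base name, two sub-paths.
environment["retrain_ddpg_tf_actor_model_name"] = (
    f"{config['retraining']['ddpg']['retrain_ddpg_tf_model_name']}/ACTOR"
)
environment["retrain_ddpg_tf_critic_model_name"] = (
    f"{config['retraining']['ddpg']['retrain_ddpg_tf_model_name']}/CRITIC"
)
environment["reward_function_tuning"] = config["settings"]["reward_params"]["function"]

print(environment["retrain_ddpg_tf_actor_model_name"])   # checkpoints/ddpg_f1/ACTOR
print(environment["retrain_ddpg_tf_critic_model_name"])  # checkpoints/ddpg_f1/CRITIC

Configs written for the previous version, with separate retrain_ddpg_tf_actor_model_name and retrain_ddpg_tf_critic_model_name keys under retraining.ddpg, would presumably need to be collapsed into the single retrain_ddpg_tf_model_name key.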
Empty file modified rl_studio/agents/f1/train_ddpg.py
100644 → 100755
Empty file.
