
Commit

update configs
wenzhangliu committed Jan 17, 2025
1 parent 978021e commit a6b9c3d
Showing 10 changed files with 10 additions and 10 deletions.
2 changes: 1 addition & 1 deletion xuance/configs/a2c/classic_control/CartPole-v1.yaml
@@ -2,7 +2,7 @@ agent: "A2C"
 env_name: "Classic Control"
 env_id: "CartPole-v1"
 env_seed: 1
-vectorize: "SubprocVecEnv"
+vectorize: "DummyVecEnv"
 learner: "A2C_Learner"
 policy: "Categorical_AC"
 representation: "Basic_MLP"
2 changes: 1 addition & 1 deletion xuance/configs/coma/mpe/simple_spread_v3.yaml
@@ -7,7 +7,7 @@ learner: "COMA_Learner"
 policy: "Categorical_COMA_Policy" # Name of policy.
 representation: "Basic_MLP" # Name of representation.
 representation_critic: "Basic_MLP" # Name of representation for critic.
-vectorize: "SubprocVecMultiAgentEnv" # Method to vectorize the environment.
+vectorize: "DummyVecMultiAgentEnv" # Method to vectorize the environment.
 runner: "MARL" # Runner.

 # recurrent settings for Basic_RNN representation
2 changes: 1 addition & 1 deletion xuance/configs/iac/mpe/simple_spread_v3.yaml
@@ -6,7 +6,7 @@ continuous_action: False
 learner: "IAC_Learner"
 policy: "Categorical_MAAC_Policy"
 representation: "Basic_MLP"
-vectorize: "SubprocVecMultiAgentEnv"
+vectorize: "DummyVecMultiAgentEnv"
 runner: "MARL"

 # recurrent settings for Basic_RNN representation
2 changes: 1 addition & 1 deletion xuance/configs/ippo/mpe/simple_spread_v3.yaml
@@ -6,7 +6,7 @@ continuous_action: True # If to use continuous control.
 learner: "IPPO_Learner" # The learner name.
 policy: "Gaussian_MAAC_Policy" # The policy name.
 representation: "Basic_MLP" # The representation name.
-vectorize: "SubprocVecMultiAgentEnv" # The method to vectorize your environment such that can run in parallel.
+vectorize: "DummyVecMultiAgentEnv" # The method to vectorize your environment such that can run in parallel.
 runner: "MARL" # The runner.

 # recurrent settings for Basic_RNN representation.
2 changes: 1 addition & 1 deletion xuance/configs/maddpg/mpe/simple_push_v3.yaml
@@ -6,7 +6,7 @@ continuous_action: True
 learner: "MADDPG_Learner"
 policy: "MADDPG_Policy"
 representation: "Basic_Identical"
-vectorize: "SubprocVecMultiAgentEnv"
+vectorize: "DummyVecMultiAgentEnv"
 runner: "RunnerCompetition"

 representation_hidden_size: [] # the units for each hidden layer
2 changes: 1 addition & 1 deletion xuance/configs/maddpg/mpe/simple_spread_v3.yaml
@@ -6,7 +6,7 @@ continuous_action: True
 learner: "MADDPG_Learner"
 policy: "MADDPG_Policy"
 representation: "Basic_Identical"
-vectorize: "SubprocVecMultiAgentEnv"
+vectorize: "DummyVecMultiAgentEnv"
 runner: "MARL"

 representation_hidden_size: [] # the units for each hidden layer
2 changes: 1 addition & 1 deletion xuance/configs/mappo/mpe/simple_spread_v3.yaml
@@ -6,7 +6,7 @@ continuous_action: True # If to use continuous control.
 learner: "MAPPO_Clip_Learner"
 policy: "Gaussian_MAAC_Policy" # The policy name.
 representation: "Basic_MLP" # The representation name.
-vectorize: "SubprocVecMultiAgentEnv" # The method to vectorize your environment such that can run in parallel.
+vectorize: "DummyVecMultiAgentEnv" # The method to vectorize your environment such that can run in parallel.
 runner: "MARL" # The runner.

 # recurrent settings for Basic_RNN representation.
2 changes: 1 addition & 1 deletion xuance/configs/ppo/metadrive.yaml
@@ -5,7 +5,7 @@ env_seed: 1
 env_config: # the configs for MetaDrive environment
   map: "C" # see https://metadrive-simulator.readthedocs.io/en/latest/rl_environments.html#generalization-environment for choices
   render: False
-vectorize: "SubprocVecEnv"
+vectorize: "DummyVecEnv"
 learner: "PPOCLIP_Learner"
 policy: "Gaussian_AC" # choice: Gaussian_AC for continuous actions, Categorical_AC for discrete actions.
 representation: "Basic_MLP"
2 changes: 1 addition & 1 deletion xuance/configs/sac/metadrive.yaml
@@ -5,7 +5,7 @@ env_seed: 1
 env_config: # the configs for MetaDrive environment
   map: "C" # see https://metadrive-simulator.readthedocs.io/en/latest/rl_environments.html#generalization-environment for choices
   render: False
-vectorize: "SubprocVecEnv"
+vectorize: "DummyVecEnv"
 learner: "SAC_Learner"
 policy: "Gaussian_SAC"
 representation: "Basic_Identical"
2 changes: 1 addition & 1 deletion xuance/configs/vdac/mpe/simple_spread_v3.yaml
@@ -6,7 +6,7 @@ continuous_action: False
 learner: "VDAC_Learner"
 policy: "Categorical_MAAC_Policy"
 representation: "Basic_MLP"
-vectorize: "SubprocVecMultiAgentEnv"
+vectorize: "DummyVecMultiAgentEnv"
 runner: "MARL"

 # recurrent settings for Basic_RNN representation
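Every change in this commit is the same one-line switch: the vectorize field moves from the subprocess-based vectorizer (SubprocVecEnv / SubprocVecMultiAgentEnv) to the serial, in-process one (DummyVecEnv / DummyVecMultiAgentEnv). As a rough illustration (not part of the commit), one of the updated configs could be exercised through XuanCe's quick-start entry point; the sketch below assumes the get_runner(method, env, env_id, is_test) signature shown in the project README, which may differ between versions.

import xuance

# Picks up xuance/configs/a2c/classic_control/CartPole-v1.yaml,
# which after this commit vectorizes with "DummyVecEnv".
runner = xuance.get_runner(method="a2c",
                           env="classic_control",
                           env_id="CartPole-v1",
                           is_test=False)  # set True to evaluate instead of train
runner.run()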
