PPO.py (forked from zcaicaros/L2D)
from copy import deepcopy

import torch
import torch.nn as nn

# eval_actions and g_pool_cal are used below; in the upstream zcaicaros/L2D
# code they are provided by agent_utils.py and mb_agg.py respectively.
from agent_utils import eval_actions
from mb_agg import aggr_obs, g_pool_cal
from models.actor_critic import ActorCritic
from Params import configs

device = torch.device(configs.device)


class PPO:
    def __init__(self,
                 lr,
                 gamma,
                 k_epochs,
                 eps_clip,
                 n_j,
                 n_m,
                 num_layers,
                 neighbor_pooling_type,
                 input_dim,
                 hidden_dim,
                 num_mlp_layers_feature_extract,
                 num_mlp_layers_actor,
                 hidden_dim_actor,
                 num_mlp_layers_critic,
                 hidden_dim_critic,
                 ):
        self.lr = lr
        self.gamma = gamma
        self.eps_clip = eps_clip
        self.k_epochs = k_epochs

        self.policy = ActorCritic(n_j=n_j,
                                  n_m=n_m,
                                  num_layers=num_layers,
                                  learn_eps=False,
                                  neighbor_pooling_type=neighbor_pooling_type,
                                  input_dim=input_dim,
                                  hidden_dim=hidden_dim,
                                  num_mlp_layers_feature_extract=num_mlp_layers_feature_extract,
                                  num_mlp_layers_actor=num_mlp_layers_actor,
                                  hidden_dim_actor=hidden_dim_actor,
                                  num_mlp_layers_critic=num_mlp_layers_critic,
                                  hidden_dim_critic=hidden_dim_critic,
                                  device=device)
        self.policy_old = deepcopy(self.policy)
        self.policy_old.load_state_dict(self.policy.state_dict())
        self.optimizer = torch.optim.Adam(self.policy.parameters(), lr=lr)
        self.scheduler = torch.optim.lr_scheduler.StepLR(self.optimizer,
                                                         step_size=configs.decay_step_size,
                                                         gamma=configs.decay_ratio)
        self.V_loss_2 = nn.MSELoss()

    def update(self, memories, n_tasks, g_pool):
        vloss_coef = configs.vloss_coef
        ploss_coef = configs.ploss_coef
        entloss_coef = configs.entloss_coef

        rewards_all_env = []
        adj_mb_t_all_env = []
        fea_mb_t_all_env = []
        candidate_mb_t_all_env = []
        mask_mb_t_all_env = []
        a_mb_t_all_env = []
        old_logprobs_mb_t_all_env = []

        # store data for all env
        for i in range(len(memories)):
            # discounted Monte Carlo returns, reset at episode boundaries,
            # then normalised per environment
            rewards = []
            discounted_reward = 0
            for reward, is_terminal in zip(reversed(memories[i].r_mb), reversed(memories[i].done_mb)):
                if is_terminal:
                    discounted_reward = 0
                discounted_reward = reward + (self.gamma * discounted_reward)
                rewards.insert(0, discounted_reward)
            rewards = torch.tensor(rewards, dtype=torch.float).to(device)
            rewards = (rewards - rewards.mean()) / (rewards.std() + 1e-5)
            rewards_all_env.append(rewards)
            # process each env data
            adj_mb_t_all_env.append(aggr_obs(torch.stack(memories[i].adj_mb).to(device), n_tasks))
            fea_mb_t = torch.stack(memories[i].fea_mb).to(device)
            fea_mb_t = fea_mb_t.reshape(-1, fea_mb_t.size(-1))
            fea_mb_t_all_env.append(fea_mb_t)
            candidate_mb_t_all_env.append(torch.stack(memories[i].candidate_mb).to(device).squeeze())
            mask_mb_t_all_env.append(torch.stack(memories[i].mask_mb).to(device).squeeze())
            a_mb_t_all_env.append(torch.stack(memories[i].a_mb).to(device).squeeze())
            old_logprobs_mb_t_all_env.append(torch.stack(memories[i].logprobs).to(device).squeeze().detach())

        # get batch argument for net forwarding: mb_g_pool is same for all env
        mb_g_pool = g_pool_cal(g_pool, torch.stack(memories[0].adj_mb).to(device).shape, n_tasks, device)

        # Optimize policy for K epochs:
        for _ in range(self.k_epochs):
            loss_sum = 0
            vloss_sum = 0
            for i in range(len(memories)):
                pis, vals = self.policy(x=fea_mb_t_all_env[i],
                                        graph_pool=mb_g_pool,
                                        adj=adj_mb_t_all_env[i],
                                        candidate=candidate_mb_t_all_env[i],
                                        mask=mask_mb_t_all_env[i],
                                        padded_nei=None)
                logprobs, ent_loss = eval_actions(pis.squeeze(), a_mb_t_all_env[i])
                ratios = torch.exp(logprobs - old_logprobs_mb_t_all_env[i].detach())
                advantages = rewards_all_env[i] - vals.detach()
                # PPO clipped surrogate objective plus value and entropy terms
                surr1 = ratios * advantages
                surr2 = torch.clamp(ratios, 1 - self.eps_clip, 1 + self.eps_clip) * advantages
                v_loss = self.V_loss_2(vals.squeeze(), rewards_all_env[i])
                p_loss = - torch.min(surr1, surr2)
                ent_loss = - ent_loss.clone()
                loss = vloss_coef * v_loss + ploss_coef * p_loss + entloss_coef * ent_loss
                loss_sum += loss
                vloss_sum += v_loss
            self.optimizer.zero_grad()
            loss_sum.mean().backward()
            self.optimizer.step()

        # Copy new weights into old policy:
        self.policy_old.load_state_dict(self.policy.state_dict())

        if configs.decayflag:
            self.scheduler.step()
        return loss_sum.mean().item(), vloss_sum.mean().item()
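

# Note: eval_actions is imported from agent_utils in the upstream zcaicaros/L2D
# code. From its use above it is expected to return the log-probabilities of
# the chosen actions and an entropy term for the batch of candidate
# distributions `pis`. A minimal sketch of such a helper (an assumption, not
# the upstream implementation) would be:
#
#   from torch.distributions.categorical import Categorical
#
#   def eval_actions(p, actions):
#       dist = Categorical(probs=p)          # one categorical per step
#       log_p = dist.log_prob(actions)       # log-prob of each taken action
#       entropy = dist.entropy().mean()      # mean entropy across steps
#       return log_p, entropy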


class Memory:
    def __init__(self):
        self.adj_mb = []
        self.fea_mb = []
        self.candidate_mb = []
        self.mask_mb = []
        self.a_mb = []
        self.r_mb = []
        self.done_mb = []
        self.logprobs = []

    def add_step(self, adj, fea, candidate, mask, a_idx):
        # store one transition's state and action; rewards, done flags and
        # action log-probs are appended to r_mb / done_mb / logprobs by the
        # rollout loop, since update() reads them directly
        self.adj_mb.append(adj)
        self.fea_mb.append(fea)
        self.candidate_mb.append(candidate)
        self.mask_mb.append(mask)
        self.a_mb.append(a_idx)

    def clear_memory(self):
        del self.adj_mb[:]
        del self.fea_mb[:]
        del self.candidate_mb[:]
        del self.mask_mb[:]
        del self.a_mb[:]
        del self.r_mb[:]
        del self.done_mb[:]
        del self.logprobs[:]
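

# ---------------------------------------------------------------------------
# Minimal usage sketch (not part of the original file). It assumes the
# hyper-parameter fields exposed by Params.configs in the upstream
# zcaicaros/L2D repository, plus a rollout loop (application code, only hinted
# at here) that fills r_mb, done_mb and logprobs on each Memory.
# ---------------------------------------------------------------------------
# ppo = PPO(configs.lr, configs.gamma, configs.k_epochs, configs.eps_clip,
#           n_j=configs.n_j,
#           n_m=configs.n_m,
#           num_layers=configs.num_layers,
#           neighbor_pooling_type=configs.neighbor_pooling_type,
#           input_dim=configs.input_dim,
#           hidden_dim=configs.hidden_dim,
#           num_mlp_layers_feature_extract=configs.num_mlp_layers_feature_extract,
#           num_mlp_layers_actor=configs.num_mlp_layers_actor,
#           hidden_dim_actor=configs.hidden_dim_actor,
#           num_mlp_layers_critic=configs.num_mlp_layers_critic,
#           hidden_dim_critic=configs.hidden_dim_critic)
# memories = [Memory() for _ in range(num_envs)]
# ...collect one episode per env with ppo.policy_old, appending to memories...
# loss, v_loss = ppo.update(memories,
#                           n_tasks=configs.n_j * configs.n_m,
#                           g_pool=configs.graph_pool_type)
# for m in memories:
#     m.clear_memory()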