utils.py
import inspect
import math
import os
import torch
import torch.nn as nn
import torch.nn.functional as F
from tqdm import tqdm
import losses
from pycocoevalcap.bleu.bleu import Bleu
from pycocoevalcap.rouge.rouge import Rouge
from pycocoevalcap.cider.cider import Cider
#from pycocoevalcap.meteor.meteor import Meteor

class LossChecker:
    def __init__(self, num_losses):
        self.num_losses = num_losses
        self.losses = [[] for _ in range(self.num_losses)]

    def update(self, *loss_vals):
        assert len(loss_vals) == self.num_losses

        for i, loss_val in enumerate(loss_vals):
            self.losses[i].append(loss_val)

    def mean(self, last=0):
        mean_losses = [0. for _ in range(self.num_losses)]
        for i, loss in enumerate(self.losses):
            _loss = loss[-last:]
            mean_losses[i] = sum(_loss) / len(_loss)
        return mean_losses
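
# Usage sketch for LossChecker (illustrative values only, not part of the pipeline):
#   checker = LossChecker(2)
#   checker.update(1.0, 0.5)
#   checker.update(3.0, 1.5)
#   checker.mean()        # [2.0, 1.0] -- mean over all updates (last=0 keeps everything)
#   checker.mean(last=1)  # [3.0, 1.5] -- mean over the most recent update only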

def parse_batch(batch):
    vids, feats, captions = batch
    # Move the feature tensors to the GPU and concatenate them along the feature dimension.
    feats = [feat.cuda() for feat in feats]
    feats = torch.cat(feats, dim=2)
    # Cast the caption indices to long integers and move them to the GPU.
    captions = captions.long().cuda()
    return vids, feats, captions
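
# Assumed tensor layout (inferred from the dim=2 concatenation above and the indexing in
# train/test below, not stated in the source): each element of `feats` is a
# [batch, time, feat_dim] tensor from one feature extractor, and `captions` is a
# [max_len, batch] tensor of word indices.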

def train(e, model, optimizer, train_iter, vocab, teacher_forcing_ratio, reg_lambda, recon_lambda, gradient_clip):
    model.train()

    loss_checker = LossChecker(4)
    PAD_idx = vocab.word2idx['<PAD>']
    # tqdm progress bar over the training batches.
    t = tqdm(train_iter)
    for batch in t:
        _, feats, captions = parse_batch(batch)
        optimizer.zero_grad()

        # Decoder output and (optional) reconstructor output.
        output, feats_recon = model(feats, captions, teacher_forcing_ratio)

        # NLLLoss takes a vector of log-probabilities and a target label; it does not apply
        # the log itself, so the model's last layer should be log_softmax. Together they
        # form the cross-entropy loss. The first timestep (<SOS>) is skipped.
        cross_entropy_loss = F.nll_loss(output[1:].view(-1, vocab.n_vocabs),
                                        captions[1:].contiguous().view(-1),
                                        ignore_index=PAD_idx)
        # Entropy regularization over the predicted distributions, with padded positions masked out.
        entropy_loss = losses.entropy_loss(output[1:], ignore_mask=(captions[1:] == PAD_idx))
        # reg_lambda may be 0, which disables the entropy term.
        loss = cross_entropy_loss + reg_lambda * entropy_loss
        if model.reconstructor is None:
            reconstruction_loss = torch.zeros(1)
        else:
            if model.reconstructor._type == 'global':
                reconstruction_loss = losses.global_reconstruction_loss(feats, feats_recon, keep_mask=(captions != PAD_idx))
            else:
                reconstruction_loss = losses.local_reconstruction_loss(feats, feats_recon)
            loss += recon_lambda * reconstruction_loss
        loss.backward()
        if gradient_clip is not None:
            torch.nn.utils.clip_grad_norm_(model.parameters(), gradient_clip)
        optimizer.step()

        loss_checker.update(loss.item(), cross_entropy_loss.item(), entropy_loss.item(), reconstruction_loss.item())
        # ".3f" keeps three decimal places in the progress-bar summary.
        t.set_description("[Epoch #{0}] loss: {3:.3f} = (CE: {4:.3f}) + (Ent: {1} * {5:.3f}) + (Rec: {2} * {6:.3f})".format(
            e, reg_lambda, recon_lambda, *loss_checker.mean(last=10)))

    total_loss, cross_entropy_loss, entropy_loss, reconstruction_loss = loss_checker.mean()
    loss = {
        'total': total_loss,
        'cross_entropy': cross_entropy_loss,
        'entropy': entropy_loss,
        'reconstruction': reconstruction_loss,
    }
    return loss

def test(model, val_iter, vocab, reg_lambda, recon_lambda):
    model.eval()

    loss_checker = LossChecker(4)
    PAD_idx = vocab.word2idx['<PAD>']
    for b, batch in enumerate(val_iter, 1):
        _, feats, captions = parse_batch(batch)
        # At evaluation time the captions are not fed to the model (no teacher forcing).
        output, feats_recon = model(feats)
        cross_entropy_loss = F.nll_loss(output[1:].view(-1, vocab.n_vocabs),
                                        captions[1:].contiguous().view(-1),
                                        ignore_index=PAD_idx)
        entropy_loss = losses.entropy_loss(output[1:], ignore_mask=(captions[1:] == PAD_idx))
        if model.reconstructor is None:
            reconstruction_loss = torch.zeros(1)
        elif model.reconstructor._type == 'global':
            reconstruction_loss = losses.global_reconstruction_loss(feats, feats_recon, keep_mask=(captions != PAD_idx))
        else:
            reconstruction_loss = losses.local_reconstruction_loss(feats, feats_recon)
        loss = cross_entropy_loss + reg_lambda * entropy_loss + recon_lambda * reconstruction_loss
        loss_checker.update(loss.item(), cross_entropy_loss.item(), entropy_loss.item(), reconstruction_loss.item())

    total_loss, cross_entropy_loss, entropy_loss, reconstruction_loss = loss_checker.mean()
    loss = {
        'total': total_loss,
        'cross_entropy': cross_entropy_loss,
        'entropy': entropy_loss,
        'reconstruction': reconstruction_loss,
    }
    return loss
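
# Epoch-loop sketch showing how train/test fit together (hypothetical names and
# hyperparameter values for the model, optimizer, and iterators):
#   for e in range(1, n_epochs + 1):
#       train_loss = train(e, model, optimizer, train_iter, vocab,
#                          teacher_forcing_ratio=1.0, reg_lambda=0.,
#                          recon_lambda=1., gradient_clip=5.0)
#       val_loss = test(model, val_iter, vocab, reg_lambda=0., recon_lambda=1.)
#       print(train_loss['total'], val_loss['total'])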

def get_predicted_captions(data_iter, model, vocab, beam_width=5, beam_alpha=0.):
    def build_onlyonce_iter(data_iter):
        # Each video appears once per ground-truth caption; keep only one feature
        # tensor per video, then regroup the unique videos into fixed-size batches.
        onlyonce_dataset = {}
        for batch in iter(data_iter):
            vids, feats, _ = parse_batch(batch)
            for vid, feat in zip(vids, feats):
                if vid not in onlyonce_dataset:
                    onlyonce_dataset[vid] = feat
        onlyonce_iter = []
        vids = list(onlyonce_dataset.keys())
        feats = list(onlyonce_dataset.values())
        batch_size = 100
        while len(vids) > 0:
            onlyonce_iter.append((vids[:batch_size], torch.stack(feats[:batch_size])))
            vids = vids[batch_size:]
            # NOTE: this line originally sliced `vids` a second time instead of `feats`.
            feats = feats[batch_size:]
        return onlyonce_iter

    model.eval()

    onlyonce_iter = build_onlyonce_iter(data_iter)

    vid2pred = {}
    EOS_idx = vocab.word2idx['<EOS>']
    for vids, feats in onlyonce_iter:
        captions = model.describe(feats, beam_width=beam_width, beam_alpha=beam_alpha)
        captions = [idxs_to_sentence(caption, vocab.idx2word, EOS_idx) for caption in captions]
        vid2pred.update({v: p for v, p in zip(vids, captions)})
    return vid2pred

def get_groundtruth_captions(data_iter, vocab):
    vid2GTs = {}
    EOS_idx = vocab.word2idx['<EOS>']
    for batch in iter(data_iter):
        vids, _, captions = parse_batch(batch)
        captions = captions.transpose(0, 1)
        for vid, caption in zip(vids, captions):
            if vid not in vid2GTs:
                vid2GTs[vid] = []
            caption = idxs_to_sentence(caption, vocab.idx2word, EOS_idx)
            vid2GTs[vid].append(caption)
    return vid2GTs

def score(vid2pred, vid2GTs):
    assert set(vid2pred.keys()) == set(vid2GTs.keys())

    vid2idx = {v: i for i, v in enumerate(vid2pred.keys())}
    refs = {vid2idx[vid]: GTs for vid, GTs in vid2GTs.items()}
    hypos = {vid2idx[vid]: [pred] for vid, pred in vid2pred.items()}
    scores = calc_scores(refs, hypos)
    return scores

# refers: https://github.com/zhegan27/SCN_for_video_captioning/blob/master/SCN_evaluation.py
def calc_scores(ref, hypo):
    """
    ref, dictionary of reference sentences (id, sentence)
    hypo, dictionary of hypothesis sentences (id, sentence)
    score, dictionary of scores
    """
    scorers = [
        (Bleu(4), ["Bleu_1", "Bleu_2", "Bleu_3", "Bleu_4"]),
        (Rouge(), "ROUGE_L"),
        (Cider(), "CIDEr"),
    ]
    final_scores = {}
    for scorer, method in scorers:
        score, scores = scorer.compute_score(ref, hypo)
        if isinstance(score, list):
            for m, s in zip(method, score):
                final_scores[m] = s
        else:
            final_scores[method] = score
    return final_scores
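
# Usage sketch for calc_scores (hypothetical data; each id maps to a list of sentences):
#   refs  = {0: ["a man is cooking", "someone cooks food"]}
#   hypos = {0: ["a man is cooking food"]}
#   calc_scores(refs, hypos)  # {'Bleu_1': ..., 'Bleu_4': ..., 'ROUGE_L': ..., 'CIDEr': ...}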

def evaluate(data_iter, model, vocab, beam_width=5, beam_alpha=0.):
    # Get the predicted sentence for each video, then score it against the ground truths.
    # (The beam parameters are passed through; they were previously hard-coded here.)
    vid2pred = get_predicted_captions(data_iter, model, vocab, beam_width=beam_width, beam_alpha=beam_alpha)
    vid2GTs = get_groundtruth_captions(data_iter, vocab)
    scores = score(vid2pred, vid2GTs)
    return scores

# refers: https://stackoverflow.com/questions/52660985/pytorch-how-to-get-learning-rate-during-training
def get_lr(optimizer):
    for param_group in optimizer.param_groups:
        return param_group['lr']

def idxs_to_sentence(idxs, idx2word, EOS_idx):
    words = []
    for idx in idxs[1:]:
        idx = idx.item()
        if idx == EOS_idx:
            break
        word = idx2word[idx]
        words.append(word)
    sentence = ' '.join(words)
    return sentence
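
# Usage sketch for idxs_to_sentence (hypothetical vocabulary; the leading index is
# skipped because it is assumed to be <SOS>):
#   idx2word = {0: '<SOS>', 1: '<EOS>', 2: 'a', 3: 'cat'}
#   idxs_to_sentence(torch.tensor([0, 2, 3, 1]), idx2word, EOS_idx=1)  # "a cat"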

def cls_to_dict(cls):
    properties = dir(cls)
    properties = [p for p in properties if not p.startswith("__")]
    d = {}
    for p in properties:
        v = getattr(cls, p)
        if inspect.isclass(v):
            v = cls_to_dict(v)
            v['was_class'] = True
        d[p] = v
    return d

# refers https://stackoverflow.com/questions/1305532/convert-nested-python-dict-to-object
class Struct:
    def __init__(self, **entries):
        self.__dict__.update(entries)

def dict_to_cls(d):
    cls = Struct(**d)
    properties = dir(cls)
    properties = [p for p in properties if not p.startswith("__")]
    for p in properties:
        v = getattr(cls, p)
        if isinstance(v, dict) and 'was_class' in v and v['was_class']:
            v = dict_to_cls(v)
            setattr(cls, p, v)
    return cls
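
# Round-trip sketch for cls_to_dict / dict_to_cls (hypothetical config class):
#   class Config:
#       lr = 1e-4
#       class Decoder:
#           hidden_size = 512
#   d = cls_to_dict(Config)  # {'lr': 0.0001, 'Decoder': {'hidden_size': 512, 'was_class': True}}
#   c = dict_to_cls(d)       # Struct with c.lr == 1e-4 and c.Decoder.hidden_size == 512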

def load_checkpoint(model, ckpt_fpath):
    checkpoint = torch.load(ckpt_fpath)
    model.decoder.load_state_dict(checkpoint['decoder'])
    if model.reconstructor and checkpoint['reconstructor']:
        model.reconstructor.load_state_dict(checkpoint['reconstructor'])
    return model

def save_checkpoint(e, model, ckpt_fpath, config):
    ckpt_dpath = os.path.dirname(ckpt_fpath)
    if not os.path.exists(ckpt_dpath):
        os.makedirs(ckpt_dpath)

    torch.save({
        'epoch': e,
        'decoder': model.decoder.state_dict(),
        'reconstructor': model.reconstructor.state_dict() if model.reconstructor else None,
        'config': cls_to_dict(config),
    }, ckpt_fpath)

def save_result(vid2pred, vid2GTs, save_fpath):
    # assert set(vid2pred.keys()) == set(vid2GTs.keys())

    save_dpath = os.path.dirname(save_fpath)
    if not os.path.exists(save_dpath):
        os.makedirs(save_dpath)

    vids = vid2pred.keys()
    with open(save_fpath, 'w') as fout:
        for vid in vids:
            GTs = ' / '.join(vid2GTs[vid])
            pred = vid2pred[vid]
            line = ', '.join([str(vid), pred, GTs])
            fout.write("{}\n".format(line))
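
# Each saved line has the form "<vid>, <prediction>, <GT_1 / GT_2 / ...>", e.g. (made up):
#   video101, a man is cooking, a man is cooking food / someone cooks in a kitchen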