From 18a984b149e20c6c9aee38de4e885bfd6f1ef706 Mon Sep 17 00:00:00 2001
From: zhangfanTJU
Date: Sat, 18 Feb 2023 20:12:45 +0800
Subject: [PATCH 1/3] fix-mt5-mask-dropout

---
 projects/MT5/layers/attention_layer.py |  5 +++--
 projects/MT5/mt5_model.py              | 11 ++++++-----
 2 files changed, 9 insertions(+), 7 deletions(-)

diff --git a/projects/MT5/layers/attention_layer.py b/projects/MT5/layers/attention_layer.py
index 166e792bf..3cfe8c41d 100644
--- a/projects/MT5/layers/attention_layer.py
+++ b/projects/MT5/layers/attention_layer.py
@@ -218,7 +218,7 @@ def forward(
                 )
             else:
                 position_bias = self.compute_bias(
-                    real_seq_length, key_length, placement=attention_mask.placement
+                    real_seq_length, key_length, placement=attention_scores.placement
                 )
 
             if past_key_value is not None:
@@ -228,13 +228,14 @@ def forward(
 
         if use_cache:
             attention_mask = attention_mask.expand_as(attention_scores)
+            attention_dropout_prob = self.attention_dropout_prob if self.training else 0.0
             attention_weights = flow._C.fused_bias_add_scale_mask_softmax_dropout(
                 attention_scores,
                 position_bias,
                 attention_mask,
                 fill_value=-10000.0,
                 scale=1,
-                p=self.attention_dropout_prob,
+                p=attention_dropout_prob,
             )[0]
         else:
             attention_scores = attention_scores + position_bias
diff --git a/projects/MT5/mt5_model.py b/projects/MT5/mt5_model.py
index a16144ff2..2f50b735f 100644
--- a/projects/MT5/mt5_model.py
+++ b/projects/MT5/mt5_model.py
@@ -203,7 +203,7 @@ def forward(
         position_bias = None
         encoder_decoder_position_bias = None
         self.set_cache(encoder_states=None, past_key_values=None)
-        encoder_attn_mask = self.extended_attn_mask(encoder_attn_mask)
+        encoder_attn_mask = self.extended_attn_mask(encoder_attn_mask) if encoder_attn_mask is not None else encoder_attn_mask
         enc_embedding_output = self.embedding(encoder_input_ids)
         # transpose [batch_size, seq_len, embed_size] to [seq_len, batch_size, embed_size]
         enc_hidden_states = enc_embedding_output.transpose(0, 1)
@@ -219,10 +219,11 @@ def forward(
         if only_encoder:
             return encoder_states
 
-        decoder_attn_mask = self.extended_attn_mask(
-            decoder_attn_mask, decoder_input_ids, is_decoder=True
-        )
-        encoder_decoder_attn_mask = self.extended_attn_mask(encoder_decoder_attn_mask)
+        if decoder_attn_mask is not None:
+            decoder_attn_mask = self.extended_attn_mask(
+                decoder_attn_mask, decoder_input_ids, is_decoder=True
+            )
+        encoder_decoder_attn_mask = self.extended_attn_mask(encoder_decoder_attn_mask) if encoder_decoder_attn_mask is not None else encoder_decoder_attn_mask
 
         dec_embedding_output = self.embedding(decoder_input_ids)
         # transpose [batch_size, seq_len, embed_size] to [seq_len, batch_size, embed_size]
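Note on [PATCH 1/3]: the core fix is that the dropout rate handed to the fused softmax/dropout kernel is zeroed outside training; gating `p` on `self.training` implies the fused op applies whatever rate it is given, regardless of the module's train/eval mode. Below is a minimal, self-contained sketch of the same pattern written with unfused ops (assumptions: a toy module with illustrative shapes and OneFlow's PyTorch-style tensor API; this is not the actual MT5 attention class).

import oneflow as flow
import oneflow.nn as nn


class ToyMaskedSoftmaxDropout(nn.Module):
    """Toy stand-in for the fused bias-add + mask + softmax + dropout step."""

    def __init__(self, attention_dropout_prob=0.1):
        super().__init__()
        self.attention_dropout_prob = attention_dropout_prob

    def forward(self, attention_scores, position_bias, attention_mask):
        # Same gating as the patch: zero the rate when not training, because the
        # dropout call below (like the fused kernel) applies `p` as given.
        p = self.attention_dropout_prob if self.training else 0.0
        scores = attention_scores + position_bias
        scores = scores.masked_fill(attention_mask == 0, -10000.0)
        weights = flow.softmax(scores, dim=-1)
        return flow.nn.functional.dropout(weights, p=p)


m = ToyMaskedSoftmaxDropout()
m.eval()  # with the gate above, eval-mode attention weights are deterministic
scores = flow.randn(1, 2, 4, 4)  # [batch, num_heads, query_len, key_len]
bias = flow.zeros(1, 2, 4, 4)
mask = flow.ones(1, 2, 4, 4)
out = m(scores, bias, mask)

The other two changes in this patch follow the same defensive style: `compute_bias` now takes its placement from `attention_scores`, which always exists at that point, and `extended_attn_mask` is only applied to masks that are not None.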
From 7ad4ae472e4a265a6b590a6ab67c687fdbbb007a Mon Sep 17 00:00:00 2001
From: zhangfanTJU
Date: Sun, 19 Feb 2023 16:58:59 +0800
Subject: [PATCH 2/3] fix-t5-dataset

---
 libai/data/datasets/t5_dataset.py | 4 +---
 1 file changed, 1 insertion(+), 3 deletions(-)

diff --git a/libai/data/datasets/t5_dataset.py b/libai/data/datasets/t5_dataset.py
index 5b9aaf1f7..19d443d71 100644
--- a/libai/data/datasets/t5_dataset.py
+++ b/libai/data/datasets/t5_dataset.py
@@ -38,9 +38,7 @@ class T5Dataset(flow.utils.data.Dataset):
             All values are padded to this length. Defaults to 512.
         max_seq_length_dec (int, optional): Maximum length of the sequence passing into decoder.
             All values are padded to this length. Defaults to 128.
-        mask_lm_prob (float, optional): Probability to mask tokens. Defaults to 0.15.
-        max_preds_per_seq (int, optional): Maximum number of masked tokens in each sentence.
-            Defaults to None.
+        masked_lm_prob (float, optional): Probability to mask tokens. Defaults to 0.15.
         short_seq_prob (float, optional): Probability of producing a short sequence.
             Defaults to 0.0.
         seed (int, optional):

From eb29d84e34ef6cdebce05a11adb28447d4b92708 Mon Sep 17 00:00:00 2001
From: zhangfanTJU
Date: Sun, 19 Feb 2023 18:39:20 +0800
Subject: [PATCH 3/3] fix-eod-token

---
 tools/preprocess_data.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tools/preprocess_data.py b/tools/preprocess_data.py
index fdc1a3ddf..aca71f5f0 100644
--- a/tools/preprocess_data.py
+++ b/tools/preprocess_data.py
@@ -97,7 +97,7 @@ def encode(self, json_line):
             if (
                 len(doc_ids) > 0 and self.args.append_eod
             ):  # append eod token when at the enc of document
-                doc_ids[-1].append(Encoder.tokenizer.eod)
+                doc_ids[-1].append(Encoder.tokenizer.eod_token_id)
             ids[key] = doc_ids
         return ids, len(json_line)
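Note on [PATCH 3/3]: the tokenizer exposes the end-of-document marker as `eod_token_id`, so that is the attribute appended after the last sentence of each document. Below is a minimal sketch of the same append logic with a hypothetical stand-in tokenizer (the `ToyTokenizer` and its id are invented for illustration; the real script uses the tokenizer built from its command-line arguments).

class ToyTokenizer:
    """Hypothetical stand-in; only the pieces used by encode() are modeled."""

    eod_token_id = 2  # invented id for the end-of-document token

    def tokenize(self, sentence):
        # crude whitespace "tokenization" standing in for the real subword tokenizer
        return [len(token) for token in sentence.split()]


tokenizer = ToyTokenizer()
doc = ["the first sentence .", "the second sentence ."]
doc_ids = [tokenizer.tokenize(sentence) for sentence in doc]

append_eod = True
if len(doc_ids) > 0 and append_eod:
    # mirror the patched line: append the EOD id to the document's last sentence
    doc_ids[-1].append(tokenizer.eod_token_id)

print(doc_ids)  # the last inner list now ends with 2, the EOD id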