From 9352f8a1f59697145485f7ad92d6de0b51586155 Mon Sep 17 00:00:00 2001
From: Jaeyong Kang
Date: Fri, 3 Nov 2023 09:13:44 +0800
Subject: [PATCH] Update video_regression.py

Remove commented-out code from VideoRegression: the superseded
unidirectional-LSTM constructor line, the attention-module stub, the
unused hidden/cell-state initialization, and the dead attention/dropout
block left after the final return. No functional change.
---
 model/video_regression.py | 22 ----------------------
 1 file changed, 22 deletions(-)

diff --git a/model/video_regression.py b/model/video_regression.py
index 6dfef16b..e2d31296 100644
--- a/model/video_regression.py
+++ b/model/video_regression.py
@@ -19,7 +19,6 @@ def __init__(self, n_layers=2, d_model=64, dropout=0.1, max_sequence_video=300,
 
         self.total_vf_dim = total_vf_dim
         self.regModel = regModel
-        #self.lstm = nn.LSTM(self.total_vf_dim, self.d_model, self.nlayers, bidirectional=True)
         self.bilstm = nn.LSTM(self.total_vf_dim, self.d_model, self.nlayers, bidirectional=True)
         self.bigru = nn.GRU(self.total_vf_dim, self.d_model, self.nlayers, bidirectional=True)
         self.bifc = nn.Linear(self.d_model * 2, 2)
@@ -28,10 +27,6 @@ def __init__(self, n_layers=2, d_model=64, dropout=0.1, max_sequence_video=300,
         self.gru = nn.GRU(self.total_vf_dim, self.d_model, self.nlayers)
         self.fc = nn.Linear(self.d_model, 2)
 
-        # self.attention = nn.Sequential(
-        #     nn.Linear(self.total_vf_dim, 1),
-        #     nn.Tanh(),
-        # )
 
     def forward(self, feature_semantic_list, feature_scene_offset, feature_motion, feature_emotion):
         ### Video (SemanticList + SceneOffset + Motion + Emotion) (ENCODER) ###
@@ -43,9 +38,6 @@ def forward(self, feature_semantic_list, feature_scene_offset, feature_motion, f
 
         vf_concat = torch.cat([vf_concat, feature_motion.unsqueeze(-1).float()], dim=-1)
         vf_concat = torch.cat([vf_concat, feature_emotion.float()], dim=-1)
-        # hidden_state = torch.zeros(2, vf_concat.shape[0] , self.d_model).to(get_device())
-        # cell_state = torch.zeros(2, vf_concat.shape[0] , self.d_model).to(get_device())
-
         vf_concat = vf_concat.permute(1,0,2)
         vf_concat = F.dropout(vf_concat, p=self.dropout, training=self.training)
 
@@ -66,18 +58,4 @@ def forward(self, feature_semantic_list, feature_scene_offset, feature_motion, f
 
         out = out.permute(1,0,2)
         out = self.fc(out)
         return out
-
-        # # Add attention mechanism
-        # attn_weights = F.softmax(self.attention(vf_concat), dim=1)
-        # out = torch.bmm(attn_weights.transpose(1, 2), vf_concat)
-
-        # # Add attention mechanism
-        # attn_weights = F.softmax(self.attention(out), dim=1)
-        # out = torch.bmm(attn_weights.transpose(1, 2), out)
-
-        # # Add regularization with dropout
-        # out = F.dropout(out, p=self.dropout, training=self.training)
-        # out, _ = self.lstm(out)
-        # out = self.fc(out)
-        # return out
\ No newline at end of file
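
For context, here is a minimal sketch of what `VideoRegression` plausibly looks like once this dead code is removed. Everything visible in the context lines above is reflected here: the five recurrent modules, the two linear heads, the feature concatenation, the permute/dropout, and the `fc` tail. The semantic-feature loop, the string dispatch on `regModel`, and the `total_vf_dim` default are assumptions for illustration only; they are not shown in the diff.

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

class VideoRegression(nn.Module):
    """Sketch of the post-patch module; unshown parts are assumptions."""
    def __init__(self, n_layers=2, d_model=64, dropout=0.1,
                 max_sequence_video=300, total_vf_dim=1, regModel="bilstm"):
        # total_vf_dim=1 is a placeholder default; the real value comes
        # from the upstream feature pipeline and is not in the diff.
        super().__init__()
        self.nlayers = n_layers
        self.d_model = d_model
        self.dropout = dropout
        self.max_sequence_video = max_sequence_video
        self.total_vf_dim = total_vf_dim
        self.regModel = regModel
        # Bidirectional encoders emit 2 * d_model features, hence bifc.
        self.bilstm = nn.LSTM(self.total_vf_dim, self.d_model, self.nlayers, bidirectional=True)
        self.bigru = nn.GRU(self.total_vf_dim, self.d_model, self.nlayers, bidirectional=True)
        self.bifc = nn.Linear(self.d_model * 2, 2)
        # Unidirectional variants emit d_model features, hence fc.
        self.lstm = nn.LSTM(self.total_vf_dim, self.d_model, self.nlayers)
        self.gru = nn.GRU(self.total_vf_dim, self.d_model, self.nlayers)
        self.fc = nn.Linear(self.d_model, 2)

    def forward(self, feature_semantic_list, feature_scene_offset,
                feature_motion, feature_emotion):
        # Concatenate all per-frame video features along the last axis.
        # The semantic-list loop is assumed; only the scene-offset, motion,
        # and emotion concatenations appear as context lines in the diff.
        vf_concat = feature_semantic_list[0].float()
        for feat in feature_semantic_list[1:]:
            vf_concat = torch.cat([vf_concat, feat.float()], dim=-1)
        vf_concat = torch.cat([vf_concat, feature_scene_offset.unsqueeze(-1).float()], dim=-1)
        vf_concat = torch.cat([vf_concat, feature_motion.unsqueeze(-1).float()], dim=-1)
        vf_concat = torch.cat([vf_concat, feature_emotion.float()], dim=-1)

        # (batch, seq, feat) -> (seq, batch, feat): nn.LSTM and nn.GRU
        # expect sequence-first input by default.
        vf_concat = vf_concat.permute(1, 0, 2)
        vf_concat = F.dropout(vf_concat, p=self.dropout, training=self.training)

        # Assumed dispatch on regModel; the diff only shows the tail of the
        # final branch (permute back to batch-first, then self.fc).
        if self.regModel == "bilstm":
            out, _ = self.bilstm(vf_concat)
            return self.bifc(out.permute(1, 0, 2))
        elif self.regModel == "bigru":
            out, _ = self.bigru(vf_concat)
            return self.bifc(out.permute(1, 0, 2))
        elif self.regModel == "lstm":
            out, _ = self.lstm(vf_concat)
            return self.fc(out.permute(1, 0, 2))
        else:  # "gru"
            out, _ = self.gru(vf_concat)
            out = out.permute(1, 0, 2)
            out = self.fc(out)
            return out
```

The split between the two heads follows from the context lines: bidirectional branches produce `2 * d_model` features per step and route through `bifc`, while the unidirectional ones use `fc`; both heads end in a 2-dimensional output per time step.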