Commit
Update video_regression.py
kjysmu authored Nov 3, 2023
1 parent 9bb1e7d commit 9352f8a
Showing 1 changed file with 0 additions and 22 deletions.
22 changes: 0 additions & 22 deletions model/video_regression.py
@@ -19,7 +19,6 @@ def __init__(self, n_layers=2, d_model=64, dropout=0.1, max_sequence_video=300,
         self.total_vf_dim = total_vf_dim
         self.regModel = regModel
 
-        #self.lstm = nn.LSTM(self.total_vf_dim, self.d_model, self.nlayers, bidirectional=True)
         self.bilstm = nn.LSTM(self.total_vf_dim, self.d_model, self.nlayers, bidirectional=True)
         self.bigru = nn.GRU(self.total_vf_dim, self.d_model, self.nlayers, bidirectional=True)
         self.bifc = nn.Linear(self.d_model * 2, 2)
@@ -28,10 +27,6 @@ def __init__(self, n_layers=2, d_model=64, dropout=0.1, max_sequence_video=300,
         self.gru = nn.GRU(self.total_vf_dim, self.d_model, self.nlayers)
         self.fc = nn.Linear(self.d_model, 2)
 
-        # self.attention = nn.Sequential(
-        #     nn.Linear(self.total_vf_dim, 1),
-        #     nn.Tanh(),
-        # )
 
     def forward(self, feature_semantic_list, feature_scene_offset, feature_motion, feature_emotion):
         ### Video (SemanticList + SceneOffset + Motion + Emotion) (ENCODER) ###
@@ -43,9 +38,6 @@ def forward(self, feature_semantic_list, feature_scene_offset, feature_motion, feature_emotion):
         vf_concat = torch.cat([vf_concat, feature_motion.unsqueeze(-1).float()], dim=-1)
         vf_concat = torch.cat([vf_concat, feature_emotion.float()], dim=-1)
 
-        # hidden_state = torch.zeros(2, vf_concat.shape[0] , self.d_model).to(get_device())
-        # cell_state = torch.zeros(2, vf_concat.shape[0] , self.d_model).to(get_device())
-
         vf_concat = vf_concat.permute(1,0,2)
         vf_concat = F.dropout(vf_concat, p=self.dropout, training=self.training)
@@ -66,18 +58,4 @@ def forward(self, feature_semantic_list, feature_scene_offset, feature_motion, feature_emotion):
         out = out.permute(1,0,2)
         out = self.fc(out)
         return out
 
-        # # Add attention mechanism
-        # attn_weights = F.softmax(self.attention(vf_concat), dim=1)
-        # out = torch.bmm(attn_weights.transpose(1, 2), vf_concat)
-
-        # # Add attention mechanism
-        # attn_weights = F.softmax(self.attention(out), dim=1)
-        # out = torch.bmm(attn_weights.transpose(1, 2), out)
-
-        # # Add regularization with dropout
-        # out = F.dropout(out, p=self.dropout, training=self.training)
-
-        # out, _ = self.lstm(out)
-        # out = self.fc(out)
-        # return out
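For context, here is a minimal, self-contained sketch of how the module reads after this cleanup. The diff elides most of __init__ and the regModel dispatch in the middle of forward, so the class name VideoRegression, the semantic-feature concatenation, the scene-offset handling, and the bilstm/bigru/lstm/gru branching below are assumptions reconstructed from the layers visible above, not the repository's exact code.

# Hedged sketch of the cleaned-up module. Anything not visible in the diff
# above (class name, feature concatenation, regModel dispatch) is an assumption.
import torch
import torch.nn as nn
import torch.nn.functional as F

class VideoRegression(nn.Module):
    def __init__(self, n_layers=2, d_model=64, dropout=0.1, max_sequence_video=300,
                 total_vf_dim=520, regModel="bilstm"):
        super().__init__()
        self.nlayers = n_layers
        self.d_model = d_model
        self.dropout = dropout
        self.max_sequence_video = max_sequence_video
        self.total_vf_dim = total_vf_dim
        self.regModel = regModel

        # Bidirectional heads: forward and backward hidden states are
        # concatenated, so the linear head takes 2 * d_model inputs.
        self.bilstm = nn.LSTM(self.total_vf_dim, self.d_model, self.nlayers, bidirectional=True)
        self.bigru = nn.GRU(self.total_vf_dim, self.d_model, self.nlayers, bidirectional=True)
        self.bifc = nn.Linear(self.d_model * 2, 2)

        # Unidirectional heads (the `self.fc` tail is the branch visible in the diff).
        self.lstm = nn.LSTM(self.total_vf_dim, self.d_model, self.nlayers)
        self.gru = nn.GRU(self.total_vf_dim, self.d_model, self.nlayers)
        self.fc = nn.Linear(self.d_model, 2)

    def forward(self, feature_semantic_list, feature_scene_offset, feature_motion, feature_emotion):
        # Assumed layout: concatenate every per-frame video feature along the
        # last dimension; scalar features gain a trailing singleton dimension.
        vf_concat = feature_semantic_list[0].float()
        for feat in feature_semantic_list[1:]:
            vf_concat = torch.cat([vf_concat, feat.float()], dim=-1)
        vf_concat = torch.cat([vf_concat, feature_scene_offset.unsqueeze(-1).float()], dim=-1)
        vf_concat = torch.cat([vf_concat, feature_motion.unsqueeze(-1).float()], dim=-1)
        vf_concat = torch.cat([vf_concat, feature_emotion.float()], dim=-1)

        # (batch, seq, feat) -> (seq, batch, feat): nn.LSTM/nn.GRU are seq-first by default.
        vf_concat = vf_concat.permute(1, 0, 2)
        vf_concat = F.dropout(vf_concat, p=self.dropout, training=self.training)

        # Assumed dispatch on regModel; only the unidirectional tail is shown in the diff.
        if self.regModel == "bilstm":
            out, _ = self.bilstm(vf_concat)
            head = self.bifc
        elif self.regModel == "bigru":
            out, _ = self.bigru(vf_concat)
            head = self.bifc
        elif self.regModel == "lstm":
            out, _ = self.lstm(vf_concat)
            head = self.fc
        else:  # "gru"
            out, _ = self.gru(vf_concat)
            head = self.fc

        out = out.permute(1, 0, 2)  # back to (batch, seq, features)
        return head(out)            # two regression outputs per frame

# Example usage with dummy shapes (batch=2, seq=300, one 512-dim semantic
# feature, 6 emotion values; 512 + 1 + 1 + 6 = 520 = total_vf_dim):
model = VideoRegression(total_vf_dim=520, regModel="bigru")
sem = [torch.randn(2, 300, 512)]
out = model(sem, torch.zeros(2, 300), torch.zeros(2, 300), torch.zeros(2, 300, 6))
print(out.shape)  # torch.Size([2, 300, 2])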