When I tried to use `tools/misc/vis_cam.py` with `--target-layers head.transformer.decoder.layers[2].attentions[0].attn`, I encountered the following problem:
```
Traceback (most recent call last):
  File "/SimVG/tools/misc/vis_cam.py", line 224, in <module>
    main()
  File "SimVG/tools/misc/vis_cam.py", line 167, in main
    grayscale_cam = det_cam_visualizer(image, targets=targets, aug_smooth=args.aug_smooth, eigen_smooth=args.eigen_smooth)
  File "SimVG/simvg/utils/det_cam_visualizer.py", line 268, in __call__
    return self.cam(img, targets, aug_smooth, eigen_smooth)[0, :]
  File "/home/li/.conda/envs/simvg/lib/python3.10/site-packages/pytorch_grad_cam/base_cam.py", line 184, in __call__
    return self.forward(input_tensor,
  File "/home/li/.conda/envs/simvg/lib/python3.10/site-packages/pytorch_grad_cam/base_cam.py", line 74, in forward
    outputs = self.activations_and_grads(input_tensor)
  File "SimVG/simvg/utils/det_cam_visualizer.py", line 430, in __call__
    return self.model(x)
  File "SimVG/simvg/utils/det_cam_visualizer.py", line 162, in __call__
    loss = self.detector(return_loss=True, **self.input_data)[0]
  File "/home/li/.conda/envs/simvg/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1501, in _call_impl
    return forward_call(*args, **kwargs)
  File "/home/li/.conda/envs/simvg/lib/python3.10/site-packages/mmcv/runner/fp16_utils.py", line 119, in new_func
    return old_func(*args, **kwargs)
  File "/SimVG/simvg/models/det_seg/base.py", line 25, in forward
    return self.forward_train(img, ref_expr_inds, img_metas, **kwargs)
  File "SimVG/simvg/models/det_seg/mix_detr_mb.py", line 54, in forward_train
    losses_dict, output = self.head.forward_train(
  File "/SimVG/simvg/models/heads/tgqs_kd_detr_head/tgqs_kd_detr_head.py", line 458, in forward_train
    output = self.forward_general(x_mm, img_metas, cls_feat=cls_feat, text_feat=text_feat, text_mask=text_mask)
  File "SimVG/simvg/models/heads/tgqs_kd_detr_head/tgqs_kd_detr_head.py", line 425, in forward_general
    hidden_states, _ = self.transformer(x_mm, img_masks, query_embed, pos_embed)
  File "/home/li/.conda/envs/simvg/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1501, in _call_impl
    return forward_call(*args, **kwargs)
  File "/SimVG/simvg/models/heads/tgqs_kd_detr_head/transformer.py", line 225, in forward
    decoder_output = self.decoder(
  File "/home/li/.conda/envs/simvg/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1501, in _call_impl
    return forward_call(*args, **kwargs)
  File "/SimVG/simvg/models/heads/tgqs_kd_detr_head/transformer.py", line 168, in forward
    query = layer(
  File "/home/li/.conda/envs/simvg/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1501, in _call_impl
    return forward_call(*args, **kwargs)
  File "/SimVG/tools/detrex/detrex/layers/transformer.py", line 155, in forward
    query = self.attentions[attn_index](
  File "/home/li/.conda/envs/simvg/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1501, in _call_impl
    return forward_call(*args, **kwargs)
  File "SimVG/tools/detrex/detrex/layers/attention.py", line 130, in forward
    out = self.attn(
  File "/home/li/.conda/envs/simvg/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1547, in _call_impl
    hook_result = hook(self, args, result)
  File "SimVG/simvg/utils/det_cam_visualizer.py", line 411, in save_activation
    activation = self.reshape_transform(activation[0])
  File "/SimVG/simvg/utils/det_cam_visualizer.py", line 69, in reshape_transform
    activations.append(torch.nn.functional.interpolate(torch.abs(feat), max_shape, mode="bilinear"))
  File "/home/li/.conda/envs/simvg/lib/python3.10/site-packages/torch/nn/functional.py", line 3959, in interpolate
    return torch._C._nn.upsample_bilinear2d(input, output_size, align_corners, scale_factors)
RuntimeError: Non-empty 4D data tensor expected but got a tensor with sizes [1, 0, 20, 20]
```
Originally posted by @5nmPlus in #9 (comment)
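
The final error is easy to reproduce in isolation: `upsample_bilinear2d` rejects a 4D tensor that is empty along any dimension, and the activation captured by the hook here has zero channels (`[1, 0, 20, 20]`). A minimal standalone repro:

```python
import torch
import torch.nn.functional as F

# The hooked activation is empty along dim 1 (zero channels), so the
# bilinear upsample in reshape_transform has nothing to resize:
feat = torch.empty(1, 0, 20, 20)
F.interpolate(torch.abs(feat), (20, 20), mode="bilinear")
# RuntimeError: Non-empty 4D data tensor expected but got a tensor
# with sizes [1, 0, 20, 20]
```

So the crash in `reshape_transform` is a symptom: the activation recorded by `save_activation` for `head.transformer.decoder.layers[2].attentions[0].attn` is already empty before any resizing happens, presumably because a decoder attention module outputs a sequence of query embeddings rather than the `(N, C, H, W)` feature map the default `reshape_transform` expects.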
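A guarded variant of the resize loop makes this easier to see. This is a sketch only: `reshape_transform_guarded` is a hypothetical stand-in for the loop at `det_cam_visualizer.py:69`, and the skip-and-warn behavior is my assumption, not SimVG's actual code:

```python
import torch
import torch.nn.functional as F

def reshape_transform_guarded(feats, max_shape=(20, 20)):
    # Hypothetical debugging variant of the resize loop in
    # det_cam_visualizer.py: upsample only non-empty 4D feature maps
    # and report anything else (e.g. an empty attention activation).
    activations = []
    for feat in feats:
        if feat.dim() != 4 or feat.size(1) == 0:
            print(f"skipping activation with shape {tuple(feat.shape)}")
            continue
        activations.append(F.interpolate(torch.abs(feat), max_shape, mode="bilinear"))
    return torch.cat(activations, dim=1) if activations else None
```

With something like this in place, the printout shows what the hook actually captured for that layer. Target layers that emit real 2D feature maps (e.g. backbone stages) work with the default transform; a decoder attention output would need a reshape step written for sequence-shaped activations.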