
Expected all tensors to be on the same device, but found at least two devices, cpu and cuda:0! (when checking argument for argument mat1 in method wrapper_CUDA_addmm) #369

Open
Cometzyc opened this issue Aug 18, 2024 · 1 comment

@Cometzyc

RuntimeError Traceback (most recent call last)
Cell In[25], line 3
1 from elegantrl.train.run import train_agent
----> 3 train_agent(args)

File E:\langs\anaconda3\envs\deep-fin\lib\site-packages\elegantrl\train\run.py:89, in train_agent(failed resolving arguments)
86 buffer[:] = buffer_items
88 torch.set_grad_enabled(True)
---> 89 logging_tuple = agent.update_net(buffer)
90 torch.set_grad_enabled(False)
92 evaluator.evaluate_and_save(actor=agent.act, steps=horizon_len, exp_r=exp_r, logging_tuple=logging_tuple)

File E:\langs\anaconda3\envs\deep-fin\lib\site-packages\elegantrl\agents\AgentSAC.py:29, in AgentSAC.update_net(self, buffer)
25 with torch.no_grad():
26 states, actions, rewards, undones = buffer.add_item
27 self.update_avg_std_for_normalization(
28 states=states.reshape((-1, self.state_dim)),
---> 29 returns=self.get_cumulative_rewards(rewards=rewards, undones=undones).reshape((-1,))
30 )
32 '''update network'''
33 obj_critics = 0.0

File E:\langs\anaconda3\envs\deep-fin\lib\site-packages\elegantrl\agents\AgentBase.py:190, in AgentBase.get_cumulative_rewards(self, rewards, undones)
188 last_state = self.last_state
189 next_action = self.act_target(last_state)
--> 190 next_value = self.cri_target(last_state, next_action).detach()
191 for t in range(horizon_len - 1, -1, -1):
192 returns[t] = next_value = rewards[t] + masks[t] * next_value

File E:\langs\anaconda3\envs\deep-fin\lib\site-packages\torch\nn\modules\module.py:1553, in Module._wrapped_call_impl(self, *args, **kwargs)
1551 return self._compiled_call_impl(*args, **kwargs) # type: ignore[misc]
1552 else:
-> 1553 return self._call_impl(*args, **kwargs)

File E:\langs\anaconda3\envs\deep-fin\lib\site-packages\torch\nn\modules\module.py:1562, in Module._call_impl(self, *args, **kwargs)
1557 # If we don't have any hooks, we want to skip the rest of the logic in
1558 # this function, and just call forward.
1559 if not (self._backward_hooks or self._backward_pre_hooks or self._forward_hooks or self._forward_pre_hooks
1560 or _global_backward_pre_hooks or _global_backward_hooks
1561 or _global_forward_hooks or _global_forward_pre_hooks):
-> 1562 return forward_call(*args, **kwargs)
1564 try:
1565 result = None

File E:\langs\anaconda3\envs\deep-fin\lib\site-packages\elegantrl\agents\net.py:415, in CriticEnsemble.forward(self, state, action)
414 def forward(self, state, action):
--> 415 values = self.get_q_values(state=state, action=action)
416 value = values.mean(dim=1, keepdim=True)
417 return value

File E:\langs\anaconda3\envs\deep-fin\lib\site-packages\elegantrl\agents\net.py:422, in CriticEnsemble.get_q_values(self, state, action)
420 state = self.state_norm(state)
421 sa_tmp = self.encoder_sa(torch.cat((state, action), dim=1))
--> 422 values = torch.concat([dec_q(sa_tmp) for dec_q in self.decoder_qs], dim=1)
423 values = self.value_re_norm(values)
424 return values

File E:\langs\anaconda3\envs\deep-fin\lib\site-packages\elegantrl\agents\net.py:422, in <listcomp>(.0)
420 state = self.state_norm(state)
421 sa_tmp = self.encoder_sa(torch.cat((state, action), dim=1))
--> 422 values = torch.concat([dec_q(sa_tmp) for dec_q in self.decoder_qs], dim=1)
423 values = self.value_re_norm(values)
424 return values

File E:\langs\anaconda3\envs\deep-fin\lib\site-packages\torch\nn\modules\module.py:1553, in Module._wrapped_call_impl(self, *args, **kwargs)
1551 return self._compiled_call_impl(*args, **kwargs) # type: ignore[misc]
1552 else:
-> 1553 return self._call_impl(*args, **kwargs)

File E:\langs\anaconda3\envs\deep-fin\lib\site-packages\torch\nn\modules\module.py:1562, in Module._call_impl(self, *args, **kwargs)
1557 # If we don't have any hooks, we want to skip the rest of the logic in
1558 # this function, and just call forward.
1559 if not (self._backward_hooks or self._backward_pre_hooks or self._forward_hooks or self._forward_pre_hooks
1560 or _global_backward_pre_hooks or _global_backward_hooks
1561 or _global_forward_hooks or _global_forward_pre_hooks):
-> 1562 return forward_call(*args, **kwargs)
1564 try:
1565 result = None

File E:\langs\anaconda3\envs\deep-fin\lib\site-packages\torch\nn\modules\container.py:219, in Sequential.forward(self, input)
217 def forward(self, input):
218 for module in self:
--> 219 input = module(input)
220 return input

File E:\langs\anaconda3\envs\deep-fin\lib\site-packages\torch\nn\modules\module.py:1553, in Module._wrapped_call_impl(self, *args, **kwargs)
1551 return self._compiled_call_impl(*args, **kwargs) # type: ignore[misc]
1552 else:
-> 1553 return self._call_impl(*args, **kwargs)

File E:\langs\anaconda3\envs\deep-fin\lib\site-packages\torch\nn\modules\module.py:1562, in Module._call_impl(self, *args, **kwargs)
1557 # If we don't have any hooks, we want to skip the rest of the logic in
1558 # this function, and just call forward.
1559 if not (self._backward_hooks or self._backward_pre_hooks or self._forward_hooks or self._forward_pre_hooks
1560 or _global_backward_pre_hooks or _global_backward_hooks
1561 or _global_forward_hooks or _global_forward_pre_hooks):
-> 1562 return forward_call(*args, **kwargs)
1564 try:
1565 result = None

File E:\langs\anaconda3\envs\deep-fin\lib\site-packages\torch\nn\modules\linear.py:117, in Linear.forward(self, input)
116 def forward(self, input: Tensor) -> Tensor:
--> 117 return F.linear(input, self.weight, self.bias)

RuntimeError: Expected all tensors to be on the same device, but found at least two devices, cpu and cuda:0! (when checking argument for argument mat1 in method wrapper_CUDA_addmm)

I was running quickstart_Pendulum_v1.ipynb.
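
For context on the error itself: the final frames show the critic's `Linear` layer (weights on `cuda:0`) receiving `self.last_state`, which is still a CPU tensor when `AgentBase.get_cumulative_rewards` runs. Below is a minimal workaround sketch, assuming the agent keeps its device in `self.device` as elsewhere in ElegantRL; this is not necessarily the fix the maintainer actually applied.

```python
# Sketch of a local workaround inside AgentBase.get_cumulative_rewards
# (the frames around line 188-190 of the traceback above).
# Assumption: the agent stores its torch.device in self.device.
last_state = self.last_state.to(self.device)  # was: last_state = self.last_state
next_action = self.act_target(last_state)
next_value = self.cri_target(last_state, next_action).detach()
```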

@Yonv1943
Collaborator

As mentioned above: the notebook being run is quickstart_Pendulum_v1.ipynb.

Reply: I have now fixed the broken code in quickstart_Pendulum_v1.ipynb.

I am also providing code that can run Pendulum_v1, as follows:
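
(The code block from the original reply did not survive in this capture. As an illustrative stand-in only, not the maintainer's script, here is a minimal SAC run on Pendulum-v1 in the style of ElegantRL's demo scripts. The `Config` import path, the `env_args` fields, and the `gpu_id` attribute are assumptions based on those demos; `train_agent` and `AgentSAC` match the import paths visible in the traceback above.)

```python
# Illustrative stand-in, NOT the original reply's code.
import gym  # Pendulum-v1 ships with gym / gymnasium

from elegantrl.train.config import Config       # assumed import path (per the repo's demos)
from elegantrl.train.run import train_agent     # path confirmed by the traceback
from elegantrl.agents.AgentSAC import AgentSAC  # path confirmed by the traceback

env_args = {                 # assumed fields, mirroring ElegantRL's demo scripts
    'env_name': 'Pendulum-v1',
    'state_dim': 3,          # observation: [cos(theta), sin(theta), theta_dot]
    'action_dim': 1,         # continuous torque
    'if_discrete': False,
}

args = Config(agent_class=AgentSAC, env_class=gym.make, env_args=env_args)
args.gpu_id = 0              # train on cuda:0; use -1 to stay on CPU
train_agent(args)
```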
