error error error full of this #13

Open
shreesha345 opened this issue Jan 16, 2022 · 0 comments
shreesha345 commented Jan 16, 2022

```
Working with z of shape (1, 256, 16, 16) = 65536 dimensions.
loaded pretrained LPIPS loss from taming/modules/autoencoder/lpips/vgg.pth
VQLPIPSWithDiscriminator running with hinge loss.
Restored from models/vqgan_imagenet_f16_16384.ckpt
Using device: cuda:0
Optimising using: Adam
Using text prompts: ['underwater city']
Using seed: 1698681138380486500
0/? [00:00<?, ?it/s]
Oops: runtime error: solve: MAGMA library not found in compilation. Please rebuild with MAGMA.
Try reducing --num-cuts to save memory

RuntimeError Traceback (most recent call last)
/tmp/ipykernel_58/2225298613.py in
21 settings = clipit.apply_settings()
22 clipit.do_init(settings)
---> 23 clipit.do_run(settings)

/kaggle/working/clipit/clipit.py in do_run(args)
997 print("Oops: runtime error: ", e)
998 print("Try reducing --num-cuts to save memory")
--> 999 raise e
1000 except KeyboardInterrupt:
1001 pass

/kaggle/working/clipit/clipit.py in do_run(args)
989 while True:
990 try:
--> 991 train(args, cur_iteration)
992 if cur_iteration == args.iterations:
993 break

/kaggle/working/clipit/clipit.py in train(args, cur_it)
902
903 for i in range(args.batches):
--> 904 lossAll = ascend_txt(args)
905
906 if i == 0 and cur_it % args.save_every == 0:

/kaggle/working/clipit/clipit.py in ascend_txt(args)
723 for cutoutSize in cutoutsTable:
724 make_cutouts = cutoutsTable[cutoutSize]
--> 725 cur_cutouts[cutoutSize] = make_cutouts(out)
726
727 if args.spot_prompts:

/opt/conda/lib/python3.7/site-packages/torch/nn/modules/module.py in _call_impl(self, *input, **kwargs)
1049 if not (self._backward_hooks or self._forward_hooks or self._forward_pre_hooks or _global_backward_hooks
1050 or _global_forward_hooks or _global_forward_pre_hooks):
-> 1051 return forward_call(*input, **kwargs)
1052 # Do not call functions when jit is used
1053 full_backward_hooks, non_full_backward_hooks = [], []

/kaggle/working/clipit/clipit.py in forward(self, input, spot)
352 # TF.to_pil_image(batch[j_wide].cpu()).save(f"cached_im_{cur_iteration:02d}{j_wide:02d}{spot}.png")
353 else:
--> 354 batch1, transforms1 = self.augs_zoom(torch.cat(cutouts[:self.cutn_zoom], dim=0))
355 batch2, transforms2 = self.augs_wide(torch.cat(cutouts[self.cutn_zoom:], dim=0))
356 # print(batch1.shape, batch2.shape)

/opt/conda/lib/python3.7/site-packages/torch/nn/modules/module.py in _call_impl(self, *input, **kwargs)
1049 if not (self._backward_hooks or self._forward_hooks or self._forward_pre_hooks or _global_backward_hooks
1050 or _global_forward_hooks or _global_forward_pre_hooks):
-> 1051 return forward_call(*input, **kwargs)
1052 # Do not call functions when jit is used
1053 full_backward_hooks, non_full_backward_hooks = [], []

/opt/conda/lib/python3.7/site-packages/torch/nn/modules/container.py in forward(self, input)
137 def forward(self, input):
138 for module in self:
--> 139 input = module(input)
140 return input
141

/opt/conda/lib/python3.7/site-packages/torch/nn/modules/module.py in _call_impl(self, *input, **kwargs)
1049 if not (self._backward_hooks or self._forward_hooks or self._forward_pre_hooks or _global_backward_hooks
1050 or _global_forward_hooks or _global_forward_pre_hooks):
-> 1051 return forward_call(*input, **kwargs)
1052 # Do not call functions when jit is used
1053 full_backward_hooks, non_full_backward_hooks = [], []

/opt/conda/lib/python3.7/site-packages/kornia/augmentation/augmentation.py in forward(self, input, params, return_transform)
1141 input_pad = self.compute_padding(input_temp.shape)
1142 _input = self.precrop_padding(input_temp, input_pad) # type: ignore
-> 1143 out = super().forward(_input, params, return_transform)
1144
1145 # Update the actual input size for inverse

/opt/conda/lib/python3.7/site-packages/kornia/augmentation/base.py in forward(self, input, params, return_transform)
243
244 self._params = params
--> 245 output = self.apply_func(in_tensor, in_transform, self._params, return_transform)
246 return _transform_output_shape(output, ori_shape) if self.keepdim else output
247

/opt/conda/lib/python3.7/site-packages/kornia/augmentation/base.py in apply_func(self, in_tensor, in_transform, params, return_transform)
202 # if all data needs to be augmented
203 elif torch.sum(to_apply) == len(to_apply):
--> 204 trans_matrix = self.compute_transformation(in_tensor, params)
205 output = self.apply_transform(in_tensor, params, trans_matrix)
206 else:

/opt/conda/lib/python3.7/site-packages/kornia/augmentation/augmentation.py in compute_transformation(self, input, params)
1063
1064 def compute_transformation(self, input: torch.Tensor, params: Dict[str, torch.Tensor]) -> torch.Tensor:
-> 1065 transform: torch.Tensor = get_perspective_transform(params['src'].to(input), params['dst'].to(input))
1066 return transform
1067

/opt/conda/lib/python3.7/site-packages/kornia/geometry/transform/imgwarp.py in get_perspective_transform(src, dst)
281
282 # solve the system Ax = b
--> 283 X, LU = _torch_solve_cast(b, A)
284
285 # create variable to return

/opt/conda/lib/python3.7/site-packages/kornia/utils/helpers.py in _torch_solve_cast(input, A)
94 dtype = torch.float32
95
---> 96 out1, out2 = torch.solve(input.to(dtype), A.to(dtype))
97
98 return (out1.to(input.dtype), out2.to(input.dtype))

RuntimeError: solve: MAGMA library not found in compilation. Please rebuild with MAGMA.
```

Here is the Kaggle notebook, please fix it 😭
https://www.kaggle.com/shreeshaaithal/notebookf22d408364
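
A possible workaround, not verified against this notebook: the traceback shows kornia's `get_perspective_transform` going through `_torch_solve_cast`, which calls the old `torch.solve`; on CUDA that op needs a MAGMA-enabled PyTorch build. If the PyTorch in the Kaggle image has `torch.linalg.solve` (1.8+), monkey-patching `torch.solve` to route through it (cuSOLVER-backed, no MAGMA required) before calling `clipit.do_run()` might avoid the error. The helper name `_solve_without_magma` is made up for this sketch.

```python
# Hypothetical workaround sketch: replace the MAGMA-backed torch.solve with a
# wrapper around torch.linalg.solve, keeping the old (solution, LU) return
# shape that kornia's _torch_solve_cast unpacks. Run this cell before clipit.do_run().
import torch

_original_solve = torch.solve  # keep a reference in case it is needed elsewhere

def _solve_without_magma(B, A):
    # torch.solve(B, A) solved A X = B and returned (X, LU);
    # torch.linalg.solve(A, B) solves the same system but returns only X.
    X = torch.linalg.solve(A, B)
    # kornia only uses the first element, so return A as a stand-in for the LU factor.
    return X, A

torch.solve = _solve_without_magma
```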
