Data shape for DDIM sampling is (1, 4, 64, 64), eta 0.0
Running DDIM Sampling with 50 timesteps
DDIM Sampler: 0%| | 0/50 [00:00<?, ?it/s]
0it [00:04, ?it/s]
Traceback (most recent call last):
File "/root/miniconda3/lib/python3.8/runpy.py", line 194, in _run_module_as_main
return _run_code(code, main_globals, None,
File "/root/miniconda3/lib/python3.8/runpy.py", line 87, in _run_code
exec(code, run_globals)
File "/root/.vscode-server/extensions/ms-python.debugpy-2024.12.0-linux-x64/bundled/libs/debugpy/adapter/../../debugpy/launcher/../../debugpy/main.py", line 71, in
cli.main()
File "/root/.vscode-server/extensions/ms-python.debugpy-2024.12.0-linux-x64/bundled/libs/debugpy/adapter/../../debugpy/launcher/../../debugpy/../debugpy/server/cli.py", line 501, in main
run()
File "/root/.vscode-server/extensions/ms-python.debugpy-2024.12.0-linux-x64/bundled/libs/debugpy/adapter/../../debugpy/launcher/../../debugpy/../debugpy/server/cli.py", line 351, in run_file
runpy.run_path(target, run_name="main")
File "/root/.vscode-server/extensions/ms-python.debugpy-2024.12.0-linux-x64/bundled/libs/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_runpy.py", line 310, in run_path
return _run_module_code(code, init_globals, run_name, pkg_name=pkg_name, script_name=fname)
File "/root/.vscode-server/extensions/ms-python.debugpy-2024.12.0-linux-x64/bundled/libs/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_runpy.py", line 127, in _run_module_code
_run_code(code, mod_globals, init_globals, mod_name, mod_spec, pkg_name, script_name)
File "/root/.vscode-server/extensions/ms-python.debugpy-2024.12.0-linux-x64/bundled/libs/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_runpy.py", line 118, in _run_code
exec(code, run_globals)
File "/root/autodl-tmp/UniHuman/code/infer.py", line 488, in
result=process('Pose Transfer',opt.seed,src_image,tgt_image,clothes=None,prompt=None,tryon_cat=None,edit_cat=None,ug_scale=opt.cfg_scale)
File "/root/autodl-tmp/UniHuman/code/infer.py", line 440, in process
results=model.edit_human(batch,ug_scale=ug_scale,task=task,ddim_steps=50)
File "/root/autodl-tmp/UniHuman/code/unihuman.py", line 32, in edit_human
results=self.model.log_images(batch,N=len(batch),sample=False,unconditional_guidance_scale=ug_scale,ddim_steps=ddim_steps,
File "/root/miniconda3/lib/python3.8/site-packages/torch/autograd/grad_mode.py", line 27, in decorate_context
return func(*args, **kwargs)
File "/root/autodl-tmp/UniHuman/code/cldm/model512.py", line 645, in log_images
samples_cfg, _ = self.sample_log(cond=cond,
File "/root/miniconda3/lib/python3.8/site-packages/torch/autograd/grad_mode.py", line 27, in decorate_context
return func(*args, **kwargs)
File "/root/autodl-tmp/UniHuman/code/cldm/model512.py", line 661, in sample_log
samples, intermediates = ddim_sampler.sample(ddim_steps, batch_size, shape, cond, verbose=False, **kwargs)
File "/root/miniconda3/lib/python3.8/site-packages/torch/autograd/grad_mode.py", line 27, in decorate_context
return func(*args, **kwargs)
File "/root/autodl-tmp/UniHuman/code/ldm/models/diffusion/ddim.py", line 106, in sample
samples, intermediates = self.ddim_sampling(conditioning, size,
File "/root/miniconda3/lib/python3.8/site-packages/torch/autograd/grad_mode.py", line 27, in decorate_context
return func(*args, **kwargs)
File "/root/autodl-tmp/UniHuman/code/ldm/models/diffusion/ddim.py", line 168, in ddim_sampling
outs = self.p_sample_ddim(img, cond, ts, index=index, use_original_steps=ddim_use_original_steps,
File "/root/miniconda3/lib/python3.8/site-packages/torch/autograd/grad_mode.py", line 27, in decorate_context
return func(*args, **kwargs)
File "/root/autodl-tmp/UniHuman/code/ldm/models/diffusion/ddim.py", line 218, in p_sample_ddim
model_uncond, model_t = self.model.apply_model(x_in, t_in, c_in).chunk(2)
File "/root/autodl-tmp/UniHuman/code/cldm/model512.py", line 557, in apply_model
cond_tex=self.encode_tex_images(cond_tex,seg)
File "/root/autodl-tmp/UniHuman/code/cldm/model512.py", line 491, in encode_tex_images
cls_emd=self.control_model.clip_cls_adaptor(cls_emd)
File "/root/miniconda3/lib/python3.8/site-packages/torch/nn/modules/module.py", line 1130, in _call_impl
return forward_call(*input, **kwargs)
File "/root/miniconda3/lib/python3.8/site-packages/torch/nn/modules/linear.py", line 114, in forward
return F.linear(input, self.weight, self.bias)
RuntimeError: mat1 and mat2 shapes cannot be multiplied (16x768 and 512x128)
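From the shapes in the message, the CLIP class-token embedding passed to clip_cls_adaptor is 768-dimensional, while the adaptor's linear layer expects 512-dimensional input, which usually points to a mismatch between the CLIP encoder in use and the one the adaptor weights were built for. The failure can be reproduced in isolation with a minimal sketch (layer sizes are inferred from the error message only, not taken from the UniHuman code):

```python
import torch
import torch.nn as nn

# Illustrative sizes inferred from the error message:
# F.linear multiplies the input (16x768) by weight.T (512x128),
# so the adaptor behaves like a Linear(512, 128) receiving 768-dim embeddings.
clip_cls_adaptor = nn.Linear(512, 128)
cls_emd = torch.randn(16, 768)  # e.g. 16 CLIP class embeddings of width 768

try:
    clip_cls_adaptor(cls_emd)
except RuntimeError as e:
    print(e)  # mat1 and mat2 shapes cannot be multiplied (16x768 and 512x128)
```

So the error is purely a dimension disagreement between the embedding produced at inference time and the adaptor weights that were loaded.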
I got this error while running infer.py with the following arguments:
parser = argparse.ArgumentParser()
parser.add_argument("--src-img-list", type=str, default='/root/autodl-tmp/UniHuman/code/source_img_paths.txt')
parser.add_argument("--tgt-img-list", type=str, default='/root/autodl-tmp/UniHuman/code/tgt_img_paths.txt')
parser.add_argument("--tgt-clothes-list", type=str, default='')
parser.add_argument("--prompt-list", type=str, default='')
parser.add_argument("--task", type=str, default='reposing')
parser.add_argument("--out-dir", type=str, default='./results')
parser.add_argument("--tryon-cat", type=str, default='upper')
parser.add_argument("--edit-cat", type=str, default='upper')
parser.add_argument("--seed", type=int, default=12345)
parser.add_argument("--cfg_scale", type=int, default=2)
opt = parser.parse_args()
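To narrow down whether the 512-vs-768 mismatch comes from the loaded weights or from the CLIP encoder, one option is to inspect the adaptor weight stored in the checkpoint. This is only a hedged sketch: the checkpoint path is a placeholder, and the state-dict key is assumed from the attribute names in the traceback (self.control_model.clip_cls_adaptor), not verified against the UniHuman code.

```python
import torch

# Placeholder path -- replace with the actual checkpoint used by infer.py.
ckpt = torch.load("path/to/unihuman_checkpoint.ckpt", map_location="cpu")
state = ckpt.get("state_dict", ckpt)

# Look for the adaptor weight referenced in the traceback.
for key, value in state.items():
    if "clip_cls_adaptor" in key and key.endswith("weight"):
        # nn.Linear stores its weight as (out_features, in_features).
        print(key, tuple(value.shape))
```

If the stored weight reports in_features=512 while the CLIP model loaded at inference emits 768-dim class embeddings, the problem is in the model/weights setup rather than in the input images.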