File "D:\Project\ComfyUI\execution.py", line 323, in execute output_data, output_ui, has_subgraph = get_output_data(obj, input_data_all, execution_block_cb=execution_block_cb, pre_execute_cb=pre_execute_cb) File "D:\Project\ComfyUI\execution.py", line 198, in get_output_data return_values = _map_node_over_list(obj, input_data_all, obj.FUNCTION, allow_interrupt=True, execution_block_cb=execution_block_cb, pre_execute_cb=pre_execute_cb) File "D:\Project\ComfyUI\execution.py", line 169, in _map_node_over_list process_inputs(input_dict, i) File "D:\Project\ComfyUI\execution.py", line 158, in process_inputs results.append(getattr(obj, func)(**inputs)) File "D:\Project\ComfyUI\custom_nodes\ComfyUI-Impact-Pack\modules\impact\impact_pack.py", line 591, in doit enhanced_img, cropped_enhanced, cropped_enhanced_alpha, mask, cnet_pil_list = FaceDetailer.enhance_face( File "D:\Project\ComfyUI\custom_nodes\ComfyUI-Impact-Pack\modules\impact\impact_pack.py", line 547, in enhance_face DetailerForEach.do_detail(image, segs, model, clip, vae, guide_size, guide_size_for_bbox, max_size, seed, steps, cfg, File "D:\Project\ComfyUI\custom_nodes\ComfyUI-Impact-Pack\modules\impact\impact_pack.py", line 323, in do_detail enhanced_image, cnet_pils = core.enhance_detail(cropped_image, model, clip, vae, guide_size, guide_size_for_bbox, max_size, File "D:\Project\ComfyUI\custom_nodes\ComfyUI-Impact-Pack\modules\impact\core.py", line 364, in enhance_detail refined_latent = impact_sampling.ksampler_wrapper(model2, seed2, steps2, cfg2, sampler_name2, scheduler2, positive2, negative2, File "D:\Project\ComfyUI\custom_nodes\ComfyUI-Impact-Pack\modules\impact\impact_sampling.py", line 243, in ksampler_wrapper refined_latent = separated_sample(model, True, seed, advanced_steps, cfg, sampler_name, scheduler, File "D:\Project\ComfyUI\custom_nodes\ComfyUI-Impact-Pack\modules\impact\impact_sampling.py", line 216, in separated_sample res = sample_with_custom_noise(model, add_noise, seed, cfg, positive, negative, impact_sampler, sigmas, latent_image, noise=noise, callback=callback) File "D:\Project\ComfyUI\custom_nodes\ComfyUI-Impact-Pack\modules\impact\impact_sampling.py", line 160, in sample_with_custom_noise samples = comfy.sample.sample_custom(model, noise, cfg, sampler, sigmas, positive, negative, latent_image, File "D:\Project\ComfyUI\custom_nodes\ComfyUI-AnimateDiff-Evolved\animatediff\sampling.py", line 420, in motion_sample return orig_comfy_sample(model, noise, *args, **kwargs) File "D:\Project\ComfyUI\custom_nodes\ComfyUI-Advanced-ControlNet\adv_control\sampling.py", line 116, in acn_sample return orig_comfy_sample(model, *args, **kwargs) File "D:\Project\ComfyUI\custom_nodes\ComfyUI-Advanced-ControlNet\adv_control\utils.py", line 117, in uncond_multiplier_check_cn_sample return orig_comfy_sample(model, *args, **kwargs) File "D:\Project\ComfyUI\comfy\sample.py", line 48, in sample_custom samples = comfy.samplers.sample(model, noise, positive, negative, cfg, model.load_device, sampler, sigmas, model_options=model.model_options, latent_image=latent_image, denoise_mask=noise_mask, callback=callback, disable_pbar=disable_pbar, seed=seed) File "D:\Project\ComfyUI\comfy\samplers.py", line 753, in sample return cfg_guider.sample(noise, latent_image, sampler, sigmas, denoise_mask, callback, disable_pbar, seed) File "D:\Project\ComfyUI\comfy\samplers.py", line 740, in sample output = self.inner_sample(noise, latent_image, device, sampler, sigmas, denoise_mask, callback, disable_pbar, seed) File 
"D:\Project\ComfyUI\comfy\samplers.py", line 719, in inner_sample samples = sampler.sample(self, sigmas, extra_args, callback, noise, latent_image, denoise_mask, disable_pbar) File "D:\Project\ComfyUI\custom_nodes\ComfyUI-TiledDiffusion\utils.py", line 34, in KSAMPLER_sample return orig_fn(*args, **kwargs) File "D:\Project\ComfyUI\comfy\samplers.py", line 624, in sample samples = self.sampler_function(model_k, noise, sigmas, extra_args=extra_args, callback=k_callback, disable=disable_pbar, **self.extra_options) File "D:\Project\ComfyUI\.venv\lib\site-packages\torch\utils\_contextlib.py", line 115, in decorate_context return func(*args, **kwargs) File "D:\Project\ComfyUI\comfy\k_diffusion\sampling.py", line 155, in sample_euler denoised = model(x, sigma_hat * s_in, **extra_args) File "D:\Project\ComfyUI\comfy\samplers.py", line 299, in __call__ out = self.inner_model(x, sigma, model_options=model_options, seed=seed) File "D:\Project\ComfyUI\comfy\samplers.py", line 706, in __call__ return self.predict_noise(*args, **kwargs) File "D:\Project\ComfyUI\comfy\samplers.py", line 709, in predict_noise return sampling_function(self.inner_model, x, timestep, self.conds.get("negative", None), self.conds.get("positive", None), self.cfg, model_options=model_options, seed=seed) File "D:\Project\ComfyUI\custom_nodes\ComfyUI-AutomaticCFG\nodes.py", line 65, in sampling_function_patched cfg_result = x - model_options["sampler_cfg_function"](args) File "D:\Project\ComfyUI\custom_nodes\ComfyUI-AutomaticCFG\nodes.py", line 645, in automatic_cfg_function cond_pred = experimental_functions(cond_pred, cond_exp_method, cond_exp_value, cond_exp_normalize, self.previous_cond_pred, previous_sigma, sigma.item(), sigmax, attention_modifiers_positive, args, model_options_copy, eval_string_cond) File "D:\Project\ComfyUI\custom_nodes\ComfyUI-AutomaticCFG\nodes.py", line 457, in experimental_functions cond = comfy.samplers.calc_cond_batch(args["model"], [cond_to_use], args["input"], args["timestep"], tmp_model_options)[0] File "D:\Project\ComfyUI\comfy\samplers.py", line 228, in calc_cond_batch output = model.apply_model(input_x, timestep_, **c).chunk(batch_chunks) File "D:\Project\ComfyUI\custom_nodes\ComfyUI-Advanced-ControlNet\adv_control\utils.py", line 69, in apply_model_uncond_cleanup_wrapper return orig_apply_model(self, *args, **kwargs) File "D:\Project\ComfyUI\comfy\model_base.py", line 145, in apply_model model_output = self.diffusion_model(xc, t, context=context, control=control, transformer_options=transformer_options, **extra_conds).float() File "D:\Project\ComfyUI\.venv\lib\site-packages\torch\nn\modules\module.py", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File "D:\Project\ComfyUI\.venv\lib\site-packages\torch\nn\modules\module.py", line 1541, in _call_impl return forward_call(*args, **kwargs) File "D:\Project\ComfyUI\custom_nodes\ComfyUI-Kolors-MZ\hook_comfyui_kolors_v2.py", line 71, in forward result = super().forward(*args, **kwargs) File "D:\Project\ComfyUI\custom_nodes\SeargeSDXL\modules\custom_sdxl_ksampler.py", line 71, in new_unet_forward x0 = old_unet_forward(self, x, timesteps, context, y, control, transformer_options, **kwargs) File "D:\Project\ComfyUI\comfy\ldm\modules\diffusionmodules\openaimodel.py", line 852, in forward emb = emb + self.label_emb(y) File "D:\Project\ComfyUI\.venv\lib\site-packages\torch\nn\modules\module.py", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File 
"D:\Project\ComfyUI\.venv\lib\site-packages\torch\nn\modules\module.py", line 1541, in _call_impl return forward_call(*args, **kwargs) File "D:\Project\ComfyUI\.venv\lib\site-packages\torch\nn\modules\container.py", line 217, in forward input = module(input) File "D:\Project\ComfyUI\.venv\lib\site-packages\torch\nn\modules\module.py", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File "D:\Project\ComfyUI\.venv\lib\site-packages\torch\nn\modules\module.py", line 1541, in _call_impl return forward_call(*args, **kwargs) File "D:\Project\ComfyUI\.venv\lib\site-packages\torch\nn\modules\container.py", line 217, in forward input = module(input) File "D:\Project\ComfyUI\.venv\lib\site-packages\torch\nn\modules\module.py", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File "D:\Project\ComfyUI\.venv\lib\site-packages\torch\nn\modules\module.py", line 1541, in _call_impl return forward_call(*args, **kwargs) File "D:\Project\ComfyUI\comfy\ops.py", line 70, in forward return super().forward(*args, **kwargs) File "D:\Project\ComfyUI\.venv\lib\site-packages\torch\nn\modules\linear.py", line 116, in forward return F.linear(input, self.weight, self.bias)
2024-11-28T10:23:30.388814 - 0.0 seconds: D:\Project\ComfyUI\custom_nodes\ComfyUI-PixtralLlamaVision 2024-11-28T10:23:30.388814 - 0.0 seconds: D:\Project\ComfyUI\custom_nodes\ComfyUI-Florence2 2024-11-28T10:23:30.388814 - 0.0 seconds: D:\Project\ComfyUI\custom_nodes\ComfyUI_ExtraModels 2024-11-28T10:23:30.389864 - 0.0 seconds: D:\Project\ComfyUI\custom_nodes\ComfyUI-Allor 2024-11-28T10:23:30.389864 - 0.0 seconds: D:\Project\ComfyUI\custom_nodes\ComfyUI-SUPIR 2024-11-28T10:23:30.389864 - 0.0 seconds: D:\Project\ComfyUI\custom_nodes\img2txt-comfyui-nodes 2024-11-28T10:23:30.389864 - 0.0 seconds: D:\Project\ComfyUI\custom_nodes\ComfyUI-GGUF 2024-11-28T10:23:30.389864 - 0.0 seconds: D:\Project\ComfyUI\custom_nodes\ComfyUI_Qwen2-VL-Instruct 2024-11-28T10:23:30.389864 - 0.0 seconds: D:\Project\ComfyUI\custom_nodes\Flux-Prompt-Generator 2024-11-28T10:23:30.389864 - 0.0 seconds: D:\Project\ComfyUI\custom_nodes\Derfuu_ComfyUI_ModdedNodes 2024-11-28T10:23:30.390880 - 0.0 seconds: D:\Project\ComfyUI\custom_nodes\ComfyUI-AnimateDiff-Evolved 2024-11-28T10:23:30.390880 - 0.0 seconds: D:\Project\ComfyUI\custom_nodes\ComfyUI-KwaiKolorsWrapper 2024-11-28T10:23:30.390880 - 0.0 seconds: D:\Project\ComfyUI\custom_nodes\ComfyUI_bnb_nf4_fp4_Loaders 2024-11-28T10:23:30.390880 - 0.0 seconds: D:\Project\ComfyUI\custom_nodes\comfyui-workspace-manager 2024-11-28T10:23:30.390880 - 0.0 seconds: D:\Project\ComfyUI\custom_nodes\rgthree-comfy 2024-11-28T10:23:30.390880 - 0.0 seconds: D:\Project\ComfyUI\custom_nodes\ComfyUI_Comfyroll_CustomNodes 2024-11-28T10:23:30.390880 - 0.0 seconds: D:\Project\ComfyUI\custom_nodes\ComfyUI-PyramidFlowWrapper 2024-11-28T10:23:30.390880 - 0.0 seconds: D:\Project\ComfyUI\custom_nodes\ComfyUI-KJNodes 2024-11-28T10:23:30.391873 - 0.0 seconds: D:\Project\ComfyUI\custom_nodes\ComfyUI-VEnhancer 2024-11-28T10:23:30.391873 - 0.1 seconds: D:\Project\ComfyUI\custom_nodes\comfyui-ollama 2024-11-28T10:23:30.391873 - 0.1 seconds: D:\Project\ComfyUI\custom_nodes\comfyui_dagthomas 2024-11-28T10:23:30.391873 - 0.1 seconds: D:\Project\ComfyUI\custom_nodes\ComfyUI-Inspire-Pack 2024-11-28T10:23:30.391873 - 0.1 seconds: D:\Project\ComfyUI\custom_nodes\Fooocus_Nodes 2024-11-28T10:23:30.391873 - 0.1 seconds: D:\Project\ComfyUI\custom_nodes\ComfyUI-ControlNeXt-SVD 2024-11-28T10:23:30.391873 - 0.1 seconds: D:\Project\ComfyUI\custom_nodes\PuLID_ComfyUI 2024-11-28T10:23:30.391873 - 0.1 seconds: D:\Project\ComfyUI\custom_nodes\comfyui-tensorops 2024-11-28T10:23:30.392872 - 0.2 seconds: D:\Project\ComfyUI\custom_nodes\ComfyUI-SAM2 2024-11-28T10:23:30.392872 - 0.2 seconds: D:\Project\ComfyUI\custom_nodes\ComfyUI-Fluxpromptenhancer 2024-11-28T10:23:30.392872 - 0.2 seconds: D:\Project\ComfyUI\custom_nodes\ComfyUI_LayerStyle 2024-11-28T10:23:30.392872 - 0.3 seconds: D:\Project\ComfyUI\custom_nodes\ComfyUI-tbox 2024-11-28T10:23:30.392872 - 0.3 seconds: D:\Project\ComfyUI\custom_nodes\ComfyUI-BrushNet 2024-11-28T10:23:30.392872 - 0.3 seconds: D:\Project\ComfyUI\custom_nodes\ComfyUI-LTXVideo 2024-11-28T10:23:30.394538 - 0.4 seconds: D:\Project\ComfyUI\custom_nodes\ComfyUI-Gemini 2024-11-28T10:23:30.394538 - 0.4 seconds: D:\Project\ComfyUI\custom_nodes\ComfyUI-IF_AI_tools 2024-11-28T10:23:30.394538 - 0.5 seconds: D:\Project\ComfyUI\custom_nodes\ComfyUI_Fill-Nodes 2024-11-28T10:23:30.395544 - 0.6 seconds: D:\Project\ComfyUI\custom_nodes\ComfyUI-Manager 2024-11-28T10:23:30.395544 - 0.6 seconds: D:\Project\ComfyUI\custom_nodes\ComfyUI-Crystools 2024-11-28T10:23:30.396529 - 0.6 seconds: 
D:\Project\ComfyUI\custom_nodes\ComfyUI-CCSR 2024-11-28T10:23:30.396529 - 0.6 seconds: D:\Project\ComfyUI\custom_nodes\ComfyUI-Easy-Use 2024-11-28T10:23:30.396529 - 0.8 seconds: D:\Project\ComfyUI\custom_nodes\ComfyUI-LLaVA-OneVision 2024-11-28T10:23:30.396529 - 0.8 seconds: D:\Project\ComfyUI\custom_nodes\SeargeSDXL 2024-11-28T10:23:30.396529 - 0.9 seconds: D:\Project\ComfyUI\custom_nodes\ComfyUI_InstantID 2024-11-28T10:23:30.396529 - 1.0 seconds: D:\Project\ComfyUI\custom_nodes\batchImg-rembg-ComfyUI-nodes 2024-11-28T10:23:30.397521 - 1.0 seconds: D:\Project\ComfyUI\custom_nodes\comfyui-reactor-node 2024-11-28T10:23:30.398029 - 1.3 seconds: D:\Project\ComfyUI\custom_nodes\ComfyUI_Custom_Nodes_AlekPet 2024-11-28T10:23:30.398029 - 1.3 seconds: D:\Project\ComfyUI\custom_nodes\ComfyUI_VLM_nodes 2024-11-28T10:23:30.398029 - 1.5 seconds: D:\Project\ComfyUI\custom_nodes\was-node-suite-comfyui 2024-11-28T10:23:30.398029 - 3.9 seconds: D:\Project\ComfyUI\custom_nodes\comfyui-art-venture 2024-11-28T10:23:30.398029 - 2024-11-28T10:23:30.418087 - Starting server 2024-11-28T10:23:30.419085 - To see the GUI go to: http://0.0.0.0:8188 2024-11-28T10:23:30.419085 - To see the GUI go to: http://[::]:8188 2024-11-28T10:23:31.319640 - FETCH DATA from: D:\Project\ComfyUI\custom_nodes\ComfyUI-Manager\extension-node-map.json2024-11-28T10:23:31.319640 - 2024-11-28T10:23:31.325759 - [DONE]2024-11-28T10:23:31.327779 - 2024-11-28T10:23:31.508885 - [ERROR] An error occurred while retrieving information for the 'IF_ChatPrompt' node. 2024-11-28T10:23:31.509268 - Traceback (most recent call last): File "D:\Project\ComfyUI\server.py", line 564, in get_object_info out[x] = node_info(x) File "D:\Project\ComfyUI\server.py", line 531, in node_info info['input'] = obj_class.INPUT_TYPES() File "D:\Project\ComfyUI\custom_nodes\ComfyUI-IF_AI_tools\IFChatPromptNode.py", line 109, in INPUT_TYPES node = cls() File "D:\Project\ComfyUI\custom_nodes\ComfyUI-IF_AI_tools\IFChatPromptNode.py", line 53, in __init__ self.agent_tools = self.load_agent_tools() File "D:\Project\ComfyUI\custom_nodes\ComfyUI-IF_AI_tools\IFChatPromptNode.py", line 78, in load_agent_tools for filename in os.listdir(self.agents_dir): FileNotFoundError: [WinError 3] 系统找不到指定的路径。: 'D:\\Project\\ComfyUI\\input\\IF_AI\\presets\\agents' 2024-11-28T10:23:31.543923 - Invalid model config for model D:\Project\ComfyUI\models\LLM\checkpoints2024-11-28T10:23:31.543923 - 2024-11-28T10:23:31.545524 - Invalid model config for model D:\Project\ComfyUI\models\LLM\LLaVA-OneVision2024-11-28T10:23:31.548026 - 2024-11-28T10:23:31.548026 - Invalid model config for model D:\Project\ComfyUI\models\LLM\checkpoints2024-11-28T10:23:31.548026 - 2024-11-28T10:23:31.550028 - Invalid model config for model D:\Project\ComfyUI\models\LLM\LLaVA-OneVision2024-11-28T10:23:31.550028 - 2024-11-28T10:23:31.550458 - Invalid model config for model D:\Project\ComfyUI\models\LLM\checkpoints2024-11-28T10:23:31.550458 - 2024-11-28T10:23:31.551486 - Invalid model config for model D:\Project\ComfyUI\models\LLM\LLaVA-OneVision2024-11-28T10:23:31.551486 - 2024-11-28T10:23:31.551486 - Invalid model config for model D:\Project\ComfyUI\models\LLM\checkpoints2024-11-28T10:23:31.551486 - 2024-11-28T10:23:31.552046 - Invalid model config for model D:\Project\ComfyUI\models\LLM\LLaVA-OneVision2024-11-28T10:23:31.552046 - 2024-11-28T10:23:31.552046 - Invalid model config for model D:\Project\ComfyUI\models\LLM\checkpoints2024-11-28T10:23:31.552046 - 2024-11-28T10:23:31.552046 - Invalid model config for model 
D:\Project\ComfyUI\models\LLM\LLaVA-OneVision2024-11-28T10:23:31.552046 - 2024-11-28T10:23:31.552046 - Invalid model config for model D:\Project\ComfyUI\models\LLM\checkpoints2024-11-28T10:23:31.552046 - 2024-11-28T10:23:31.552046 - Invalid model config for model D:\Project\ComfyUI\models\LLM\LLaVA-OneVision2024-11-28T10:23:31.552046 - 2024-11-28T10:23:31.557823 - �[33mQualityOfLifeSuit_Omar92:�[0m:NSP ready2024-11-28T10:23:31.557823 - 2024-11-28T10:23:31.745121 - []2024-11-28T10:23:31.745121 - 2024-11-28T10:23:31.745121 - []2024-11-28T10:23:31.748123 - 2024-11-28T10:25:15.111608 - got prompt 2024-11-28T10:25:15.398099 - model weight dtype torch.bfloat16, manual cast: None 2024-11-28T10:25:15.399496 - model_type FLUX 2024-11-28T10:25:40.481254 - no CLIP/text encoder weights in checkpoint, the text encoder model will not be loaded. 2024-11-28T10:26:30.824579 - Requested to load MochiTEModel_ 2024-11-28T10:26:30.824579 - Loading 1 new model 2024-11-28T10:26:32.321164 - loaded partially 5636.8 5611.38671875 0 2024-11-28T10:26:33.856017 - Unloading models for lowram load. 2024-11-28T10:26:33.868997 - 0 models unloaded. 2024-11-28T10:26:34.513041 - Requested to load VideoVAE 2024-11-28T10:26:34.513041 - Loading 1 new model 2024-11-28T10:26:34.991155 - loaded completely 0.0 799.548433303833 True 2024-11-28T10:26:35.688506 - Requested to load LTXV 2024-11-28T10:26:35.688506 - Loading 1 new model 2024-11-28T10:26:37.843697 - loaded completely 0.0 3667.902587890625 True 2024-11-28T10:27:39.555407 - 100%|██████████████████████████████████████████████████████████████████████████████████| 20/20 [01:01<00:00, 3.14s/it]2024-11-28T10:27:39.555407 - 100%|██████████████████████████████████████████████████████████████████████████████████| 20/20 [01:01<00:00, 3.08s/it]2024-11-28T10:27:39.555407 - 2024-11-28T10:27:42.429973 - Unloading models for lowram load. 2024-11-28T10:27:42.689058 - 1 models unloaded. 2024-11-28T10:27:42.689736 - Loading 1 new model 2024-11-28T10:27:42.810433 - loaded completely 0.0 799.548433303833 True 2024-11-28T10:27:51.219355 - Prompt executed in 156.08 seconds 2024-11-28T10:33:10.277254 - FETCH DATA from: D:\Project\ComfyUI\custom_nodes\ComfyUI-Manager\extension-node-map.json2024-11-28T10:33:10.277254 - 2024-11-28T10:33:10.282999 - [DONE]2024-11-28T10:33:10.282999 - 2024-11-28T10:33:10.449711 - [ERROR] An error occurred while retrieving information for the 'IF_ChatPrompt' node. 
2024-11-28T10:33:10.449711 - Traceback (most recent call last): File "D:\Project\ComfyUI\server.py", line 564, in get_object_info out[x] = node_info(x) File "D:\Project\ComfyUI\server.py", line 531, in node_info info['input'] = obj_class.INPUT_TYPES() File "D:\Project\ComfyUI\custom_nodes\ComfyUI-IF_AI_tools\IFChatPromptNode.py", line 109, in INPUT_TYPES node = cls() File "D:\Project\ComfyUI\custom_nodes\ComfyUI-IF_AI_tools\IFChatPromptNode.py", line 53, in __init__ self.agent_tools = self.load_agent_tools() File "D:\Project\ComfyUI\custom_nodes\ComfyUI-IF_AI_tools\IFChatPromptNode.py", line 78, in load_agent_tools for filename in os.listdir(self.agents_dir): FileNotFoundError: [WinError 3] 系统找不到指定的路径。: 'D:\\Project\\ComfyUI\\input\\IF_AI\\presets\\agents' 2024-11-28T10:33:10.465992 - Invalid model config for model D:\Project\ComfyUI\models\LLM\checkpoints2024-11-28T10:33:10.465992 - 2024-11-28T10:33:10.468867 - Invalid model config for model D:\Project\ComfyUI\models\LLM\LLaVA-OneVision2024-11-28T10:33:10.468867 - 2024-11-28T10:33:10.468867 - Invalid model config for model D:\Project\ComfyUI\models\LLM\checkpoints2024-11-28T10:33:10.470001 - 2024-11-28T10:33:10.470001 - Invalid model config for model D:\Project\ComfyUI\models\LLM\LLaVA-OneVision2024-11-28T10:33:10.470001 - 2024-11-28T10:33:10.471002 - Invalid model config for model D:\Project\ComfyUI\models\LLM\checkpoints2024-11-28T10:33:10.471002 - 2024-11-28T10:33:10.471999 - Invalid model config for model D:\Project\ComfyUI\models\LLM\LLaVA-OneVision2024-11-28T10:33:10.471999 - 2024-11-28T10:33:10.471999 - Invalid model config for model D:\Project\ComfyUI\models\LLM\checkpoints2024-11-28T10:33:10.471999 - 2024-11-28T10:33:10.472997 - Invalid model config for model D:\Project\ComfyUI\models\LLM\LLaVA-OneVision2024-11-28T10:33:10.472997 - 2024-11-28T10:33:10.472997 - Invalid model config for model D:\Project\ComfyUI\models\LLM\checkpoints2024-11-28T10:33:10.473998 - 2024-11-28T10:33:10.473998 - Invalid model config for model D:\Project\ComfyUI\models\LLM\LLaVA-OneVision2024-11-28T10:33:10.473998 - 2024-11-28T10:33:10.474999 - Invalid model config for model D:\Project\ComfyUI\models\LLM\checkpoints2024-11-28T10:33:10.474999 - 2024-11-28T10:33:10.474999 - Invalid model config for model D:\Project\ComfyUI\models\LLM\LLaVA-OneVision2024-11-28T10:33:10.474999 - 2024-11-28T10:33:10.675886 - []2024-11-28T10:33:10.676886 - 2024-11-28T10:33:10.676886 - []2024-11-28T10:33:10.676886 - 2024-11-28T10:33:27.965337 - HTTP Request: GET http://127.0.0.1:11434/api/tags "HTTP/1.1 200 OK" 2024-11-28T10:37:17.093588 - HTTP Request: GET http://127.0.0.1:11434/api/tags "HTTP/1.1 200 OK" 2024-11-28T10:37:17.130336 - HTTP Request: GET http://127.0.0.1:11434/api/tags "HTTP/1.1 200 OK" 2024-11-28T10:37:17.163414 - HTTP Request: GET http://127.0.0.1:11434/api/tags "HTTP/1.1 200 OK" 2024-11-28T10:37:17.204257 - HTTP Request: GET http://127.0.0.1:11434/api/tags "HTTP/1.1 200 OK" 2024-11-28T10:37:17.236097 - HTTP Request: GET http://127.0.0.1:11434/api/tags "HTTP/1.1 200 OK" 2024-11-28T10:37:17.272175 - HTTP Request: GET http://127.0.0.1:11434/api/tags "HTTP/1.1 200 OK" 2024-11-28T10:37:17.315489 - HTTP Request: GET http://127.0.0.1:11434/api/tags "HTTP/1.1 200 OK" 2024-11-28T10:37:17.357870 - HTTP Request: GET http://127.0.0.1:11434/api/tags "HTTP/1.1 200 OK" 2024-11-28T10:37:17.395921 - HTTP Request: GET http://127.0.0.1:11434/api/tags "HTTP/1.1 200 OK" 2024-11-28T10:38:09.666766 - HTTP Request: GET http://127.0.0.1:11434/api/tags "HTTP/1.1 200 OK" 
2024-11-28T10:38:35.368408 - got prompt
2024-11-28T10:38:36.700585 - Using pytorch attention in VAE
2024-11-28T10:38:36.702644 - Using pytorch attention in VAE
2024-11-28T10:38:37.748154 - torch version: 2.3.0+cu121
2024-11-28T10:39:10.386870 - WARNING: the load_unet_state_dict function has been deprecated and will be removed please switch to: load_diffusion_model_state_dict
2024-11-28T10:39:10.397175 - input_blocks.1.0.skip_connection.weight down_blocks.0.resnets.0.conv_shortcut.weight
2024-11-28T10:39:10.397175 - input_blocks.1.0.skip_connection.bias down_blocks.0.resnets.0.conv_shortcut.bias
2024-11-28T10:39:10.397175 - input_blocks.2.0.skip_connection.weight down_blocks.0.resnets.1.conv_shortcut.weight
2024-11-28T10:39:10.397175 - input_blocks.2.0.skip_connection.bias down_blocks.0.resnets.1.conv_shortcut.bias
2024-11-28T10:39:10.397175 - input_blocks.5.0.skip_connection.weight down_blocks.1.resnets.1.conv_shortcut.weight
2024-11-28T10:39:10.397175 - input_blocks.5.0.skip_connection.bias down_blocks.1.resnets.1.conv_shortcut.bias
2024-11-28T10:39:10.397175 - input_blocks.8.0.skip_connection.weight down_blocks.2.resnets.1.conv_shortcut.weight
2024-11-28T10:39:10.397175 - input_blocks.8.0.skip_connection.bias down_blocks.2.resnets.1.conv_shortcut.bias
2024-11-28T10:39:10.397175 - input_blocks.9.0.op.weight down_blocks.2.downsamplers.0.conv.weight
2024-11-28T10:39:10.397175 - input_blocks.9.0.op.bias down_blocks.2.downsamplers.0.conv.bias
2024-11-28T10:39:10.397175 - middle_block.0.skip_connection.weight mid_block.resnets.0.conv_shortcut.weight
2024-11-28T10:39:10.397175 - middle_block.0.skip_connection.bias mid_block.resnets.0.conv_shortcut.bias
2024-11-28T10:39:10.397175 - middle_block.2.skip_connection.weight mid_block.resnets.1.conv_shortcut.weight
2024-11-28T10:39:10.397175 - middle_block.2.skip_connection.bias mid_block.resnets.1.conv_shortcut.bias
2024-11-28T10:39:10.397175 - output_blocks.8.1.conv.weight up_blocks.2.upsamplers.0.conv.weight
2024-11-28T10:39:10.397175 - output_blocks.8.1.conv.bias up_blocks.2.upsamplers.0.conv.bias
2024-11-28T10:39:10.397175 - label_emb.0.0.bias class_embedding.linear_1.bias
2024-11-28T10:39:10.397175 - label_emb.0.2.bias class_embedding.linear_2.bias
2024-11-28T10:39:10.397175 - label_emb.0.0.weight class_embedding.linear_1.weight
2024-11-28T10:39:10.397175 - label_emb.0.2.weight class_embedding.linear_2.weight
2024-11-28T10:39:10.478289 - model weight dtype torch.float16, manual cast: None
2024-11-28T10:39:10.506863 - model_type EPS
2024-11-28T10:39:23.400100 - Model maximum sigma: 29.06122398376465 / Model minimum sigma: 0.029167160391807556
2024-11-28T10:39:23.400100 - Sampling function patched. Uncond enabled from 0.0 to 1.0
2024-11-28T10:39:23.400100 - Preset SDXL_Analog_photo_helper loaded successfully!
2024-11-28T10:39:23.404642 - Requested to load SDXL
2024-11-28T10:39:23.404642 - Loading 1 new model
2024-11-28T10:39:24.321701 - loaded completely 0.0 4935.9311599731445 True
2024-11-28T10:39:39.461440 - 100%|██████████████████████████████████████████████████████████████████████████████████| 18/18 [00:15<00:00, 2.11it/s]
2024-11-28T10:39:39.461977 - 100%|██████████████████████████████████████████████████████████████████████████████████| 18/18 [00:15<00:00, 1.20it/s]
2024-11-28T10:39:39.463307 - Requested to load AutoencoderKL
2024-11-28T10:39:39.463307 - Loading 1 new model
2024-11-28T10:39:40.290950 - loaded completely 0.0 159.55708122253418 True
2024-11-28T10:39:42.070902 - 0: 640x384 1 face, 146.2ms
2024-11-28T10:39:42.071405 - Speed: 9.9ms preprocess, 146.2ms inference, 18.3ms postprocess per image at shape (1, 3, 640, 384)
2024-11-28T10:39:42.232955 - CLIP: [detailed face, detailed eyes]
2024-11-28T10:39:42.234894 - Requested to load SD3ClipModel_
2024-11-28T10:39:42.234894 - Loading 1 new model
2024-11-28T10:39:43.881451 - loaded completely 0.0 4541.693359375 True
2024-11-28T10:39:44.093991 - Detailer: segment upscale for ((86.57962, 111.23291)) | crop region (259, 333) x 3.0770116911258074 -> (796, 1024)
2024-11-28T10:39:45.191327 - Requested to load SDXL
2024-11-28T10:39:45.191327 - Loading 1 new model
2024-11-28T10:39:47.221121 - loaded completely 0.0 4935.9311599731445 True
2024-11-28T10:39:47.244520 - 0%| | 0/20 [00:00<?, ?it/s]
2024-11-28T10:39:47.623233 - 0%| | 0/20 [00:00<?, ?it/s]
2024-11-28T10:39:47.671217 - !!! Exception during processing !!!
mat1 and mat2 shapes cannot be multiplied (1x3584 and 5632x1280) 2024-11-28T10:39:47.681432 - Traceback (most recent call last): File "D:\Project\ComfyUI\execution.py", line 323, in execute output_data, output_ui, has_subgraph = get_output_data(obj, input_data_all, execution_block_cb=execution_block_cb, pre_execute_cb=pre_execute_cb) File "D:\Project\ComfyUI\execution.py", line 198, in get_output_data return_values = _map_node_over_list(obj, input_data_all, obj.FUNCTION, allow_interrupt=True, execution_block_cb=execution_block_cb, pre_execute_cb=pre_execute_cb) File "D:\Project\ComfyUI\execution.py", line 169, in _map_node_over_list process_inputs(input_dict, i) File "D:\Project\ComfyUI\execution.py", line 158, in process_inputs results.append(getattr(obj, func)(**inputs)) File "D:\Project\ComfyUI\custom_nodes\ComfyUI-Impact-Pack\modules\impact\impact_pack.py", line 591, in doit enhanced_img, cropped_enhanced, cropped_enhanced_alpha, mask, cnet_pil_list = FaceDetailer.enhance_face( File "D:\Project\ComfyUI\custom_nodes\ComfyUI-Impact-Pack\modules\impact\impact_pack.py", line 547, in enhance_face DetailerForEach.do_detail(image, segs, model, clip, vae, guide_size, guide_size_for_bbox, max_size, seed, steps, cfg, File "D:\Project\ComfyUI\custom_nodes\ComfyUI-Impact-Pack\modules\impact\impact_pack.py", line 323, in do_detail enhanced_image, cnet_pils = core.enhance_detail(cropped_image, model, clip, vae, guide_size, guide_size_for_bbox, max_size, File "D:\Project\ComfyUI\custom_nodes\ComfyUI-Impact-Pack\modules\impact\core.py", line 364, in enhance_detail refined_latent = impact_sampling.ksampler_wrapper(model2, seed2, steps2, cfg2, sampler_name2, scheduler2, positive2, negative2, File "D:\Project\ComfyUI\custom_nodes\ComfyUI-Impact-Pack\modules\impact\impact_sampling.py", line 243, in ksampler_wrapper refined_latent = separated_sample(model, True, seed, advanced_steps, cfg, sampler_name, scheduler, File "D:\Project\ComfyUI\custom_nodes\ComfyUI-Impact-Pack\modules\impact\impact_sampling.py", line 216, in separated_sample res = sample_with_custom_noise(model, add_noise, seed, cfg, positive, negative, impact_sampler, sigmas, latent_image, noise=noise, callback=callback) File "D:\Project\ComfyUI\custom_nodes\ComfyUI-Impact-Pack\modules\impact\impact_sampling.py", line 160, in sample_with_custom_noise samples = comfy.sample.sample_custom(model, noise, cfg, sampler, sigmas, positive, negative, latent_image, File "D:\Project\ComfyUI\custom_nodes\ComfyUI-AnimateDiff-Evolved\animatediff\sampling.py", line 420, in motion_sample return orig_comfy_sample(model, noise, *args, **kwargs) File "D:\Project\ComfyUI\custom_nodes\ComfyUI-Advanced-ControlNet\adv_control\sampling.py", line 116, in acn_sample return orig_comfy_sample(model, *args, **kwargs) File "D:\Project\ComfyUI\custom_nodes\ComfyUI-Advanced-ControlNet\adv_control\utils.py", line 117, in uncond_multiplier_check_cn_sample return orig_comfy_sample(model, *args, **kwargs) File "D:\Project\ComfyUI\comfy\sample.py", line 48, in sample_custom samples = comfy.samplers.sample(model, noise, positive, negative, cfg, model.load_device, sampler, sigmas, model_options=model.model_options, latent_image=latent_image, denoise_mask=noise_mask, callback=callback, disable_pbar=disable_pbar, seed=seed) File "D:\Project\ComfyUI\comfy\samplers.py", line 753, in sample return cfg_guider.sample(noise, latent_image, sampler, sigmas, denoise_mask, callback, disable_pbar, seed) File "D:\Project\ComfyUI\comfy\samplers.py", line 740, in sample output = 
self.inner_sample(noise, latent_image, device, sampler, sigmas, denoise_mask, callback, disable_pbar, seed) File "D:\Project\ComfyUI\comfy\samplers.py", line 719, in inner_sample samples = sampler.sample(self, sigmas, extra_args, callback, noise, latent_image, denoise_mask, disable_pbar) File "D:\Project\ComfyUI\custom_nodes\ComfyUI-TiledDiffusion\utils.py", line 34, in KSAMPLER_sample return orig_fn(*args, **kwargs) File "D:\Project\ComfyUI\comfy\samplers.py", line 624, in sample samples = self.sampler_function(model_k, noise, sigmas, extra_args=extra_args, callback=k_callback, disable=disable_pbar, **self.extra_options) File "D:\Project\ComfyUI\.venv\lib\site-packages\torch\utils\_contextlib.py", line 115, in decorate_context return func(*args, **kwargs) File "D:\Project\ComfyUI\comfy\k_diffusion\sampling.py", line 155, in sample_euler denoised = model(x, sigma_hat * s_in, **extra_args) File "D:\Project\ComfyUI\comfy\samplers.py", line 299, in __call__ out = self.inner_model(x, sigma, model_options=model_options, seed=seed) File "D:\Project\ComfyUI\comfy\samplers.py", line 706, in __call__ return self.predict_noise(*args, **kwargs) File "D:\Project\ComfyUI\comfy\samplers.py", line 709, in predict_noise return sampling_function(self.inner_model, x, timestep, self.conds.get("negative", None), self.conds.get("positive", None), self.cfg, model_options=model_options, seed=seed) File "D:\Project\ComfyUI\custom_nodes\ComfyUI-AutomaticCFG\nodes.py", line 65, in sampling_function_patched cfg_result = x - model_options["sampler_cfg_function"](args) File "D:\Project\ComfyUI\custom_nodes\ComfyUI-AutomaticCFG\nodes.py", line 645, in automatic_cfg_function cond_pred = experimental_functions(cond_pred, cond_exp_method, cond_exp_value, cond_exp_normalize, self.previous_cond_pred, previous_sigma, sigma.item(), sigmax, attention_modifiers_positive, args, model_options_copy, eval_string_cond) File "D:\Project\ComfyUI\custom_nodes\ComfyUI-AutomaticCFG\nodes.py", line 457, in experimental_functions cond = comfy.samplers.calc_cond_batch(args["model"], [cond_to_use], args["input"], args["timestep"], tmp_model_options)[0] File "D:\Project\ComfyUI\comfy\samplers.py", line 228, in calc_cond_batch output = model.apply_model(input_x, timestep_, **c).chunk(batch_chunks) File "D:\Project\ComfyUI\custom_nodes\ComfyUI-Advanced-ControlNet\adv_control\utils.py", line 69, in apply_model_uncond_cleanup_wrapper return orig_apply_model(self, *args, **kwargs) File "D:\Project\ComfyUI\comfy\model_base.py", line 145, in apply_model model_output = self.diffusion_model(xc, t, context=context, control=control, transformer_options=transformer_options, **extra_conds).float() File "D:\Project\ComfyUI\.venv\lib\site-packages\torch\nn\modules\module.py", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File "D:\Project\ComfyUI\.venv\lib\site-packages\torch\nn\modules\module.py", line 1541, in _call_impl return forward_call(*args, **kwargs) File "D:\Project\ComfyUI\custom_nodes\ComfyUI-Kolors-MZ\hook_comfyui_kolors_v2.py", line 71, in forward result = super().forward(*args, **kwargs) File "D:\Project\ComfyUI\custom_nodes\SeargeSDXL\modules\custom_sdxl_ksampler.py", line 71, in new_unet_forward x0 = old_unet_forward(self, x, timesteps, context, y, control, transformer_options, **kwargs) File "D:\Project\ComfyUI\comfy\ldm\modules\diffusionmodules\openaimodel.py", line 852, in forward emb = emb + self.label_emb(y) File "D:\Project\ComfyUI\.venv\lib\site-packages\torch\nn\modules\module.py", line 1532, in 
_wrapped_call_impl return self._call_impl(*args, **kwargs) File "D:\Project\ComfyUI\.venv\lib\site-packages\torch\nn\modules\module.py", line 1541, in _call_impl return forward_call(*args, **kwargs) File "D:\Project\ComfyUI\.venv\lib\site-packages\torch\nn\modules\container.py", line 217, in forward input = module(input) File "D:\Project\ComfyUI\.venv\lib\site-packages\torch\nn\modules\module.py", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File "D:\Project\ComfyUI\.venv\lib\site-packages\torch\nn\modules\module.py", line 1541, in _call_impl return forward_call(*args, **kwargs) File "D:\Project\ComfyUI\.venv\lib\site-packages\torch\nn\modules\container.py", line 217, in forward input = module(input) File "D:\Project\ComfyUI\.venv\lib\site-packages\torch\nn\modules\module.py", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File "D:\Project\ComfyUI\.venv\lib\site-packages\torch\nn\modules\module.py", line 1541, in _call_impl return forward_call(*args, **kwargs) File "D:\Project\ComfyUI\comfy\ops.py", line 70, in forward return super().forward(*args, **kwargs) File "D:\Project\ComfyUI\.venv\lib\site-packages\torch\nn\modules\linear.py", line 116, in forward return F.linear(input, self.weight, self.bias) RuntimeError: mat1 and mat2 shapes cannot be multiplied (1x3584 and 5632x1280) 2024-11-28T10:39:47.687273 - Prompt executed in 72.30 seconds 2024-11-28T10:40:03.980062 - got prompt 2024-11-28T10:40:04.294867 - Requested to load SDXL 2024-11-28T10:40:04.294867 - Loading 1 new model 2024-11-28T10:40:19.082541 - 100%|██████████████████████████████████████████████████████████████████████████████████| 18/18 [00:14<00:00, 2.11it/s]2024-11-28T10:40:19.082541 - 100%|██████████████████████████████████████████████████████████████████████████████████| 18/18 [00:14<00:00, 1.22it/s]2024-11-28T10:40:19.082541 - 2024-11-28T10:40:19.088760 - Requested to load AutoencoderKL 2024-11-28T10:40:19.089071 - Loading 1 new model 2024-11-28T10:40:20.245170 - loaded completely 0.0 159.55708122253418 True 2024-11-28T10:41:13.841811 - 2024-11-28T10:41:13.921422 - 0: 640x384 1 face, 75.0ms 2024-11-28T10:41:13.921422 - Speed: 0.0ms preprocess, 75.0ms inference, 0.0ms postprocess per image at shape (1, 3, 640, 384) 2024-11-28T10:41:14.021614 - CLIP: [detailed face, detailed eyes]2024-11-28T10:41:14.021614 - 2024-11-28T10:41:14.021614 - Requested to load MochiTEModel_ 2024-11-28T10:41:14.021614 - Loading 1 new model 2024-11-28T10:41:16.368351 - loaded partially 5385.907494354248 5371.38671875 0 2024-11-28T10:41:17.641682 - Detailer: segment upscale for ((103.17801, 142.41855)) | crop region (309, 427) x 2.3983918543456695 -> (741, 1024)2024-11-28T10:41:17.641682 - 2024-11-28T10:41:17.711309 - Requested to load AutoencoderKL 2024-11-28T10:41:17.711309 - Loading 1 new model 2024-11-28T10:41:18.497414 - loaded completely 0.0 159.55708122253418 True 2024-11-28T10:41:18.983237 - Requested to load SDXL 2024-11-28T10:41:18.983237 - Loading 1 new model 2024-11-28T10:41:21.460634 - loaded completely 0.0 4935.9311599731445 True 2024-11-28T10:41:21.491690 - !!! Exception during processing !!! 
'NoneType' object has no attribute 'shape' 2024-11-28T10:41:21.491690 - Traceback (most recent call last): File "D:\Project\ComfyUI\execution.py", line 323, in execute output_data, output_ui, has_subgraph = get_output_data(obj, input_data_all, execution_block_cb=execution_block_cb, pre_execute_cb=pre_execute_cb) File "D:\Project\ComfyUI\execution.py", line 198, in get_output_data return_values = _map_node_over_list(obj, input_data_all, obj.FUNCTION, allow_interrupt=True, execution_block_cb=execution_block_cb, pre_execute_cb=pre_execute_cb) File "D:\Project\ComfyUI\execution.py", line 169, in _map_node_over_list process_inputs(input_dict, i) File "D:\Project\ComfyUI\execution.py", line 158, in process_inputs results.append(getattr(obj, func)(**inputs)) File "D:\Project\ComfyUI\custom_nodes\ComfyUI-Impact-Pack\modules\impact\impact_pack.py", line 591, in doit enhanced_img, cropped_enhanced, cropped_enhanced_alpha, mask, cnet_pil_list = FaceDetailer.enhance_face( File "D:\Project\ComfyUI\custom_nodes\ComfyUI-Impact-Pack\modules\impact\impact_pack.py", line 547, in enhance_face DetailerForEach.do_detail(image, segs, model, clip, vae, guide_size, guide_size_for_bbox, max_size, seed, steps, cfg, File "D:\Project\ComfyUI\custom_nodes\ComfyUI-Impact-Pack\modules\impact\impact_pack.py", line 323, in do_detail enhanced_image, cnet_pils = core.enhance_detail(cropped_image, model, clip, vae, guide_size, guide_size_for_bbox, max_size, File "D:\Project\ComfyUI\custom_nodes\ComfyUI-Impact-Pack\modules\impact\core.py", line 364, in enhance_detail refined_latent = impact_sampling.ksampler_wrapper(model2, seed2, steps2, cfg2, sampler_name2, scheduler2, positive2, negative2, File "D:\Project\ComfyUI\custom_nodes\ComfyUI-Impact-Pack\modules\impact\impact_sampling.py", line 243, in ksampler_wrapper refined_latent = separated_sample(model, True, seed, advanced_steps, cfg, sampler_name, scheduler, File "D:\Project\ComfyUI\custom_nodes\ComfyUI-Impact-Pack\modules\impact\impact_sampling.py", line 216, in separated_sample res = sample_with_custom_noise(model, add_noise, seed, cfg, positive, negative, impact_sampler, sigmas, latent_image, noise=noise, callback=callback) File "D:\Project\ComfyUI\custom_nodes\ComfyUI-Impact-Pack\modules\impact\impact_sampling.py", line 160, in sample_with_custom_noise samples = comfy.sample.sample_custom(model, noise, cfg, sampler, sigmas, positive, negative, latent_image, File "D:\Project\ComfyUI\custom_nodes\ComfyUI-AnimateDiff-Evolved\animatediff\sampling.py", line 420, in motion_sample return orig_comfy_sample(model, noise, *args, **kwargs) File "D:\Project\ComfyUI\custom_nodes\ComfyUI-Advanced-ControlNet\adv_control\sampling.py", line 116, in acn_sample return orig_comfy_sample(model, *args, **kwargs) File "D:\Project\ComfyUI\custom_nodes\ComfyUI-Advanced-ControlNet\adv_control\utils.py", line 117, in uncond_multiplier_check_cn_sample return orig_comfy_sample(model, *args, **kwargs) File "D:\Project\ComfyUI\comfy\sample.py", line 48, in sample_custom samples = comfy.samplers.sample(model, noise, positive, negative, cfg, model.load_device, sampler, sigmas, model_options=model.model_options, latent_image=latent_image, denoise_mask=noise_mask, callback=callback, disable_pbar=disable_pbar, seed=seed) File "D:\Project\ComfyUI\comfy\samplers.py", line 753, in sample return cfg_guider.sample(noise, latent_image, sampler, sigmas, denoise_mask, callback, disable_pbar, seed) File "D:\Project\ComfyUI\comfy\samplers.py", line 740, in sample output = self.inner_sample(noise, latent_image, 
device, sampler, sigmas, denoise_mask, callback, disable_pbar, seed) File "D:\Project\ComfyUI\comfy\samplers.py", line 715, in inner_sample self.conds = process_conds(self.inner_model, noise, self.conds, device, latent_image, denoise_mask, seed) File "D:\Project\ComfyUI\comfy\samplers.py", line 667, in process_conds conds[k] = encode_model_conds(model.extra_conds, conds[k], noise, device, k, latent_image=latent_image, denoise_mask=denoise_mask, seed=seed) File "D:\Project\ComfyUI\comfy\samplers.py", line 578, in encode_model_conds out = model_function(**params) File "D:\Project\ComfyUI\comfy\model_base.py", line 205, in extra_conds adm = self.encode_adm(**kwargs) File "D:\Project\ComfyUI\comfy\model_base.py", line 388, in encode_adm flat = torch.flatten(torch.cat(out)).unsqueeze(dim=0).repeat(clip_pooled.shape[0], 1) AttributeError: 'NoneType' object has no attribute 'shape' 2024-11-28T10:41:21.497016 - Prompt executed in 77.51 seconds 2024-11-28T10:42:02.413530 - got prompt 2024-11-28T10:42:02.608717 - Requested to load SDXL 2024-11-28T10:42:02.608717 - Loading 1 new model 2024-11-28T10:42:17.428726 - 100%|██████████████████████████████████████████████████████████████████████████████████| 18/18 [00:14<00:00, 2.10it/s]2024-11-28T10:42:17.428726 - 100%|██████████████████████████████████████████████████████████████████████████████████| 18/18 [00:14<00:00, 1.22it/s]2024-11-28T10:42:17.429820 - 2024-11-28T10:42:17.430714 - Requested to load AutoencoderKL 2024-11-28T10:42:17.430714 - Loading 1 new model 2024-11-28T10:42:19.036851 - loaded completely 0.0 159.55708122253418 True 2024-11-28T10:42:24.521727 - 2024-11-28T10:42:24.594618 - 0: 640x384 1 face, 55.0ms 2024-11-28T10:42:24.595617 - Speed: 0.0ms preprocess, 55.0ms inference, 4.6ms postprocess per image at shape (1, 3, 640, 384) 2024-11-28T10:42:24.744672 - CLIP: [detailed face, detailed eyes]2024-11-28T10:42:24.744672 - 2024-11-28T10:42:24.746667 - Requested to load SD3ClipModel_ 2024-11-28T10:42:24.746667 - Loading 1 new model 2024-11-28T10:42:26.147600 - loaded completely 0.0 4541.693359375 True 2024-11-28T10:42:26.371885 - Detailer: segment upscale for ((102.4639, 141.64877)) | crop region (307, 424) x 2.4151071934146624 -> (741, 1024)2024-11-28T10:42:26.371885 - 2024-11-28T10:42:27.460366 - Requested to load SDXL 2024-11-28T10:42:27.461368 - Loading 1 new model 2024-11-28T10:42:29.572470 - loaded completely 0.0 4935.9311599731445 True 2024-11-28T10:42:29.582155 - 0%| | 0/20 [00:00<?, ?it/s]2024-11-28T10:42:29.586351 - 0%| | 0/20 [00:00<?, ?it/s]2024-11-28T10:42:29.586351 - 2024-11-28T10:42:29.595935 - !!! Exception during processing !!! 
mat1 and mat2 shapes cannot be multiplied (1x3584 and 5632x1280) 2024-11-28T10:42:29.597098 - Traceback (most recent call last): File "D:\Project\ComfyUI\execution.py", line 323, in execute output_data, output_ui, has_subgraph = get_output_data(obj, input_data_all, execution_block_cb=execution_block_cb, pre_execute_cb=pre_execute_cb) File "D:\Project\ComfyUI\execution.py", line 198, in get_output_data return_values = _map_node_over_list(obj, input_data_all, obj.FUNCTION, allow_interrupt=True, execution_block_cb=execution_block_cb, pre_execute_cb=pre_execute_cb) File "D:\Project\ComfyUI\execution.py", line 169, in _map_node_over_list process_inputs(input_dict, i) File "D:\Project\ComfyUI\execution.py", line 158, in process_inputs results.append(getattr(obj, func)(**inputs)) File "D:\Project\ComfyUI\custom_nodes\ComfyUI-Impact-Pack\modules\impact\impact_pack.py", line 591, in doit enhanced_img, cropped_enhanced, cropped_enhanced_alpha, mask, cnet_pil_list = FaceDetailer.enhance_face( File "D:\Project\ComfyUI\custom_nodes\ComfyUI-Impact-Pack\modules\impact\impact_pack.py", line 547, in enhance_face DetailerForEach.do_detail(image, segs, model, clip, vae, guide_size, guide_size_for_bbox, max_size, seed, steps, cfg, File "D:\Project\ComfyUI\custom_nodes\ComfyUI-Impact-Pack\modules\impact\impact_pack.py", line 323, in do_detail enhanced_image, cnet_pils = core.enhance_detail(cropped_image, model, clip, vae, guide_size, guide_size_for_bbox, max_size, File "D:\Project\ComfyUI\custom_nodes\ComfyUI-Impact-Pack\modules\impact\core.py", line 364, in enhance_detail refined_latent = impact_sampling.ksampler_wrapper(model2, seed2, steps2, cfg2, sampler_name2, scheduler2, positive2, negative2, File "D:\Project\ComfyUI\custom_nodes\ComfyUI-Impact-Pack\modules\impact\impact_sampling.py", line 243, in ksampler_wrapper refined_latent = separated_sample(model, True, seed, advanced_steps, cfg, sampler_name, scheduler, File "D:\Project\ComfyUI\custom_nodes\ComfyUI-Impact-Pack\modules\impact\impact_sampling.py", line 216, in separated_sample res = sample_with_custom_noise(model, add_noise, seed, cfg, positive, negative, impact_sampler, sigmas, latent_image, noise=noise, callback=callback) File "D:\Project\ComfyUI\custom_nodes\ComfyUI-Impact-Pack\modules\impact\impact_sampling.py", line 160, in sample_with_custom_noise samples = comfy.sample.sample_custom(model, noise, cfg, sampler, sigmas, positive, negative, latent_image, File "D:\Project\ComfyUI\custom_nodes\ComfyUI-AnimateDiff-Evolved\animatediff\sampling.py", line 420, in motion_sample return orig_comfy_sample(model, noise, *args, **kwargs) File "D:\Project\ComfyUI\custom_nodes\ComfyUI-Advanced-ControlNet\adv_control\sampling.py", line 116, in acn_sample return orig_comfy_sample(model, *args, **kwargs) File "D:\Project\ComfyUI\custom_nodes\ComfyUI-Advanced-ControlNet\adv_control\utils.py", line 117, in uncond_multiplier_check_cn_sample return orig_comfy_sample(model, *args, **kwargs) File "D:\Project\ComfyUI\comfy\sample.py", line 48, in sample_custom samples = comfy.samplers.sample(model, noise, positive, negative, cfg, model.load_device, sampler, sigmas, model_options=model.model_options, latent_image=latent_image, denoise_mask=noise_mask, callback=callback, disable_pbar=disable_pbar, seed=seed) File "D:\Project\ComfyUI\comfy\samplers.py", line 753, in sample return cfg_guider.sample(noise, latent_image, sampler, sigmas, denoise_mask, callback, disable_pbar, seed) File "D:\Project\ComfyUI\comfy\samplers.py", line 740, in sample output = 
self.inner_sample(noise, latent_image, device, sampler, sigmas, denoise_mask, callback, disable_pbar, seed) File "D:\Project\ComfyUI\comfy\samplers.py", line 719, in inner_sample samples = sampler.sample(self, sigmas, extra_args, callback, noise, latent_image, denoise_mask, disable_pbar) File "D:\Project\ComfyUI\custom_nodes\ComfyUI-TiledDiffusion\utils.py", line 34, in KSAMPLER_sample return orig_fn(*args, **kwargs) File "D:\Project\ComfyUI\comfy\samplers.py", line 624, in sample samples = self.sampler_function(model_k, noise, sigmas, extra_args=extra_args, callback=k_callback, disable=disable_pbar, **self.extra_options) File "D:\Project\ComfyUI\.venv\lib\site-packages\torch\utils\_contextlib.py", line 115, in decorate_context return func(*args, **kwargs) File "D:\Project\ComfyUI\comfy\k_diffusion\sampling.py", line 155, in sample_euler denoised = model(x, sigma_hat * s_in, **extra_args) File "D:\Project\ComfyUI\comfy\samplers.py", line 299, in __call__ out = self.inner_model(x, sigma, model_options=model_options, seed=seed) File "D:\Project\ComfyUI\comfy\samplers.py", line 706, in __call__ return self.predict_noise(*args, **kwargs) File "D:\Project\ComfyUI\comfy\samplers.py", line 709, in predict_noise return sampling_function(self.inner_model, x, timestep, self.conds.get("negative", None), self.conds.get("positive", None), self.cfg, model_options=model_options, seed=seed) File "D:\Project\ComfyUI\custom_nodes\ComfyUI-AutomaticCFG\nodes.py", line 65, in sampling_function_patched cfg_result = x - model_options["sampler_cfg_function"](args) File "D:\Project\ComfyUI\custom_nodes\ComfyUI-AutomaticCFG\nodes.py", line 645, in automatic_cfg_function cond_pred = experimental_functions(cond_pred, cond_exp_method, cond_exp_value, cond_exp_normalize, self.previous_cond_pred, previous_sigma, sigma.item(), sigmax, attention_modifiers_positive, args, model_options_copy, eval_string_cond) File "D:\Project\ComfyUI\custom_nodes\ComfyUI-AutomaticCFG\nodes.py", line 457, in experimental_functions cond = comfy.samplers.calc_cond_batch(args["model"], [cond_to_use], args["input"], args["timestep"], tmp_model_options)[0] File "D:\Project\ComfyUI\comfy\samplers.py", line 228, in calc_cond_batch output = model.apply_model(input_x, timestep_, **c).chunk(batch_chunks) File "D:\Project\ComfyUI\custom_nodes\ComfyUI-Advanced-ControlNet\adv_control\utils.py", line 69, in apply_model_uncond_cleanup_wrapper return orig_apply_model(self, *args, **kwargs) File "D:\Project\ComfyUI\comfy\model_base.py", line 145, in apply_model model_output = self.diffusion_model(xc, t, context=context, control=control, transformer_options=transformer_options, **extra_conds).float() File "D:\Project\ComfyUI\.venv\lib\site-packages\torch\nn\modules\module.py", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File "D:\Project\ComfyUI\.venv\lib\site-packages\torch\nn\modules\module.py", line 1541, in _call_impl return forward_call(*args, **kwargs) File "D:\Project\ComfyUI\custom_nodes\ComfyUI-Kolors-MZ\hook_comfyui_kolors_v2.py", line 71, in forward result = super().forward(*args, **kwargs) File "D:\Project\ComfyUI\custom_nodes\SeargeSDXL\modules\custom_sdxl_ksampler.py", line 71, in new_unet_forward x0 = old_unet_forward(self, x, timesteps, context, y, control, transformer_options, **kwargs) File "D:\Project\ComfyUI\comfy\ldm\modules\diffusionmodules\openaimodel.py", line 852, in forward emb = emb + self.label_emb(y) File "D:\Project\ComfyUI\.venv\lib\site-packages\torch\nn\modules\module.py", line 1532, in 
_wrapped_call_impl return self._call_impl(*args, **kwargs) File "D:\Project\ComfyUI\.venv\lib\site-packages\torch\nn\modules\module.py", line 1541, in _call_impl return forward_call(*args, **kwargs) File "D:\Project\ComfyUI\.venv\lib\site-packages\torch\nn\modules\container.py", line 217, in forward input = module(input) File "D:\Project\ComfyUI\.venv\lib\site-packages\torch\nn\modules\module.py", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File "D:\Project\ComfyUI\.venv\lib\site-packages\torch\nn\modules\module.py", line 1541, in _call_impl return forward_call(*args, **kwargs) File "D:\Project\ComfyUI\.venv\lib\site-packages\torch\nn\modules\container.py", line 217, in forward input = module(input) File "D:\Project\ComfyUI\.venv\lib\site-packages\torch\nn\modules\module.py", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File "D:\Project\ComfyUI\.venv\lib\site-packages\torch\nn\modules\module.py", line 1541, in _call_impl return forward_call(*args, **kwargs) File "D:\Project\ComfyUI\comfy\ops.py", line 70, in forward return super().forward(*args, **kwargs) File "D:\Project\ComfyUI\.venv\lib\site-packages\torch\nn\modules\linear.py", line 116, in forward return F.linear(input, self.weight, self.bias) RuntimeError: mat1 and mat2 shapes cannot be multiplied (1x3584 and 5632x1280) 2024-11-28T10:42:29.601503 - Prompt executed in 27.17 seconds 2024-11-28T10:48:22.303593 - got prompt 2024-11-28T10:48:22.585470 - 2024-11-28T10:48:22.605366 - 0: 640x384 1 face, 12.1ms 2024-11-28T10:48:22.605912 - Speed: 1.6ms preprocess, 12.1ms inference, 2.0ms postprocess per image at shape (1, 3, 640, 384) 2024-11-28T10:48:22.884321 - CLIP: [detailed face, detailed eyes]2024-11-28T10:48:22.884321 - 2024-11-28T10:48:22.884321 - Requested to load SD3ClipModel_ 2024-11-28T10:48:22.884321 - Loading 1 new model 2024-11-28T10:48:25.514873 - loaded completely 0.0 4541.693359375 True 2024-11-28T10:48:25.727239 - Detailer: segment upscale for ((102.4639, 141.64877)) | crop region (307, 424) x 2.4151071934146624 -> (741, 1024)2024-11-28T10:48:25.727239 - 2024-11-28T10:48:25.788059 - Requested to load AutoencoderKL 2024-11-28T10:48:25.788059 - Loading 1 new model 2024-11-28T10:48:26.170999 - loaded completely 0.0 159.55708122253418 True 2024-11-28T10:48:26.654043 - Requested to load SDXL 2024-11-28T10:48:26.654043 - Loading 1 new model 2024-11-28T10:48:29.140029 - loaded completely 0.0 4935.9311599731445 True 2024-11-28T10:48:29.151271 - 0%| | 0/20 [00:00<?, ?it/s]2024-11-28T10:48:29.156925 - 0%| | 0/20 [00:00<?, ?it/s]2024-11-28T10:48:29.156925 - 2024-11-28T10:48:29.162511 - !!! Exception during processing !!! 
mat1 and mat2 shapes cannot be multiplied (1x3584 and 5632x1280) 2024-11-28T10:48:29.162511 - Traceback (most recent call last): File "D:\Project\ComfyUI\execution.py", line 323, in execute output_data, output_ui, has_subgraph = get_output_data(obj, input_data_all, execution_block_cb=execution_block_cb, pre_execute_cb=pre_execute_cb) File "D:\Project\ComfyUI\execution.py", line 198, in get_output_data return_values = _map_node_over_list(obj, input_data_all, obj.FUNCTION, allow_interrupt=True, execution_block_cb=execution_block_cb, pre_execute_cb=pre_execute_cb) File "D:\Project\ComfyUI\execution.py", line 169, in _map_node_over_list process_inputs(input_dict, i) File "D:\Project\ComfyUI\execution.py", line 158, in process_inputs results.append(getattr(obj, func)(**inputs)) File "D:\Project\ComfyUI\custom_nodes\ComfyUI-Impact-Pack\modules\impact\impact_pack.py", line 591, in doit enhanced_img, cropped_enhanced, cropped_enhanced_alpha, mask, cnet_pil_list = FaceDetailer.enhance_face( File "D:\Project\ComfyUI\custom_nodes\ComfyUI-Impact-Pack\modules\impact\impact_pack.py", line 547, in enhance_face DetailerForEach.do_detail(image, segs, model, clip, vae, guide_size, guide_size_for_bbox, max_size, seed, steps, cfg, File "D:\Project\ComfyUI\custom_nodes\ComfyUI-Impact-Pack\modules\impact\impact_pack.py", line 323, in do_detail enhanced_image, cnet_pils = core.enhance_detail(cropped_image, model, clip, vae, guide_size, guide_size_for_bbox, max_size, File "D:\Project\ComfyUI\custom_nodes\ComfyUI-Impact-Pack\modules\impact\core.py", line 364, in enhance_detail refined_latent = impact_sampling.ksampler_wrapper(model2, seed2, steps2, cfg2, sampler_name2, scheduler2, positive2, negative2, File "D:\Project\ComfyUI\custom_nodes\ComfyUI-Impact-Pack\modules\impact\impact_sampling.py", line 243, in ksampler_wrapper refined_latent = separated_sample(model, True, seed, advanced_steps, cfg, sampler_name, scheduler, File "D:\Project\ComfyUI\custom_nodes\ComfyUI-Impact-Pack\modules\impact\impact_sampling.py", line 216, in separated_sample res = sample_with_custom_noise(model, add_noise, seed, cfg, positive, negative, impact_sampler, sigmas, latent_image, noise=noise, callback=callback) File "D:\Project\ComfyUI\custom_nodes\ComfyUI-Impact-Pack\modules\impact\impact_sampling.py", line 160, in sample_with_custom_noise samples = comfy.sample.sample_custom(model, noise, cfg, sampler, sigmas, positive, negative, latent_image, File "D:\Project\ComfyUI\custom_nodes\ComfyUI-AnimateDiff-Evolved\animatediff\sampling.py", line 420, in motion_sample return orig_comfy_sample(model, noise, *args, **kwargs) File "D:\Project\ComfyUI\custom_nodes\ComfyUI-Advanced-ControlNet\adv_control\sampling.py", line 116, in acn_sample return orig_comfy_sample(model, *args, **kwargs) File "D:\Project\ComfyUI\custom_nodes\ComfyUI-Advanced-ControlNet\adv_control\utils.py", line 117, in uncond_multiplier_check_cn_sample return orig_comfy_sample(model, *args, **kwargs) File "D:\Project\ComfyUI\comfy\sample.py", line 48, in sample_custom samples = comfy.samplers.sample(model, noise, positive, negative, cfg, model.load_device, sampler, sigmas, model_options=model.model_options, latent_image=latent_image, denoise_mask=noise_mask, callback=callback, disable_pbar=disable_pbar, seed=seed) File "D:\Project\ComfyUI\comfy\samplers.py", line 753, in sample return cfg_guider.sample(noise, latent_image, sampler, sigmas, denoise_mask, callback, disable_pbar, seed) File "D:\Project\ComfyUI\comfy\samplers.py", line 740, in sample output = 
self.inner_sample(noise, latent_image, device, sampler, sigmas, denoise_mask, callback, disable_pbar, seed) File "D:\Project\ComfyUI\comfy\samplers.py", line 719, in inner_sample samples = sampler.sample(self, sigmas, extra_args, callback, noise, latent_image, denoise_mask, disable_pbar) File "D:\Project\ComfyUI\custom_nodes\ComfyUI-TiledDiffusion\utils.py", line 34, in KSAMPLER_sample return orig_fn(*args, **kwargs) File "D:\Project\ComfyUI\comfy\samplers.py", line 624, in sample samples = self.sampler_function(model_k, noise, sigmas, extra_args=extra_args, callback=k_callback, disable=disable_pbar, **self.extra_options) File "D:\Project\ComfyUI\.venv\lib\site-packages\torch\utils\_contextlib.py", line 115, in decorate_context return func(*args, **kwargs) File "D:\Project\ComfyUI\comfy\k_diffusion\sampling.py", line 155, in sample_euler denoised = model(x, sigma_hat * s_in, **extra_args) File "D:\Project\ComfyUI\comfy\samplers.py", line 299, in __call__ out = self.inner_model(x, sigma, model_options=model_options, seed=seed) File "D:\Project\ComfyUI\comfy\samplers.py", line 706, in __call__ return self.predict_noise(*args, **kwargs) File "D:\Project\ComfyUI\comfy\samplers.py", line 709, in predict_noise return sampling_function(self.inner_model, x, timestep, self.conds.get("negative", None), self.conds.get("positive", None), self.cfg, model_options=model_options, seed=seed) File "D:\Project\ComfyUI\custom_nodes\ComfyUI-AutomaticCFG\nodes.py", line 65, in sampling_function_patched cfg_result = x - model_options["sampler_cfg_function"](args) File "D:\Project\ComfyUI\custom_nodes\ComfyUI-AutomaticCFG\nodes.py", line 645, in automatic_cfg_function cond_pred = experimental_functions(cond_pred, cond_exp_method, cond_exp_value, cond_exp_normalize, self.previous_cond_pred, previous_sigma, sigma.item(), sigmax, attention_modifiers_positive, args, model_options_copy, eval_string_cond) File "D:\Project\ComfyUI\custom_nodes\ComfyUI-AutomaticCFG\nodes.py", line 457, in experimental_functions cond = comfy.samplers.calc_cond_batch(args["model"], [cond_to_use], args["input"], args["timestep"], tmp_model_options)[0] File "D:\Project\ComfyUI\comfy\samplers.py", line 228, in calc_cond_batch output = model.apply_model(input_x, timestep_, **c).chunk(batch_chunks) File "D:\Project\ComfyUI\custom_nodes\ComfyUI-Advanced-ControlNet\adv_control\utils.py", line 69, in apply_model_uncond_cleanup_wrapper return orig_apply_model(self, *args, **kwargs) File "D:\Project\ComfyUI\comfy\model_base.py", line 145, in apply_model model_output = self.diffusion_model(xc, t, context=context, control=control, transformer_options=transformer_options, **extra_conds).float() File "D:\Project\ComfyUI\.venv\lib\site-packages\torch\nn\modules\module.py", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File "D:\Project\ComfyUI\.venv\lib\site-packages\torch\nn\modules\module.py", line 1541, in _call_impl return forward_call(*args, **kwargs) File "D:\Project\ComfyUI\custom_nodes\ComfyUI-Kolors-MZ\hook_comfyui_kolors_v2.py", line 71, in forward result = super().forward(*args, **kwargs) File "D:\Project\ComfyUI\custom_nodes\SeargeSDXL\modules\custom_sdxl_ksampler.py", line 71, in new_unet_forward x0 = old_unet_forward(self, x, timesteps, context, y, control, transformer_options, **kwargs) File "D:\Project\ComfyUI\comfy\ldm\modules\diffusionmodules\openaimodel.py", line 852, in forward emb = emb + self.label_emb(y) File "D:\Project\ComfyUI\.venv\lib\site-packages\torch\nn\modules\module.py", line 1532, in 
_wrapped_call_impl return self._call_impl(*args, **kwargs) File "D:\Project\ComfyUI\.venv\lib\site-packages\torch\nn\modules\module.py", line 1541, in _call_impl return forward_call(*args, **kwargs) File "D:\Project\ComfyUI\.venv\lib\site-packages\torch\nn\modules\container.py", line 217, in forward input = module(input) File "D:\Project\ComfyUI\.venv\lib\site-packages\torch\nn\modules\module.py", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File "D:\Project\ComfyUI\.venv\lib\site-packages\torch\nn\modules\module.py", line 1541, in _call_impl return forward_call(*args, **kwargs) File "D:\Project\ComfyUI\.venv\lib\site-packages\torch\nn\modules\container.py", line 217, in forward input = module(input) File "D:\Project\ComfyUI\.venv\lib\site-packages\torch\nn\modules\module.py", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File "D:\Project\ComfyUI\.venv\lib\site-packages\torch\nn\modules\module.py", line 1541, in _call_impl return forward_call(*args, **kwargs) File "D:\Project\ComfyUI\comfy\ops.py", line 70, in forward return super().forward(*args, **kwargs) File "D:\Project\ComfyUI\.venv\lib\site-packages\torch\nn\modules\linear.py", line 116, in forward return F.linear(input, self.weight, self.bias) RuntimeError: mat1 and mat2 shapes cannot be multiplied (1x3584 and 5632x1280) 2024-11-28T10:48:29.162511 - Prompt executed in 6.84 seconds
Attached Workflow
Please make sure that workflow does not contain any sensitive information such as API keys or passwords.
Workflow too large. Please manually upload the workflow from local file system.

Additional Context
(Please add any additional context or steps to reproduce the error here)
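The log also records a second, different failure in the same workflow: AttributeError: 'NoneType' object has no attribute 'shape', raised from encode_adm in comfy/model_base.py. The failing expression calls `clip_pooled.shape[0]`, so it fails whenever the conditioning reaching the SDXL-style model carries no pooled text-encoder output. The sketch below only illustrates that failure mode under that assumption; it is not ComfyUI's actual implementation.

```python
import torch

def encode_adm_sketch(clip_pooled, width=1024, height=1024):
    # Mirrors the failing expression from the traceback:
    # torch.flatten(torch.cat(out)).unsqueeze(dim=0).repeat(clip_pooled.shape[0], 1)
    out = [torch.tensor([height]), torch.tensor([width])]
    flat = torch.flatten(torch.cat(out)).unsqueeze(dim=0)
    # When the text encoder returned no pooled output, clip_pooled is None
    # and .shape raises the AttributeError seen in the log.
    return flat.repeat(clip_pooled.shape[0], 1)

try:
    encode_adm_sketch(clip_pooled=None)
except AttributeError as e:
    print(e)  # 'NoneType' object has no attribute 'shape'
```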