From dffd12506d50f0540b8a7f4b36a05d4fb5fed2de Mon Sep 17 00:00:00 2001
From: kijai <40791699+kijai@users.noreply.github.com>
Date: Sun, 12 Jan 2025 19:55:45 +0200
Subject: [PATCH] clarify print

---
 nodes.py       | 4 ++--
 pyproject.toml | 2 +-
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/nodes.py b/nodes.py
index da3528c..cac283f 100644
--- a/nodes.py
+++ b/nodes.py
@@ -96,7 +96,7 @@ def loadmodel(self, model, precision, attention, lora=None):
                               local_dir=model_path,
                               local_dir_use_symlinks=False)
 
-        print(f"using {attention} for attention")
+        print(f"Florence2 using {attention} for attention")
         with patch("transformers.dynamic_module_utils.get_imports", fixed_get_imports): #workaround for unnecessary flash_attn requirement
             model = AutoModelForCausalLM.from_pretrained(model_path, attn_implementation=attention, device_map=device, torch_dtype=dtype,trust_remote_code=True)
         processor = AutoProcessor.from_pretrained(model_path, trust_remote_code=True)
@@ -172,7 +172,7 @@ def loadmodel(self, model, precision, attention, lora=None):
         dtype = {"bf16": torch.bfloat16, "fp16": torch.float16, "fp32": torch.float32}[precision]
         model_path = Path(folder_paths.models_dir, "LLM", model)
         print(f"Loading model from {model_path}")
-        print(f"using {attention} for attention")
+        print(f"Florence2 using {attention} for attention")
         with patch("transformers.dynamic_module_utils.get_imports", fixed_get_imports): #workaround for unnecessary flash_attn requirement
             model = AutoModelForCausalLM.from_pretrained(model_path, attn_implementation=attention, device_map=device, torch_dtype=dtype,trust_remote_code=True)
         processor = AutoProcessor.from_pretrained(model_path, trust_remote_code=True)
diff --git a/pyproject.toml b/pyproject.toml
index 09f2fc8..222c0b0 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,7 +1,7 @@
 [project]
 name = "comfyui-florence2"
 description = "Nodes to use Florence2 VLM for image vision tasks: object detection, captioning, segmentation and ocr"
-version = "1.0.2"
+version = "1.0.3"
 license = "MIT"
 dependencies = ["transformers>=4.38.0"]
 
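
Note on the hunks above: the context lines patch transformers' dynamic-module import check with a helper called fixed_get_imports, per the inline comment a "workaround for unnecessary flash_attn requirement" in Florence2's remote code. This patch does not show that helper's body; the sketch below is a minimal reconstruction of the commonly used workaround, assuming the stock transformers.dynamic_module_utils.get_imports signature. The actual implementation in nodes.py may differ.

# Sketch (not part of the patch): plausible fixed_get_imports body.
from pathlib import Path
from typing import List, Union

from transformers.dynamic_module_utils import get_imports

def fixed_get_imports(filename: Union[str, Path]) -> List[str]:
    """Resolve a remote module's imports, dropping flash_attn so Florence2
    loads even when flash_attn is not installed (e.g. sdpa/eager attention)."""
    imports = get_imports(filename)
    if str(filename).endswith("modeling_florence2.py") and "flash_attn" in imports:
        imports.remove("flash_attn")
    return imports

# Usage mirrors the patched lines above: the fixed resolver is swapped in only
# while from_pretrained() inspects the remote code's dependencies, then the
# original get_imports is restored when the context manager exits.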