From aac3696571fcf28123d31fefaf4c8d2cd4245000 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Viktor=20Sz=C3=A9pe?=
Date: Thu, 28 Nov 2024 08:40:59 +0000
Subject: [PATCH] Fix typos

---
 MagicQuill/brushnet_nodes.py              |  6 +++---
 MagicQuill/comfy/cli_args.py              |  2 +-
 MagicQuill/comfy/extra_samplers/uni_pc.py |  2 +-
 MagicQuill/comfy/ldm/cascade/stage_a.py   |  4 ++--
 MagicQuill/comfy/ldm/modules/ema.py       |  4 ++--
 MagicQuill/comfy/model_management.py      |  2 +-
 MagicQuill/comfy/sd.py                    | 10 +++++-----
 7 files changed, 15 insertions(+), 15 deletions(-)

diff --git a/MagicQuill/brushnet_nodes.py b/MagicQuill/brushnet_nodes.py
index 1b7a417..770f657 100644
--- a/MagicQuill/brushnet_nodes.py
+++ b/MagicQuill/brushnet_nodes.py
@@ -201,7 +201,7 @@ def INPUT_TYPES(s):
 
     def model_update(self, model, vae, image, mask, powerpaint, clip, positive, negative, fitting, function, scale, start_at, end_at, save_memory):
 
-        is_SDXL, is_PP = check_compatibilty(model, powerpaint)
+        is_SDXL, is_PP = check_compatibility(model, powerpaint)
         if not is_PP:
             raise Exception("BrushNet model was loaded, please use BrushNet node")
 
@@ -336,7 +336,7 @@ def INPUT_TYPES(s):
 
     def model_update(self, model, vae, image, mask, brushnet, positive, negative, scale, start_at, end_at):
 
-        is_SDXL, is_PP = check_compatibilty(model, brushnet)
+        is_SDXL, is_PP = check_compatibility(model, brushnet)
         if is_PP:
             raise Exception("PowerPaint model was loaded, please use PowerPaint node")
 
@@ -623,7 +623,7 @@ def brushnet_blocks(sd):
 
 
 # Check models compatibility
-def check_compatibilty(model, brushnet):
+def check_compatibility(model, brushnet):
     is_SDXL = False
     is_PP = False
     if isinstance(model.model.model_config, comfy.supported_models.SD15):
diff --git a/MagicQuill/comfy/cli_args.py b/MagicQuill/comfy/cli_args.py
index fb0d37c..2f14a7e 100644
--- a/MagicQuill/comfy/cli_args.py
+++ b/MagicQuill/comfy/cli_args.py
@@ -110,7 +110,7 @@ class LatentPreviewMethod(enum.Enum):
 
 vram_group.add_argument("--cpu", action="store_true", help="To use the CPU for everything (slow).")
 
-parser.add_argument("--disable-smart-memory", action="store_true", help="Force ComfyUI to agressively offload to regular ram instead of keeping models in vram when it can.")
+parser.add_argument("--disable-smart-memory", action="store_true", help="Force ComfyUI to aggressively offload to regular ram instead of keeping models in vram when it can.")
 parser.add_argument("--deterministic", action="store_true", help="Make pytorch use slower deterministic algorithms when it can. Note that this might not make images deterministic in all cases.")
 
 parser.add_argument("--dont-print-server", action="store_true", help="Don't print server output.")
diff --git a/MagicQuill/comfy/extra_samplers/uni_pc.py b/MagicQuill/comfy/extra_samplers/uni_pc.py
index a30d1d0..1c20c47 100644
--- a/MagicQuill/comfy/extra_samplers/uni_pc.py
+++ b/MagicQuill/comfy/extra_samplers/uni_pc.py
@@ -321,7 +321,7 @@ def cond_grad_fn(x, t_input):
 
     def model_fn(x, t_continuous):
         """
-        The noise predicition model function that is used for DPM-Solver.
+        The noise prediction model function that is used for DPM-Solver.
         """
         if t_continuous.reshape((-1,)).shape[0] == 1:
             t_continuous = t_continuous.expand((x.shape[0]))
diff --git a/MagicQuill/comfy/ldm/cascade/stage_a.py b/MagicQuill/comfy/ldm/cascade/stage_a.py
index ca8867e..d1a2247 100644
--- a/MagicQuill/comfy/ldm/cascade/stage_a.py
+++ b/MagicQuill/comfy/ldm/cascade/stage_a.py
@@ -56,8 +56,8 @@ class VectorQuantize(nn.Module):
     def __init__(self, embedding_size, k, ema_decay=0.99, ema_loss=False):
         """
         Takes an input of variable size (as long as the last dimension matches the embedding size).
-        Returns one tensor containing the nearest neigbour embeddings to each of the inputs,
-        with the same size as the input, vq and commitment components for the loss as a touple
+        Returns one tensor containing the nearest neighbour embeddings to each of the inputs,
+        with the same size as the input, vq and commitment components for the loss as a tuple
         in the second output and the indices of the quantized vectors in the third:
         quantized, (vq_loss, commit_loss), indices
         """
diff --git a/MagicQuill/comfy/ldm/modules/ema.py b/MagicQuill/comfy/ldm/modules/ema.py
index bded250..29fbd64 100644
--- a/MagicQuill/comfy/ldm/modules/ema.py
+++ b/MagicQuill/comfy/ldm/modules/ema.py
@@ -3,14 +3,14 @@
 
 
 class LitEma(nn.Module):
-    def __init__(self, model, decay=0.9999, use_num_upates=True):
+    def __init__(self, model, decay=0.9999, use_num_updates=True):
         super().__init__()
         if decay < 0.0 or decay > 1.0:
             raise ValueError('Decay must be between 0 and 1')
 
         self.m_name2s_name = {}
         self.register_buffer('decay', torch.tensor(decay, dtype=torch.float32))
-        self.register_buffer('num_updates', torch.tensor(0, dtype=torch.int) if use_num_upates
+        self.register_buffer('num_updates', torch.tensor(0, dtype=torch.int) if use_num_updates
                              else torch.tensor(-1, dtype=torch.int))
 
         for name, p in model.named_parameters():
diff --git a/MagicQuill/comfy/model_management.py b/MagicQuill/comfy/model_management.py
index 0471932..5d5f813 100644
--- a/MagicQuill/comfy/model_management.py
+++ b/MagicQuill/comfy/model_management.py
@@ -505,7 +505,7 @@ def unet_offload_device():
     else:
         return torch.device("cpu")
 
-def unet_inital_load_device(parameters, dtype):
+def unet_initial_load_device(parameters, dtype):
     torch_dev = get_torch_device()
     if vram_state == VRAMState.HIGH_VRAM:
         return torch_dev
diff --git a/MagicQuill/comfy/sd.py b/MagicQuill/comfy/sd.py
index cfbf8fa..6b8db4c 100644
--- a/MagicQuill/comfy/sd.py
+++ b/MagicQuill/comfy/sd.py
@@ -248,7 +248,7 @@ def __init__(self, sd=None, device=None, config=None, dtype=None):
                 self.process_input = lambda audio: audio
                 self.working_dtypes = [torch.float16, torch.bfloat16, torch.float32]
             else:
-                logging.warning("WARNING: No VAE weights detected, VAE not initalized.")
+                logging.warning("WARNING: No VAE weights detected, VAE not initialized.")
                 self.first_stage_model = None
                 return
         else:
@@ -521,9 +521,9 @@ def load_checkpoint_guess_config(ckpt_path, output_vae=True, output_clip=True, o
         clipvision = clip_vision.load_clipvision_from_sd(sd, model_config.clip_vision_prefix, True)
 
     if output_model:
-        inital_load_device = model_management.unet_inital_load_device(parameters, unet_dtype)
+        initial_load_device = model_management.unet_initial_load_device(parameters, unet_dtype)
         offload_device = model_management.unet_offload_device()
-        model = model_config.get_model(sd, diffusion_model_prefix, device=inital_load_device)
+        model = model_config.get_model(sd, diffusion_model_prefix, device=initial_load_device)
         model.load_model_weights(sd, diffusion_model_prefix)
 
     if output_vae:
@@ -555,8 +555,8 @@ def load_checkpoint_guess_config(ckpt_path, output_vae=True, output_clip=True, o
         logging.debug("left over keys: {}".format(left_over))
 
     if output_model:
-        model_patcher = comfy.model_patcher.ModelPatcher(model, load_device=load_device, offload_device=model_management.unet_offload_device(), current_device=inital_load_device)
-        if inital_load_device != torch.device("cpu"):
+        model_patcher = comfy.model_patcher.ModelPatcher(model, load_device=load_device, offload_device=model_management.unet_offload_device(), current_device=initial_load_device)
+        if initial_load_device != torch.device("cpu"):
             logging.info("loaded straight to GPU")
             model_management.load_model_gpu(model_patcher)