Fix typos #65

Open · wants to merge 1 commit into base: main
6 changes: 3 additions & 3 deletions MagicQuill/brushnet_nodes.py
@@ -201,7 +201,7 @@ def INPUT_TYPES(s):

def model_update(self, model, vae, image, mask, powerpaint, clip, positive, negative, fitting, function, scale, start_at, end_at, save_memory):

- is_SDXL, is_PP = check_compatibilty(model, powerpaint)
+ is_SDXL, is_PP = check_compatibility(model, powerpaint)
if not is_PP:
raise Exception("BrushNet model was loaded, please use BrushNet node")

@@ -336,7 +336,7 @@ def INPUT_TYPES(s):

def model_update(self, model, vae, image, mask, brushnet, positive, negative, scale, start_at, end_at):

- is_SDXL, is_PP = check_compatibilty(model, brushnet)
+ is_SDXL, is_PP = check_compatibility(model, brushnet)

if is_PP:
raise Exception("PowerPaint model was loaded, please use PowerPaint node")
@@ -623,7 +623,7 @@ def brushnet_blocks(sd):


# Check models compatibility
- def check_compatibilty(model, brushnet):
+ def check_compatibility(model, brushnet):
is_SDXL = False
is_PP = False
if isinstance(model.model.model_config, comfy.supported_models.SD15):
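Both call sites in this file are updated in the same commit, so nothing inside brushnet_nodes.py breaks. If any code outside this file imported the old misspelled name, a temporary alias could keep it working during a transition; the sketch below is a hypothetical shim, not part of this PR, and the import path is assumed from the file location.

# Hypothetical compatibility shim (not in this PR): keep the old, misspelled
# name importable for one deprecation cycle while pointing callers at the fix.
import warnings

from MagicQuill.brushnet_nodes import check_compatibility  # assumed import path


def check_compatibilty(model, brushnet):
    warnings.warn(
        "check_compatibilty is deprecated; use check_compatibility instead",
        DeprecationWarning,
        stacklevel=2,
    )
    return check_compatibility(model, brushnet)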
2 changes: 1 addition & 1 deletion MagicQuill/comfy/cli_args.py
@@ -110,7 +110,7 @@ class LatentPreviewMethod(enum.Enum):
vram_group.add_argument("--cpu", action="store_true", help="To use the CPU for everything (slow).")


parser.add_argument("--disable-smart-memory", action="store_true", help="Force ComfyUI to agressively offload to regular ram instead of keeping models in vram when it can.")
parser.add_argument("--disable-smart-memory", action="store_true", help="Force ComfyUI to aggressively offload to regular ram instead of keeping models in vram when it can.")
parser.add_argument("--deterministic", action="store_true", help="Make pytorch use slower deterministic algorithms when it can. Note that this might not make images deterministic in all cases.")

parser.add_argument("--dont-print-server", action="store_true", help="Don't print server output.")
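For reference, a minimal sketch of how these store_true flags behave once parsed. The parser below is a stand-in built with stdlib argparse, not MagicQuill's actual cli_args module.

# Stand-alone illustration of the two flags touched above (hypothetical parser).
import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--disable-smart-memory", action="store_true",
                    help="Force ComfyUI to aggressively offload to regular ram instead of keeping models in vram when it can.")
parser.add_argument("--deterministic", action="store_true",
                    help="Make pytorch use slower deterministic algorithms when it can.")

args = parser.parse_args(["--disable-smart-memory", "--deterministic"])
assert args.disable_smart_memory is True and args.deterministic is True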
2 changes: 1 addition & 1 deletion MagicQuill/comfy/extra_samplers/uni_pc.py
@@ -321,7 +321,7 @@ def cond_grad_fn(x, t_input):

def model_fn(x, t_continuous):
"""
- The noise predicition model function that is used for DPM-Solver.
+ The noise prediction model function that is used for DPM-Solver.
"""
if t_continuous.reshape((-1,)).shape[0] == 1:
t_continuous = t_continuous.expand((x.shape[0]))
4 changes: 2 additions & 2 deletions MagicQuill/comfy/ldm/cascade/stage_a.py
@@ -56,8 +56,8 @@ class VectorQuantize(nn.Module):
def __init__(self, embedding_size, k, ema_decay=0.99, ema_loss=False):
"""
Takes an input of variable size (as long as the last dimension matches the embedding size).
- Returns one tensor containing the nearest neigbour embeddings to each of the inputs,
- with the same size as the input, vq and commitment components for the loss as a touple
+ Returns one tensor containing the nearest neighbour embeddings to each of the inputs,
+ with the same size as the input, vq and commitment components for the loss as a tuple
in the second output and the indices of the quantized vectors in the third:
quantized, (vq_loss, commit_loss), indices
"""
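Going by the constructor signature and the corrected docstring, a hedged usage sketch; the import path and the example sizes are assumptions, and the return unpacking follows the docstring.

# Hypothetical usage of VectorQuantize, matching the documented return shape:
# quantized, (vq_loss, commit_loss), indices
import torch
from MagicQuill.comfy.ldm.cascade.stage_a import VectorQuantize  # assumed import path

vq = VectorQuantize(embedding_size=256, k=8192)
x = torch.randn(4, 16, 256)                        # last dim must equal embedding_size
quantized, (vq_loss, commit_loss), indices = vq(x)
assert quantized.shape == x.shape                  # same size as the input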
4 changes: 2 additions & 2 deletions MagicQuill/comfy/ldm/modules/ema.py
@@ -3,14 +3,14 @@


class LitEma(nn.Module):
- def __init__(self, model, decay=0.9999, use_num_upates=True):
+ def __init__(self, model, decay=0.9999, use_num_updates=True):
super().__init__()
if decay < 0.0 or decay > 1.0:
raise ValueError('Decay must be between 0 and 1')

self.m_name2s_name = {}
self.register_buffer('decay', torch.tensor(decay, dtype=torch.float32))
- self.register_buffer('num_updates', torch.tensor(0, dtype=torch.int) if use_num_upates
+ self.register_buffer('num_updates', torch.tensor(0, dtype=torch.int) if use_num_updates
else torch.tensor(-1, dtype=torch.int))

for name, p in model.named_parameters():
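The renamed use_num_updates flag controls whether the decay is warmed up from the update count. Below is a small, self-contained sketch of the conventional EMA rule such helpers implement; it is an illustration under that assumption, not the file's exact code.

# Conventional EMA update with decay warm-up (illustrative only).
import torch

def effective_decay(base_decay: float, num_updates: int) -> float:
    # With update counting enabled, the decay ramps from ~0 toward base_decay.
    return min(base_decay, (1 + num_updates) / (10 + num_updates))

def ema_step(shadow: torch.Tensor, param: torch.Tensor, decay: float) -> torch.Tensor:
    # shadow <- shadow - (1 - decay) * (shadow - param)
    return shadow - (1.0 - decay) * (shadow - param.detach())

shadow = torch.zeros(3)
param = torch.ones(3)
shadow = ema_step(shadow, param, effective_decay(0.9999, num_updates=1))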
2 changes: 1 addition & 1 deletion MagicQuill/comfy/model_management.py
@@ -505,7 +505,7 @@ def unet_offload_device():
else:
return torch.device("cpu")

- def unet_inital_load_device(parameters, dtype):
+ def unet_initial_load_device(parameters, dtype):
torch_dev = get_torch_device()
if vram_state == VRAMState.HIGH_VRAM:
return torch_dev
10 changes: 5 additions & 5 deletions MagicQuill/comfy/sd.py
@@ -248,7 +248,7 @@ def __init__(self, sd=None, device=None, config=None, dtype=None):
self.process_input = lambda audio: audio
self.working_dtypes = [torch.float16, torch.bfloat16, torch.float32]
else:
logging.warning("WARNING: No VAE weights detected, VAE not initalized.")
logging.warning("WARNING: No VAE weights detected, VAE not initialized.")
self.first_stage_model = None
return
else:
@@ -521,9 +521,9 @@ def load_checkpoint_guess_config(ckpt_path, output_vae=True, output_clip=True, o
clipvision = clip_vision.load_clipvision_from_sd(sd, model_config.clip_vision_prefix, True)

if output_model:
- inital_load_device = model_management.unet_inital_load_device(parameters, unet_dtype)
+ initial_load_device = model_management.unet_initial_load_device(parameters, unet_dtype)
offload_device = model_management.unet_offload_device()
- model = model_config.get_model(sd, diffusion_model_prefix, device=inital_load_device)
+ model = model_config.get_model(sd, diffusion_model_prefix, device=initial_load_device)
model.load_model_weights(sd, diffusion_model_prefix)

if output_vae:
@@ -555,8 +555,8 @@ def load_checkpoint_guess_config(ckpt_path, output_vae=True, output_clip=True, o
logging.debug("left over keys: {}".format(left_over))

if output_model:
- model_patcher = comfy.model_patcher.ModelPatcher(model, load_device=load_device, offload_device=model_management.unet_offload_device(), current_device=inital_load_device)
- if inital_load_device != torch.device("cpu"):
+ model_patcher = comfy.model_patcher.ModelPatcher(model, load_device=load_device, offload_device=model_management.unet_offload_device(), current_device=initial_load_device)
+ if initial_load_device != torch.device("cpu"):
logging.info("loaded straight to GPU")
model_management.load_model_gpu(model_patcher)

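For context, a hedged sketch of calling load_checkpoint_guess_config. The output_vae and output_clip keywords appear in the hunk header above; the four-value return unpacking, the import alias, and the checkpoint path are assumptions based on typical ComfyUI-style usage, not confirmed by this diff.

# Hypothetical call site (import path, return unpacking, and file path are assumptions).
from MagicQuill.comfy import sd as comfy_sd

model_patcher, clip, vae, clipvision = comfy_sd.load_checkpoint_guess_config(
    "models/checkpoints/sd15.safetensors",  # assumed checkpoint location
    output_vae=True,
    output_clip=True,
)
# With enough VRAM, the model may be loaded straight to the GPU, as logged above.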