Code optimization #28

Open · wants to merge 2 commits into base: main
12 changes: 6 additions & 6 deletions ip_adapter/attention_processor.py
@@ -91,14 +91,14 @@ class IPAttnProcessor(nn.Module):
            The context length of the image features.
    """

-    def __init__(self, hidden_size, cross_attention_dim=None, scale=1.0, num_tokens=4, skip=False):
+    def __init__(self, hidden_size, cross_attention_dim=None, scale=1.0, num_tokens=4, selected=True):
        super().__init__()

        self.hidden_size = hidden_size
        self.cross_attention_dim = cross_attention_dim
        self.scale = scale
        self.num_tokens = num_tokens
-        self.skip = skip
+        self.selected = selected

        self.to_k_ip = nn.Linear(cross_attention_dim or hidden_size, hidden_size, bias=False)
        self.to_v_ip = nn.Linear(cross_attention_dim or hidden_size, hidden_size, bias=False)
@@ -155,7 +155,7 @@ def __call__(
        hidden_states = torch.bmm(attention_probs, value)
        hidden_states = attn.batch_to_head_dim(hidden_states)

-        if not self.skip:
+        if self.selected:
            # for ip-adapter
            ip_key = self.to_k_ip(ip_hidden_states)
            ip_value = self.to_v_ip(ip_hidden_states)
@@ -289,7 +289,7 @@ class IPAttnProcessor2_0(torch.nn.Module):
            The context length of the image features.
    """

-    def __init__(self, hidden_size, cross_attention_dim=None, scale=1.0, num_tokens=4, skip=False):
+    def __init__(self, hidden_size, cross_attention_dim=None, scale=1.0, num_tokens=4, selected=True):
        super().__init__()

        if not hasattr(F, "scaled_dot_product_attention"):
@@ -299,7 +299,7 @@ def __init__(self, hidden_size, cross_attention_dim=None, scale=1.0, num_tokens=
        self.cross_attention_dim = cross_attention_dim
        self.scale = scale
        self.num_tokens = num_tokens
-        self.skip = skip
+        self.selected = selected

        self.to_k_ip = nn.Linear(cross_attention_dim or hidden_size, hidden_size, bias=False)
        self.to_v_ip = nn.Linear(cross_attention_dim or hidden_size, hidden_size, bias=False)
@@ -370,7 +370,7 @@ def __call__(
        hidden_states = hidden_states.transpose(1, 2).reshape(batch_size, -1, attn.heads * head_dim)
        hidden_states = hidden_states.to(query.dtype)

-        if not self.skip:
+        if self.selected:
            # for ip-adapter
            ip_key = self.to_k_ip(ip_hidden_states)
            ip_value = self.to_v_ip(ip_hidden_states)
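For context, a minimal sketch of how the renamed flag reads at the call site: a processor built with selected=True runs the extra image-prompt branch in __call__, while selected=False reduces it to plain text cross-attention (the old skip=True case). The hidden_size and cross_attention_dim values below are assumptions for illustration, not taken from this PR.

# Illustrative only: sizes are assumed placeholders.
import torch
from ip_adapter.attention_processor import IPAttnProcessor

# This layer applies the image-prompt (IP) attention branch in __call__.
ip_proc = IPAttnProcessor(
    hidden_size=1280,
    cross_attention_dim=2048,
    scale=1.0,
    num_tokens=4,
    selected=True,
)

# This layer skips the to_k_ip / to_v_ip branch and behaves like plain
# text cross-attention (what skip=True used to express).
text_only_proc = IPAttnProcessor(
    hidden_size=1280,
    cross_attention_dim=2048,
    scale=1.0,
    num_tokens=4,
    selected=False,
)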
22 changes: 7 additions & 15 deletions ip_adapter/ip_adapter.py
@@ -113,21 +113,13 @@ def set_ip_adapter(self):
                    if block_name in name:
                        selected = True
                        break
-                if selected:
-                    attn_procs[name] = IPAttnProcessor(
-                        hidden_size=hidden_size,
-                        cross_attention_dim=cross_attention_dim,
-                        scale=1.0,
-                        num_tokens=self.num_tokens,
-                    ).to(self.device, dtype=torch.float16)
-                else:
-                    attn_procs[name] = IPAttnProcessor(
-                        hidden_size=hidden_size,
-                        cross_attention_dim=cross_attention_dim,
-                        scale=1.0,
-                        num_tokens=self.num_tokens,
-                        skip=True
-                    ).to(self.device, dtype=torch.float16)
+                attn_procs[name] = IPAttnProcessor(
+                    hidden_size=hidden_size,
+                    cross_attention_dim=cross_attention_dim,
+                    scale=1.0,
+                    num_tokens=self.num_tokens,
+                    selected=selected
+                ).to(self.device, dtype=torch.float16)
        unet.set_attn_processor(attn_procs)
        if hasattr(self.pipe, "controlnet"):
            if isinstance(self.pipe.controlnet, MultiControlNetModel):
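A small sketch of what the consolidated call amounts to: the block-name match drives selected, and a single constructor call covers both of the former if and else branches. The target_blocks value, layer names, and sizes below are hypothetical, and the any(...) expression is just an equivalent shorthand for the for/break loop kept in the diff above.

# Sketch only: target_blocks, layer names, and sizes are made-up placeholders.
import torch
from ip_adapter.attention_processor import IPAttnProcessor

target_blocks = ["up_blocks.0.attentions.1"]
layer_shapes = {
    "up_blocks.0.attentions.1.transformer_blocks.0.attn2.processor": (1280, 2048),
    "down_blocks.2.attentions.1.transformer_blocks.0.attn2.processor": (1280, 2048),
}

attn_procs = {}
for name, (hidden_size, cross_attention_dim) in layer_shapes.items():
    # True only for layers whose name matches one of the target blocks.
    selected = any(block_name in name for block_name in target_blocks)
    attn_procs[name] = IPAttnProcessor(
        hidden_size=hidden_size,
        cross_attention_dim=cross_attention_dim,
        scale=1.0,
        num_tokens=4,
        selected=selected,
    ).to("cpu", dtype=torch.float16)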