
Commit

mem masks should be on the continuous transformer wrapper
lucidrains committed Jan 16, 2024
1 parent f185da8 commit aa380f1
Showing 2 changed files with 3 additions and 2 deletions.
setup.py · 2 changes: 1 addition & 1 deletion

@@ -3,7 +3,7 @@
 setup(
   name = 'x-transformers',
   packages = find_packages(exclude=['examples']),
-  version = '1.27.6',
+  version = '1.27.7',
   license='MIT',
   description = 'X-Transformers - Pytorch',
   author = 'Phil Wang',
x_transformers/continuous.py · 3 changes: 2 additions & 1 deletion

@@ -84,6 +84,7 @@ def forward(
         mask = None,
         return_attn = False,
         mems = None,
+        mem_masks = None,
         pos = None,
         prepend_embeds = None,
         prepend_mask = None,

@@ -125,7 +126,7 @@ def forward(

         # attention layers

-        x, intermediates = self.attn_layers(x, mask = mask, mems = mems, return_hiddens = True, **kwargs)
+        x, intermediates = self.attn_layers(x, mask = mask, mems = mems, mem_masks = mem_masks, return_hiddens = True, **kwargs)

         # splice out memory tokens
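
The functional change is small: ContinuousTransformerWrapper.forward now accepts a mem_masks argument and passes it through to the underlying attn_layers alongside mems. Below is a minimal usage sketch, not taken from the repository's docs or this commit. It assumes the constructor arguments shown in the x-transformers README (dim_in, dim_out, max_seq_len, attn_layers) and assumes mems / mem_masks are one tensor per attention layer with shapes (batch, mem_len, dim) and (batch, mem_len); the diff itself does not pin down those shapes.

import torch
from x_transformers import ContinuousTransformerWrapper, Encoder

# Constructor arguments follow the README example; dims and depth are arbitrary.
model = ContinuousTransformerWrapper(
    dim_in = 32,
    dim_out = 32,
    max_seq_len = 1024,
    attn_layers = Encoder(
        dim = 512,
        depth = 6,
        heads = 8
    )
)

x = torch.randn(1, 256, 32)
mask = torch.ones(1, 256).bool()

# Assumed memory format: one memory tensor per attention layer, each of shape
# (batch, mem_len, dim), with a matching boolean mask marking valid memory slots.
mems = [torch.randn(1, 16, 512) for _ in range(6)]
mem_masks = [torch.ones(1, 16).bool() for _ in range(6)]

out = model(x, mask = mask, mems = mems, mem_masks = mem_masks)  # (1, 256, 32)

Passing the masks at the wrapper level lets callers mark padded or invalid memory slots without reaching into attn_layers directly, matching how the token mask is already handled.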
