Transformers 4.48 #2158

Merged
26 commits merged on Jan 29, 2025

Commits (26)
5190280
test
IlyasMoutawwakil Jan 16, 2025
6a03d76
testing tensor cache x)
IlyasMoutawwakil Jan 20, 2025
7207215
fix logger
IlyasMoutawwakil Jan 20, 2025
6261094
condition cache class usage
IlyasMoutawwakil Jan 20, 2025
822066d
update opset for beit and data2vec vision and skip flattened/fused pk…
IlyasMoutawwakil Jan 20, 2025
3ab38fd
style
IlyasMoutawwakil Jan 20, 2025
d713e5a
fix args patcher
IlyasMoutawwakil Jan 20, 2025
bf4d1f3
fix modernbert testing
IlyasMoutawwakil Jan 20, 2025
230c3a0
adapt to new whisper returned generation length
IlyasMoutawwakil Jan 20, 2025
3d5d9c9
fix is_causal in transformers
IlyasMoutawwakil Jan 20, 2025
96e2714
fix modernbert failures
IlyasMoutawwakil Jan 20, 2025
78a2dba
style
IlyasMoutawwakil Jan 20, 2025
967c6e2
traceable cache
IlyasMoutawwakil Jan 20, 2025
1d74388
use pkv index
IlyasMoutawwakil Jan 24, 2025
d452c46
add version guard and clean up other model patcher version guards
IlyasMoutawwakil Jan 24, 2025
5dcab7f
patch sdpa attention in optimum for now
IlyasMoutawwakil Jan 24, 2025
656941a
remove modernbert condition
IlyasMoutawwakil Jan 24, 2025
1bcb38f
style
IlyasMoutawwakil Jan 24, 2025
23fa20e
fix MistralModelPatcher
IlyasMoutawwakil Jan 24, 2025
24c8f4b
correctly patch gpt2 in vision encoder decoder
IlyasMoutawwakil Jan 24, 2025
3694ea4
patch sdpa attention forward everywhere
IlyasMoutawwakil Jan 26, 2025
3d7d586
fix gpt2 cross attention in seq2seq as well
IlyasMoutawwakil Jan 26, 2025
10833d8
moved traceable cache to a file for simplicity of model patcher
IlyasMoutawwakil Jan 29, 2025
9491d17
Apply suggestions from code review
IlyasMoutawwakil Jan 29, 2025
2b73129
style
IlyasMoutawwakil Jan 29, 2025
dea98a0
fix
IlyasMoutawwakil Jan 29, 2025
92 changes: 92 additions & 0 deletions optimum/exporters/onnx/_traceable_cache.py
@@ -0,0 +1,92 @@
from typing import Any, Dict, Optional, Tuple

import torch
from transformers.cache_utils import logger


# Simply removing the nn.Module, same as in https://github.com/huggingface/transformers/pull/35873
class TraceableCache:
    """
    Base, abstract class for all caches. The actual data structure is specific to each subclass.
    """

    def __init__(self):
        super().__init__()

    def update(
        self,
        key_states: torch.Tensor,
        value_states: torch.Tensor,
        layer_idx: int,
        cache_kwargs: Optional[Dict[str, Any]] = None,
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """
        Updates the cache with the new `key_states` and `value_states` for the layer `layer_idx`.

        Parameters:
            key_states (`torch.Tensor`):
                The new key states to cache.
            value_states (`torch.Tensor`):
                The new value states to cache.
            layer_idx (`int`):
                The index of the layer to cache the states for.
            cache_kwargs (`Dict[str, Any]`, `optional`):
                Additional arguments for the cache subclass. These are specific to each subclass and allow new types of
                cache to be created.

        Return:
            A tuple containing the updated key and value states.
        """
        raise NotImplementedError("Make sure to implement `update` in a subclass.")

    def get_seq_length(self, layer_idx: Optional[int] = 0) -> int:
        """Returns the sequence length of the cached states. A layer index can be optionally passed."""
        # TODO: deprecate this function in favor of `cache_position`
        raise NotImplementedError("Make sure to implement `get_seq_length` in a subclass.")

    # Deprecated in favor of max-cache-shape because we want to be specific about what we mean with "max_length".
    # Previously, some cache objects didn't have "max_length" (SlidingWindowCache or SinkCache) because the cache object
    # technically handles an infinite amount of tokens. In the codebase, what we really need to check is the max capacity
    # of certain cache instances, so we change the naming to be more explicit.
    def get_max_length(self) -> Optional[int]:
        logger.warning_once(
            "`get_max_length()` is deprecated for all Cache classes. Use `get_max_cache_shape()` instead. "
            "Calling `get_max_length()` will raise an error from v4.48"
        )
        return self.get_max_cache_shape()

    def get_max_cache_shape(self) -> Optional[int]:
        """Returns the maximum sequence length (i.e. max capacity) of the cache object"""
        raise NotImplementedError("Make sure to implement `get_max_cache_shape` in a subclass.")

    def get_usable_length(self, new_seq_length: int, layer_idx: Optional[int] = 0) -> int:
        """Given the sequence length of the new inputs, returns the usable length of the cache."""
        # Cache without size limit -> all cache is usable
        # Cache with size limit -> if the cache length plus the length of the new inputs is larger than the maximum cache
        # length, we will need to evict part of the cache (and thus not all cache is usable)
        max_length = self.get_max_cache_shape()
        previous_seq_length = self.get_seq_length(layer_idx)
        if max_length is not None and previous_seq_length + new_seq_length > max_length:
            return max_length - new_seq_length
        return previous_seq_length

    def reorder_cache(self, beam_idx: torch.LongTensor):
        """Reorders the cache for beam search, given the selected beam indices."""
        for layer_idx in range(len(self.key_cache)):
            if self.key_cache[layer_idx] != []:
                device = self.key_cache[layer_idx].device
                self.key_cache[layer_idx] = self.key_cache[layer_idx].index_select(0, beam_idx.to(device))
            if self.value_cache[layer_idx] != []:
                device = self.value_cache[layer_idx].device
                self.value_cache[layer_idx] = self.value_cache[layer_idx].index_select(0, beam_idx.to(device))

    @property
    def seen_tokens(self):
        logger.warning_once(
            "The `seen_tokens` attribute is deprecated and will be removed in v4.41. Use the `cache_position` "
            "model input instead."
        )
        if hasattr(self, "_seen_tokens"):
            return self._seen_tokens
        else:
            return None
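
For context, a minimal sketch of how a cache built on this class could look. The `MinimalDynamicCache` name and implementation below are illustrative only (not part of this PR); the import path assumes the `_traceable_cache.py` file added above. The point is that, without the `nn.Module` base, the cache holds plain tensors and Python lists, so tracing for ONNX export does not register it as a submodule.

# Hypothetical example, not part of the PR.
from typing import Any, Dict, List, Optional, Tuple

import torch

from optimum.exporters.onnx._traceable_cache import TraceableCache


class MinimalDynamicCache(TraceableCache):
    def __init__(self):
        super().__init__()
        self.key_cache: List[torch.Tensor] = []
        self.value_cache: List[torch.Tensor] = []

    def update(
        self,
        key_states: torch.Tensor,
        value_states: torch.Tensor,
        layer_idx: int,
        cache_kwargs: Optional[Dict[str, Any]] = None,
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        # Append new states for an unseen layer, otherwise concatenate along the sequence dim.
        if len(self.key_cache) <= layer_idx:
            self.key_cache.append(key_states)
            self.value_cache.append(value_states)
        else:
            self.key_cache[layer_idx] = torch.cat([self.key_cache[layer_idx], key_states], dim=-2)
            self.value_cache[layer_idx] = torch.cat([self.value_cache[layer_idx], value_states], dim=-2)
        return self.key_cache[layer_idx], self.value_cache[layer_idx]

    def get_seq_length(self, layer_idx: Optional[int] = 0) -> int:
        if len(self.key_cache) <= layer_idx:
            return 0
        return self.key_cache[layer_idx].shape[-2]

    def get_max_cache_shape(self) -> Optional[int]:
        # Dynamic cache: no maximum capacity.
        return None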
7 changes: 3 additions & 4 deletions optimum/exporters/onnx/model_configs.py
@@ -843,7 +843,7 @@ class DeiTOnnxConfig(ViTOnnxConfig):
 
 
 class BeitOnnxConfig(ViTOnnxConfig):
-    DEFAULT_ONNX_OPSET = 11
+    DEFAULT_ONNX_OPSET = 14  # now uses F.scaled_dot_product_attention by default for torch>=2.1.1.
 
 
 class ConvNextOnnxConfig(ViTOnnxConfig):
@@ -1573,13 +1573,12 @@ class Data2VecTextOnnxConfig(DistilBertOnnxConfig):
 
 
 class Data2VecVisionOnnxConfig(ViTOnnxConfig):
-    DEFAULT_ONNX_OPSET = 11
+    DEFAULT_ONNX_OPSET = 14  # now uses F.scaled_dot_product_attention by default for torch>=2.1.1.
 
 
 class Data2VecAudioOnnxConfig(AudioOnnxConfig):
-    NORMALIZED_CONFIG_CLASS = NormalizedConfig
-    ATOL_FOR_VALIDATION = 1e-4
     DEFAULT_ONNX_OPSET = 14  # now uses F.scaled_dot_product_attention by default for torch>=2.1.1.
+    NORMALIZED_CONFIG_CLASS = NormalizedConfig
 
 
 class PerceiverDummyInputGenerator(DummyVisionInputGenerator):
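
The opset bumps above follow the inline comments: with transformers 4.48 and torch>=2.1.1 these models route attention through F.scaled_dot_product_attention, which generally needs ONNX opset 14 or higher to export. A hedged, standalone sketch of that constraint (toy module, not code from this PR):

# Illustrative only: exporting an SDPA call with opset 14, mirroring the new DEFAULT_ONNX_OPSET.
import torch
import torch.nn.functional as F


class TinySdpa(torch.nn.Module):
    def forward(self, q, k, v):
        return F.scaled_dot_product_attention(q, k, v)


q = k = v = torch.randn(1, 4, 8, 16)
# Exporting with opset 11 may fail, since SDPA decomposes into operators
# that are not all available before opset 14.
torch.onnx.export(TinySdpa(), (q, k, v), "tiny_sdpa.onnx", opset_version=14)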