Skip to content

Commit

Permalink
Properly call `super().__init__()` at the start of cache-class constructors
Browse files — browse the repository at this point in the history
  • Loading branch information
ArthurZucker committed Jul 23, 2024
1 parent cc19ced commit da262b0
Showing 1 changed file with 3 additions and 2 deletions.
5 changes: 3 additions & 2 deletions src/transformers/cache_utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -467,6 +467,7 @@ class QuantizedCache(DynamicCache):
"""

def __init__(self, cache_config: QuantizedCacheConfig) -> None:
super().__init__()
self._quantized_key_cache: List[torch.Tensor] = []
self._quantized_value_cache: List[torch.Tensor] = []

Expand All @@ -478,8 +479,6 @@ def __init__(self, cache_config: QuantizedCacheConfig) -> None:
self.compute_dtype = cache_config.compute_dtype
self.device = cache_config.device

super().__init__()

def update(
self,
key_states: torch.Tensor,
Expand Down Expand Up @@ -640,6 +639,7 @@ class SinkCache(Cache):
"""

def __init__(self, window_length: int, num_sink_tokens: int) -> None:
super().__init__()
self.key_cache: List[torch.Tensor] = []
self.value_cache: List[torch.Tensor] = []
self.window_length = window_length
Expand Down Expand Up @@ -993,6 +993,7 @@ class EncoderDecoderCache(Cache):
"""

def __init__(self, self_attention_cache: Cache, cross_attention_cache: Cache):
super().__init__()
self.self_attention_cache = self_attention_cache
self.cross_attention_cache = cross_attention_cache

Expand Down

0 comments on commit da262b0

Please sign in to comment.