Skip to content

Commit

Permalink
Fix style
Browse files Browse the repository at this point in the history
  • Loading branch information
NielsRogge committed Nov 20, 2023
1 parent 874c9de commit 6713474
Show file tree
Hide file tree
Showing 2 changed files with 3 additions and 17 deletions.
14 changes: 1 addition & 13 deletions src/transformers/models/siglip/configuration_siglip.py
Original file line number Diff line number Diff line change
Expand Up @@ -94,7 +94,6 @@ def __init__(
vocab_size=49408,
hidden_size=512,
intermediate_size=2048,
projection_dim=512,
num_hidden_layers=12,
num_attention_heads=8,
max_position_embeddings=64,
Expand All @@ -115,7 +114,6 @@ def __init__(
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.intermediate_size = intermediate_size
self.projection_dim = projection_dim
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.max_position_embeddings = max_position_embeddings
Expand Down Expand Up @@ -201,7 +199,6 @@ def __init__(
self,
hidden_size=768,
intermediate_size=3072,
projection_dim=512,
num_hidden_layers=12,
num_attention_heads=12,
num_channels=3,
Expand All @@ -218,7 +215,6 @@ def __init__(

self.hidden_size = hidden_size
self.intermediate_size = intermediate_size
self.projection_dim = projection_dim
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.num_channels = num_channels
Expand Down Expand Up @@ -264,10 +260,6 @@ class SiglipConfig(PretrainedConfig):
Dictionary of configuration options used to initialize [`SiglipTextConfig`].
vision_config (`dict`, *optional*):
Dictionary of configuration options used to initialize [`SiglipVisionConfig`].
projection_dim (`int`, *optional*, defaults to 512):
    Dimensionality of text and vision projection layers.
logit_scale_init_value (`float`, *optional*, defaults to 2.6592):
    The initial value of the *logit_scale* parameter. Default is used as per the original Siglip implementation.
kwargs (*optional*):
Dictionary of keyword arguments.
Expand Down Expand Up @@ -297,9 +289,7 @@ class SiglipConfig(PretrainedConfig):

model_type = "siglip"

def __init__(
self, text_config=None, vision_config=None, projection_dim=512, logit_scale_init_value=2.6592, **kwargs
):
def __init__(self, text_config=None, vision_config=None, **kwargs):
# If `_config_dict` exist, we use them for the backward compatibility.
# We pop out these 2 attributes before calling `super().__init__` to avoid them being saved (which causes a lot
# of confusion!).
Expand Down Expand Up @@ -381,8 +371,6 @@ def __init__(
self.text_config = SiglipTextConfig(**text_config)
self.vision_config = SiglipVisionConfig(**vision_config)

self.projection_dim = projection_dim
self.logit_scale_init_value = logit_scale_init_value
self.initializer_factor = 1.0

@classmethod
Expand Down
6 changes: 2 additions & 4 deletions src/transformers/models/siglip/tokenization_siglip.py
Original file line number Diff line number Diff line change
Expand Up @@ -16,7 +16,6 @@


import os
import re
import warnings
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple
Expand Down Expand Up @@ -156,7 +155,6 @@ def get_spm_processor(self):
tokenizer.LoadFromSerializedProto(sp_model)
return tokenizer


@property
def vocab_size(self):
return self.sp_model.get_piece_size()
Expand Down Expand Up @@ -209,8 +207,8 @@ def create_token_type_ids_from_sequences(
self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
) -> List[int]:
"""
Create a mask from the two sequences passed to be used in a sequence-pair classification task. Siglip does not make
use of token type ids, therefore a list of zeros is returned.
Create a mask from the two sequences passed to be used in a sequence-pair classification task. Siglip does not
make use of token type ids, therefore a list of zeros is returned.
Args:
token_ids_0 (`List[int]`):
Expand Down

0 comments on commit 6713474

Please sign in to comment.