From 37a1cb35b87acdc4cf7528b8b1ed6da27d244e52 Mon Sep 17 00:00:00 2001
From: sayakpaul
Date: Fri, 10 May 2024 13:47:46 +0200
Subject: [PATCH] fix

---
 src/transformers/models/clip/modeling_clip.py | 7 +------
 1 file changed, 1 insertion(+), 6 deletions(-)

diff --git a/src/transformers/models/clip/modeling_clip.py b/src/transformers/models/clip/modeling_clip.py
index f50df5b1b99204..14d6e7608f3892 100644
--- a/src/transformers/models/clip/modeling_clip.py
+++ b/src/transformers/models/clip/modeling_clip.py
@@ -642,12 +642,7 @@ class CLIPEncoder(nn.Module):
     def __init__(self, config: CLIPConfig, attn_implementation="eager"):
         super().__init__()
         self.config = config
-        self.layers = nn.ModuleList(
-            [
-                CLIPEncoderLayer(config, attn_implementation=attn_implementation)
-                for _ in range(config.num_hidden_layers)
-            ]
-        )
+        self.layers = nn.ModuleList([CLIPEncoderLayer(config) for _ in range(config.num_hidden_layers)])
         self.gradient_checkpointing = False

     def forward(
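
Note: after this change, CLIPEncoder no longer forwards an attn_implementation argument to each CLIPEncoderLayer, so the attention backend is presumably resolved from the model config rather than per layer. A minimal usage sketch under that assumption follows; the checkpoint id and the "sdpa" choice are illustrative, not part of this patch.

    from transformers import CLIPModel

    # Select the attention backend once, at load time, via from_pretrained();
    # the layers pick it up from the config instead of a constructor argument.
    model = CLIPModel.from_pretrained(
        "openai/clip-vit-base-patch32",
        attn_implementation="sdpa",  # illustrative; "eager" remains the default
    )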