From 762f138e9e31fa88bda7f1c6693c66b622de4116 Mon Sep 17 00:00:00 2001
From: Joshua Lochner
Date: Fri, 1 Dec 2023 16:48:26 +0200
Subject: [PATCH 1/2] [CLAP] Replace hard-coded batch size to enable dynamic ONNX export

---
 src/transformers/models/clap/modeling_clap.py | 20 +++++---------------
 1 file changed, 5 insertions(+), 15 deletions(-)

diff --git a/src/transformers/models/clap/modeling_clap.py b/src/transformers/models/clap/modeling_clap.py
index bea7cf2b93ccbd..84a822bac382b0 100644
--- a/src/transformers/models/clap/modeling_clap.py
+++ b/src/transformers/models/clap/modeling_clap.py
@@ -89,24 +89,14 @@ def window_partition(hidden_states, window_size):
     return windows
 
 
-# Adapted from https://github.com/LAION-AI/CLAP/blob/6ad05a971ba0622f6acee8c41993e0d02bbed639/src/open_clip/htsat.py#L263
 def window_reverse(windows, window_size, height, width):
     """
-    Args:
-        windows (`torch.FloatTensor` of shape `(num_windows * batch_size, window_size, window_size, num_channels)`):
-            Input windows
-        window_size (`int`):
-            Window size
-        height (`int`):
-            Height of the resized audio
-        width (`int`):
-            Width of the resized audio
+    Merges windows to produce higher resolution features.
     """
-    batch_size = int(windows.shape[0] / (height * width / window_size / window_size))
-
-    hidden_states = windows.view(batch_size, height // window_size, width // window_size, window_size, window_size, -1)
-    hidden_states = hidden_states.permute(0, 1, 3, 2, 4, 5).contiguous().view(batch_size, height, width, -1)
-    return hidden_states
+    num_channels = windows.shape[-1]
+    windows = windows.view(-1, height // window_size, width // window_size, window_size, window_size, num_channels)
+    windows = windows.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, height, width, num_channels)
+    return windows
 
 
 # Copied from transformers.models.roberta.modeling_roberta.create_position_ids_from_input_ids

From c00efa1b9503706a065936b588c37b229c790123 Mon Sep 17 00:00:00 2001
From: Joshua Lochner
Date: Wed, 6 Dec 2023 12:53:30 +0200
Subject: [PATCH 2/2] Add back docstring

---
 src/transformers/models/clap/modeling_clap.py | 10 ++++++++++
 1 file changed, 10 insertions(+)

diff --git a/src/transformers/models/clap/modeling_clap.py b/src/transformers/models/clap/modeling_clap.py
index 84a822bac382b0..b2997e1d49353f 100644
--- a/src/transformers/models/clap/modeling_clap.py
+++ b/src/transformers/models/clap/modeling_clap.py
@@ -89,9 +89,19 @@ def window_partition(hidden_states, window_size):
     return windows
 
 
+# Adapted from https://github.com/LAION-AI/CLAP/blob/6ad05a971ba0622f6acee8c41993e0d02bbed639/src/open_clip/htsat.py#L263
 def window_reverse(windows, window_size, height, width):
     """
     Merges windows to produce higher resolution features.
+    Args:
+        windows (`torch.FloatTensor` of shape `(num_windows * batch_size, window_size, window_size, num_channels)`):
+            Input windows
+        window_size (`int`):
+            Window size
+        height (`int`):
+            Height of the resized audio
+        width (`int`):
+            Width of the resized audio
     """
     num_channels = windows.shape[-1]
     windows = windows.view(-1, height // window_size, width // window_size, window_size, window_size, num_channels)
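
Note (not part of the patch): a minimal sketch of why the change enables dynamic ONNX export. The original `batch_size = int(windows.shape[0] / (height * width / window_size / window_size))` turns a traced shape into a Python constant, so the exported graph bakes in whatever batch size was used during tracing; reshaping with `-1` leaves the leading dimension symbolic. The toy sizes, module name, and output file name below are illustrative assumptions, not taken from the patch.

    # Sketch: exporting the patched-style reshape with a dynamic batch dimension.
    # Sizes and names here are assumptions for illustration only.
    import torch
    import torch.nn as nn

    HEIGHT, WIDTH, WINDOW_SIZE, CHANNELS = 8, 8, 4, 3  # toy feature-map geometry


    class DynamicWindowReverse(nn.Module):
        """Mirrors the patched window_reverse: no explicit batch_size,
        so the leading dimension stays symbolic during tracing."""

        def forward(self, windows):
            num_channels = windows.shape[-1]
            x = windows.view(
                -1, HEIGHT // WINDOW_SIZE, WIDTH // WINDOW_SIZE, WINDOW_SIZE, WINDOW_SIZE, num_channels
            )
            return x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, HEIGHT, WIDTH, num_channels)


    if __name__ == "__main__":
        num_windows = (HEIGHT // WINDOW_SIZE) * (WIDTH // WINDOW_SIZE)
        # Trace with batch_size = 2; the -1 reshape means other batch sizes still work at runtime.
        windows = torch.randn(num_windows * 2, WINDOW_SIZE, WINDOW_SIZE, CHANNELS)
        torch.onnx.export(
            DynamicWindowReverse(),
            (windows,),
            "window_reverse.onnx",
            input_names=["windows"],
            output_names=["hidden_states"],
            dynamic_axes={
                "windows": {0: "num_windows_x_batch"},
                "hidden_states": {0: "batch_size"},
            },
        )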