Fix different length for numpy>=1.24.x (#8655)
* fix different length for numpy>=1.24.x

* update numpy version
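
For context on the change: starting with numpy 1.24, constructing an ndarray from sequences of different lengths without dtype=object raises a ValueError (older releases only emitted a VisibleDeprecationWarning), which is why the batched test inputs below now need to be padded to a common length. A minimal, repository-independent illustration of the behavior:

    import numpy as np

    # Batched inputs of different lengths, like the 800/1000/1200-sample test signals.
    ragged = [np.zeros(800), np.zeros(1000), np.zeros(1200)]

    try:
        np.array(ragged)  # numpy >= 1.24 rejects ragged input without dtype=object
    except ValueError as err:
        print("ragged batch rejected:", err)

    np.array(ragged, dtype=object)  # still allowed, but not a usable rectangular batch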
DrownFish19 authored Jun 25, 2024
1 parent 7130c18 commit 381949d
Showing 1 changed file with 8 additions and 8 deletions.
16 changes: 8 additions & 8 deletions tests/transformers/speecht5/test_feature_extraction.py
@@ -155,17 +155,17 @@ def test_call(self):
         encoded_sequences_2 = feat_extract(np_speech_inputs[0], return_tensors="np").input_values
         self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

-        # Test batched
-        encoded_sequences_1 = feat_extract(speech_inputs, return_tensors="np").input_values
-        encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors="np").input_values
+        # Test batched, adding padding=True for numpy version >=1.24.x
+        encoded_sequences_1 = feat_extract(speech_inputs, return_tensors="np", padding=True).input_values
+        encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors="np", padding=True).input_values
         for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
             self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

     def test_zero_mean_unit_variance_normalization_np(self):
         feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
         speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]

-        paddings = ["longest", "max_length", "do_not_pad"]
+        paddings = ["longest", "max_length"] # "do_not_pad" removed for numpy version >=1.24.x
         max_lengths = [None, 1600, None]
         for max_length, padding in zip(max_lengths, paddings):
             processed = feat_extract(speech_inputs, padding=padding, max_length=max_length, return_tensors="np")
@@ -182,7 +182,7 @@ def test_zero_mean_unit_variance_normalization(self):
         lengths = range(800, 1400, 200)
         speech_inputs = [floats_list((1, x))[0] for x in lengths]

-        paddings = ["longest", "max_length", "do_not_pad"]
+        paddings = ["longest", "max_length"] # "do_not_pad" removed for numpy version >=1.24.x
         max_lengths = [None, 1600, None]

         for max_length, padding in zip(max_lengths, paddings):
@@ -261,9 +261,9 @@ def test_call_target(self):
         encoded_sequences_2 = feature_extractor(np_speech_inputs[0], return_tensors="np").input_values
         self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

-        # Test batched
-        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_values
-        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_values
+        # Test batched, adding padding=True for numpy version >=1.24.x
+        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np", padding=True).input_values
+        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np", padding=True).input_values
         for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
             self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

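The padding=True change above effectively asks the feature extractor to pad the inputs to a common length so they stack into one rectangular array before numpy sees them. A rough sketch of that idea in plain numpy, using a hypothetical pad_batch helper that is not part of this repository:

    import numpy as np

    def pad_batch(sequences, pad_value=0.0):
        # Right-pad each sequence with pad_value up to the longest length in the batch.
        max_len = max(len(seq) for seq in sequences)
        return np.stack([
            np.pad(np.asarray(seq, dtype=np.float32), (0, max_len - len(seq)), constant_values=pad_value)
            for seq in sequences
        ])

    batch = pad_batch([np.zeros(800), np.zeros(1000), np.zeros(1200)])
    print(batch.shape)  # (3, 1200): rectangular, so numpy >= 1.24 accepts it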
