From b5da5e87a46c601597684fe02e9ae1371c0b8ab3 Mon Sep 17 00:00:00 2001
From: Mahmoud Ashraf
Date: Fri, 15 Nov 2024 14:54:26 +0300
Subject: [PATCH] use correct seek for sequential and batched segments

---
 faster_whisper/transcribe.py | 14 ++++++--------
 1 file changed, 6 insertions(+), 8 deletions(-)

diff --git a/faster_whisper/transcribe.py b/faster_whisper/transcribe.py
index 0ae454da..80e5d92c 100644
--- a/faster_whisper/transcribe.py
+++ b/faster_whisper/transcribe.py
@@ -174,7 +174,9 @@ def forward(self, features, chunks_metadata, **forward_params):
                         compression_ratio=get_compression_ratio(
                             self.tokenizer.decode(subsegment["tokens"])
                         ),
-                        seek=seek,
+                        seek=int(
+                            chunk_metadata["start_time"] * self.model.frames_per_second
+                        ),
                     )
                     for subsegment in subsegments
                 ]
@@ -497,7 +499,7 @@ def _batched_segments_generator(
             for segment in result:
                 seg_idx += 1
                 yield Segment(
-                    seek=int(result[-1]["end"] * self.model.frames_per_second),
+                    seek=segment["seek"],
                     id=seg_idx,
                     text=segment["text"],
                     start=round(segment["start"], 3),
@@ -1319,7 +1321,7 @@ def next_words_segment(segments: List[dict]) -> Optional[dict]:
 
                 yield Segment(
                     id=idx,
-                    seek=seek,
+                    seek=previous_seek,
                     start=segment["start"],
                     end=segment["end"],
                     text=text,
@@ -1586,11 +1588,7 @@ def add_word_timestamps(
 
         for segment_idx, segment in enumerate(segments):
            word_index = 0
-            time_offset = (
-                segment[0]["seek"]
-                * self.feature_extractor.hop_length
-                / self.feature_extractor.sampling_rate
-            )
+            time_offset = segment[0]["seek"] / self.frames_per_second
             median_duration, max_duration = median_max_durations[segment_idx]
             for subsegment_idx, subsegment in enumerate(segment):
                 saved_tokens = 0
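
Note: the conversion this patch relies on is that Segment.seek is measured in feature
frames, so a chunk start time in seconds maps to a seek via frames_per_second, and a seek
maps back to a time offset by the inverse. The sketch below is illustrative only (not part
of the patch) and assumes the standard Whisper feature-extractor defaults
(sampling_rate=16000, hop_length=160, hence 100 frames per second); the helper names are
hypothetical.

# Illustrative sketch; assumes Whisper's default feature-extractor settings.
SAMPLING_RATE = 16000   # audio samples per second
HOP_LENGTH = 160        # samples per feature frame
FRAMES_PER_SECOND = SAMPLING_RATE // HOP_LENGTH  # 100 feature frames per second

def seek_from_start_time(start_time: float) -> int:
    # Mirrors the batched path: chunk start time (seconds) -> seek (frames).
    return int(start_time * FRAMES_PER_SECOND)

def time_offset_from_seek(seek: int) -> float:
    # Mirrors add_word_timestamps: seek (frames) -> time offset (seconds).
    return seek / FRAMES_PER_SECOND

assert seek_from_start_time(30.0) == 3000
assert time_offset_from_seek(3000) == 30.0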