import torch
from transformers import AutoModelForSpeechSeq2Seq, AutoProcessor
import gradio as gr
import librosa

MODEL_NAME = "EwoutLagendijk/whisper-small-indonesian"

device = "cuda" if torch.cuda.is_available() else "cpu"

# Load the model and processor
model = AutoModelForSpeechSeq2Seq.from_pretrained(MODEL_NAME).to(device)
processor = AutoProcessor.from_pretrained(MODEL_NAME)

# Discourage the repetition loops Whisper is prone to on long audio
model.generation_config.no_repeat_ngram_size = 3


def transcribe_speech_with_timestamps(filepath):
    # Load the audio, resampling to the 16 kHz rate Whisper expects
    audio, sampling_rate = librosa.load(filepath, sr=16000)

    # Whisper sees at most 30 seconds of audio per forward pass,
    # so process longer recordings in 30-second chunks
    chunk_duration = 30  # seconds
    chunk_samples = chunk_duration * sampling_rate

    transcription = []
    for i in range(0, len(audio), chunk_samples):
        chunk = audio[i:i + chunk_samples]
        chunk_start_time = i / sampling_rate  # offset of this chunk in seconds

        # Convert the chunk into log-mel input features
        inputs = processor(
            audio=chunk, sampling_rate=16000, return_tensors="pt"
        ).input_features.to(device)

        # Generate token ids, asking Whisper to emit timestamp tokens;
        # 444 new tokens plus the forced prompt tokens stay within
        # Whisper's 448-token decoder limit
        generated_ids = model.generate(
            inputs,
            max_new_tokens=444,
            language="id",
            task="transcribe",
            return_timestamps=True,
        )

        # Decode the ids into segments with (start, end) offsets in seconds
        decoded = processor.tokenizer.decode(generated_ids[0], output_offsets=True)

        # Shift each segment's offsets by the chunk's position in the file
        for segment in decoded["offsets"]:
            start, end = segment["timestamp"]
            start_time = chunk_start_time + start
            end_time = chunk_start_time + end
            transcription.append(f"[{start_time:.2f}s - {end_time:.2f}s]: {segment['text']}")

    return "\n".join(transcription)


demo = gr.Blocks()

mic_transcribe = gr.Interface(
    fn=transcribe_speech_with_timestamps,
    inputs=gr.Audio(sources=["microphone"], type="filepath"),
    outputs=gr.Textbox(lines=10, label="Transcription with Timestamps"),
)

file_transcribe = gr.Interface(
    fn=transcribe_speech_with_timestamps,
    inputs=gr.Audio(sources=["upload"], type="filepath"),
    outputs=gr.Textbox(lines=10, label="Transcription with Timestamps"),
)

with demo:
    gr.TabbedInterface(
        [mic_transcribe, file_transcribe],
        ["Transcribe Microphone", "Transcribe Audio File"],
    )

demo.launch(debug=True)
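# A minimal sketch of calling the transcriber directly, e.g. for a quick local
# check without the Gradio UI; "sample.wav" is a hypothetical file path, not
# something shipped with this Space. Uncomment to try it (and comment out
# demo.launch above first, since launch(debug=True) blocks):
# print(transcribe_speech_with_timestamps("sample.wav"))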