pip install datasets
pip install transformers
pip install gradio torch

from transformers import WhisperProcessor, WhisperForConditionalGeneration
from datasets import load_dataset
import gradio as gr

# Load the pretrained Whisper checkpoint and its processor (feature extractor + tokenizer)
processor = WhisperProcessor.from_pretrained("openai/whisper-large")
model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-large")
# Clear the forced decoder ids so the model detects the language and task on its own
model.config.forced_decoder_ids = None
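
# Optional: pin the language and task instead of relying on auto-detection.
# The values below are illustrative ("french" is just an example choice):
# forced_ids = processor.get_decoder_prompt_ids(language="french", task="transcribe")
# predicted_ids = model.generate(input_features, forced_decoder_ids=forced_ids)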
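
# Gradio's numpy audio component delivers 16-bit PCM at the browser's native
# rate (often 44.1 or 48 kHz), while Whisper's feature extractor expects mono
# float audio at 16 kHz. The helper below is this guide's own minimal sketch
# (plain-NumPy linear interpolation, with a hypothetical name prepare_waveform);
# for production quality, prefer a real resampler such as librosa.resample or torchaudio.
import numpy as np

def prepare_waveform(waveform, sampling_rate, target_rate=16000):
    # Integer PCM -> float32 in [-1.0, 1.0]
    if np.issubdtype(waveform.dtype, np.integer):
        waveform = waveform.astype(np.float32) / np.iinfo(waveform.dtype).max
    # Stereo -> mono
    if waveform.ndim > 1:
        waveform = waveform.mean(axis=1)
    # Naive linear-interpolation resample to target_rate
    if sampling_rate != target_rate:
        target_len = int(len(waveform) * target_rate / sampling_rate)
        waveform = np.interp(
            np.linspace(0.0, len(waveform) - 1.0, num=target_len),
            np.arange(len(waveform)),
            waveform,
        )
    return waveform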

def transcribe_audio(audio_data):
    # With type="numpy", Gradio passes a (sample_rate, waveform) tuple
    sampling_rate, waveform = audio_data
    # Normalize to 16 kHz mono float (helper defined above)
    waveform = prepare_waveform(waveform, sampling_rate)

    # Turn the raw waveform into the log-mel features Whisper consumes
    input_features = processor(waveform, sampling_rate=16000, return_tensors="pt").input_features

    # Generate token ids, then decode them back into text
    predicted_ids = model.generate(input_features)
    transcription = processor.batch_decode(predicted_ids, skip_special_tokens=True)
    return transcription[0]
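
# Quick sanity check before launching the UI, putting the datasets import to
# use: one clip from the dummy LibriSpeech split that the Hugging Face Whisper
# docs use for exactly this purpose (comment out to skip the extra download).
ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
sample = ds[0]["audio"]
print(transcribe_audio((sample["sampling_rate"], sample["array"])))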

# type="numpy" delivers the (sample_rate, waveform) tuple transcribe_audio expects
audio_input = gr.Audio(type="numpy")
gr.Interface(fn=transcribe_audio, inputs=audio_input, outputs="text").launch()
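
# Practical notes: whisper-large is heavy, so move the model to a GPU if one is
# available (model.to("cuda")), and launch(share=True) will give you a temporary
# public link when running from a notebook.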