|
# Install runtime dependencies before importing them.  Using
# ``sys.executable -m pip`` guarantees the packages land in the same
# interpreter that is executing this script (a bare "pip" may belong to
# a different Python), and ``check=True`` aborts immediately on a failed
# install instead of crashing later with an ImportError.
import subprocess
import sys

subprocess.run(
    [sys.executable, "-m", "pip", "install", "datasets", "transformers"],
    check=True,
)

subprocess.run(
    [
        sys.executable, "-m", "pip", "install",
        "torch", "torchvision", "torchaudio",
        "-f", "https://download.pytorch.org/whl/torch_stable.html",
    ],
    check=True,
)
|
|
|
import gradio as gr |
|
from transformers import WhisperProcessor, WhisperForConditionalGeneration |
|
|
|
|
|
# Load the pre-trained Whisper "large" checkpoint once at module import:
# the processor handles audio feature extraction and token decoding, the
# model performs the actual speech-to-text generation.
processor = WhisperProcessor.from_pretrained("openai/whisper-large")

model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-large")

# Clear the forced decoder ids so generate() is not pinned to a fixed
# language/task prompt and can detect them from the audio instead.
model.config.forced_decoder_ids = None
|
|
|
|
|
def transcribe_audio(audio_data):
    """Transcribe speech audio to text using the module-level Whisper model.

    Parameters
    ----------
    audio_data : tuple[int, numpy.ndarray] | array-like
        Gradio's Audio component delivers a ``(sample_rate, samples)``
        tuple; a bare waveform (assumed to already be 16 kHz) is also
        accepted for backward compatibility with direct callers.

    Returns
    -------
    str
        The decoded transcription of the audio.
    """
    import numpy as np

    # Gradio hands audio over as (sample_rate, samples); unpack it.
    if isinstance(audio_data, tuple):
        sample_rate, samples = audio_data
    else:
        sample_rate, samples = 16000, audio_data

    samples = np.asarray(samples)
    # Integer PCM (e.g. int16 from the browser) must be scaled to [-1, 1]
    # floats before feature extraction.
    if np.issubdtype(samples.dtype, np.integer):
        samples = samples.astype(np.float32) / np.iinfo(samples.dtype).max
    else:
        samples = samples.astype(np.float32)

    # Downmix stereo (n_samples, n_channels) to mono.
    if samples.ndim > 1:
        samples = samples.mean(axis=1)

    # Whisper's feature extractor expects 16 kHz input; resample by
    # linear interpolation if the recording rate differs.
    if sample_rate != 16000:
        target_len = int(round(len(samples) * 16000 / sample_rate))
        samples = np.interp(
            np.linspace(0.0, len(samples), num=target_len, endpoint=False),
            np.arange(len(samples)),
            samples,
        )

    # Passing sampling_rate explicitly lets the processor validate the
    # rate instead of silently assuming it.
    input_features = processor(
        samples, sampling_rate=16000, return_tensors="pt"
    ).input_features

    predicted_ids = model.generate(input_features)

    transcription = processor.batch_decode(predicted_ids, skip_special_tokens=True)

    return transcription[0]
|
|
|
|
|
def preprocess_audio(audio_data):
    """Identity pass-through hook: hand the incoming audio back unchanged.

    Kept as an explicit extension point where future preprocessing
    (normalization, trimming, resampling, ...) could be plugged in.
    """
    untouched = audio_data
    return untouched
|
|
|
|
|
# ``gr.Audio`` has no ``preprocess`` constructor argument — passing one
# raises a TypeError on current Gradio releases.  Request numpy output
# so the callback receives the documented (sample_rate, samples) tuple.
audio_input = gr.Audio(type="numpy")

# Wire the transcription function to a simple audio-in / text-out UI.
gr.Interface(fn=transcribe_audio, inputs=audio_input, outputs="text").launch()
|
|