Spaces:
Runtime error
Runtime error
File size: 1,085 Bytes
a19090b 4e7301a e59b53b 4e7301a |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 |
# import gradio as gr
# from googletrans import Translator
# import torch
# # Initialize Translator
# from transformers import pipeline
# translator = Translator()
# MODEL_NAME = "openai/whisper-base"
# device = 0 if torch.cuda.is_available() else "cpu"
# pipe = pipeline(
# task="automatic-speech-recognition",
# model=MODEL_NAME,
# chunk_length_s=30,
# device=device,
# )
# def transcribe_audio(audio):
# text = pipe(audio)["text"]
# return text
# # return translated_text
# audio_record = gr.inputs.Audio(source='microphone', label='Record Audio')
# output_text = gr.outputs.Textbox(label='Transcription')
# interface = gr.Interface(fn=transcribe_audio, inputs=audio_record, outputs=output_text)
# interface.launch()
import gradio as gr
from transformers import pipeline
# Load the Whisper ASR pipeline once at import time (first run downloads the
# model weights from the Hugging Face Hub; subsequent runs use the local cache).
modelo = pipeline("automatic-speech-recognition", model="openai/whisper-base")
def transcribe(audio):
    """Transcribe a recorded audio clip to text using the Whisper pipeline.

    Parameters:
        audio: Filepath of the recorded audio (Gradio's ``type="filepath"``
            input), or ``None`` when the user submits without recording.

    Returns:
        The transcribed text, or ``""`` when no audio was provided.
    """
    # Gradio delivers None if the user hits Submit without recording anything;
    # passing None to the pipeline would raise, so return an empty result.
    if audio is None:
        return ""
    return modelo(audio)["text"]
# Build and launch the Gradio web UI: microphone recording in, transcription out.
# NOTE(review): the Space header reports "Runtime error"; `gr.Audio(source=...)`
# was removed in Gradio 4.x in favour of `sources=[...]`, which is the likely
# cause — updated to the supported keyword.
gr.Interface(
    fn=transcribe,
    inputs=[gr.Audio(sources=["microphone"], type="filepath")],
    outputs=["textbox"],
).launch()
|