Create app.py
app.py
ADDED
@@ -0,0 +1,28 @@
import gradio as gr
import torch
import torchaudio
from transformers import AutoModelForCTC, AutoProcessor, pipeline

# Load the MMS ASR model. facebook/mms-1b-all is a Wav2Vec2 CTC model,
# so it needs AutoModelForCTC rather than a speech-seq2seq class.
MODEL_NAME = "facebook/mms-1b-all"
device = "cuda" if torch.cuda.is_available() else "cpu"
torch_dtype = torch.float16 if device == "cuda" else torch.float32  # fp16 only on GPU

processor = AutoProcessor.from_pretrained(MODEL_NAME)
model = AutoModelForCTC.from_pretrained(MODEL_NAME, torch_dtype=torch_dtype).to(device)
asr_pipeline = pipeline(
    "automatic-speech-recognition",
    model=model,
    tokenizer=processor.tokenizer,
    feature_extractor=processor.feature_extractor,
    torch_dtype=torch_dtype,
    device=0 if device == "cuda" else -1,
)

# Speech-to-text function
def transcribe(audio):
    if audio is None:  # Nothing recorded yet
        return ""
    waveform, sr = torchaudio.load(audio)
    waveform = waveform.mean(dim=0)  # Downmix to mono
    if sr != 16000:
        waveform = torchaudio.transforms.Resample(sr, 16000)(waveform)  # MMS expects 16 kHz audio
    result = asr_pipeline({"array": waveform.numpy(), "sampling_rate": 16000})
    return result["text"]

# Gradio UI
gr.Interface(
    fn=transcribe,
    inputs=gr.Audio(sources=["microphone"], type="filepath"),  # `sources` in Gradio 4+; older releases used `source`
    outputs=gr.Text(label="Transcription"),
    title="Real-time Speech-to-Text",
    description="Speak into your microphone and see the transcribed text.",
).launch()
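A quick way to sanity-check the app logic outside the Gradio UI is to call the transcribe function directly (a sketch; sample.wav is a hypothetical local recording):

    print(transcribe("sample.wav"))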