Spaces:
Runtime error
Runtime error
File size: 1,189 Bytes
dc7d091 e792387 dc7d091 e792387 dc7d091 563e0ed a843b99 dc7d091 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 |
import math

import gradio as gr
import soundfile as sf
import torch
from scipy.signal import resample_poly
from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor
# Load the pre-trained processor and model once at import time.
# This is a Chinese ASR checkpoint fine-tuned from wav2vec2-large-xlsr-53;
# both objects are downloaded from the Hugging Face Hub on first run and
# cached locally thereafter.
processor = Wav2Vec2Processor.from_pretrained("jonatasgrosman/wav2vec2-large-xlsr-53-chinese-zh-cn")
model = Wav2Vec2ForCTC.from_pretrained("jonatasgrosman/wav2vec2-large-xlsr-53-chinese-zh-cn")
def speech_to_text(audio):
    """Transcribe a Chinese speech recording to text.

    Parameters
    ----------
    audio : str
        Path to an audio file on disk (Gradio's ``type="filepath"`` input).

    Returns
    -------
    str
        The decoded transcription of the clip.
    """
    # sf.read returns (frames,) for mono or (frames, channels) for
    # multi-channel files.
    speech, sample_rate = sf.read(audio)

    # Collapse stereo/multi-channel input to mono: the processor and the
    # CTC model expect a single 1-D waveform.
    if speech.ndim > 1:
        speech = speech.mean(axis=1)

    # The XLSR-53 checkpoint was trained on 16 kHz audio; resample anything
    # else so the input matches the model's expected sampling rate instead
    # of silently feeding mismatched features.
    target_rate = 16_000
    if sample_rate != target_rate:
        g = math.gcd(int(sample_rate), target_rate)
        speech = resample_poly(speech, target_rate // g, int(sample_rate) // g)
        sample_rate = target_rate

    # Preprocess the waveform into model-ready tensors.
    inputs = processor(speech, sampling_rate=sample_rate, return_tensors="pt", padding=True)

    # Inference only — no gradient tracking needed.
    with torch.no_grad():
        logits = model(**inputs).logits

    # Greedy CTC decoding: argmax over the vocabulary per frame, then let
    # the processor collapse repeats/blanks into text.
    predicted_ids = torch.argmax(logits, dim=-1)
    transcription = processor.batch_decode(predicted_ids)
    return transcription[0]
# Build the Gradio UI: one audio-upload input wired straight to the
# transcription function, with a plain textbox for the result.
audio_input = gr.Audio(type="filepath")
text_output = gr.Textbox()

iface = gr.Interface(
    fn=speech_to_text,
    inputs=audio_input,
    outputs=text_output,
    title="Chinese Speech Recognition",
    description="Upload an audio file and get the transcribed text using the wav2vec2-large-xlsr-53-chinese-zh-cn model.",
)

# Launch the web app only when run as a script, not when imported.
if __name__ == "__main__":
    iface.launch()
|