# NOTE(review): "Spaces: / Running / Running" below was Hugging Face Spaces
# page chrome captured during extraction, not source code; kept here as a
# comment so the file parses.
# Spaces: Running Running
import soundfile as sf
import streamlit as st
import torch
from transformers import Wav2Vec2Processor, Wav2Vec2ForCTC
# Load the processor and model once at module import time.
# BUG FIX: the original referenced "openbmb/MiniCPM-o-2_6", which is not a
# Wav2Vec2 checkpoint and cannot be loaded through Wav2Vec2Processor /
# Wav2Vec2ForCTC; use the standard English ASR checkpoint instead.
processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-base-960h")
model = Wav2Vec2ForCTC.from_pretrained("facebook/wav2vec2-base-960h")
def transcribe_audio(file_path):
    """Transcribe an audio file to text using the module-level Wav2Vec2 model.

    Parameters
    ----------
    file_path : str or file-like
        Path to an audio file, or a file-like object (e.g. a Streamlit
        ``UploadedFile``) — ``soundfile.read`` accepts both.

    Returns
    -------
    str
        The greedy CTC-decoded transcription of the audio.
    """
    # Load audio; soundfile returns a (frames,) array for mono or a
    # (frames, channels) array for multi-channel audio.
    audio_input, sample_rate = sf.read(file_path)

    # Robustness fix: downmix multi-channel audio to mono, since the
    # processor expects a 1-D waveform.
    if audio_input.ndim > 1:
        audio_input = audio_input.mean(axis=1)

    # NOTE(review): Wav2Vec2 checkpoints expect 16 kHz input; inputs at
    # other rates should be resampled upstream — confirm against callers.
    input_values = processor(
        audio_input, sampling_rate=sample_rate, return_tensors="pt"
    ).input_values

    # Inference without gradient tracking.
    with torch.no_grad():
        logits = model(input_values).logits

    # Greedy CTC decode: argmax over the vocabulary at each frame, then
    # collapse repeats/blanks via the processor's batch decoder.
    predicted_ids = torch.argmax(logits, dim=-1)
    transcription = processor.batch_decode(predicted_ids)
    return transcription[0]
# Streamlit UI: accept an uploaded audio file and display its transcription.
uploaded_file = st.file_uploader("Upload an audio", type=["mp3", "wav"])
if uploaded_file is not None:
    # soundfile accepts file-like objects, so the UploadedFile can be
    # passed straight through without saving it to disk first.
    transcription = transcribe_audio(uploaded_file)
    st.write(transcription)
# Standalone (non-Streamlit) usage example, kept for reference:
# if __name__ == "__main__":
#     audio_file_path = "CAR0005.mp3"
#     transcription = transcribe_audio(audio_file_path)
#     print("Transcription:", transcription)