Spaces:
Runtime error
Runtime error
File size: 1,596 Bytes
ffe71b0 e8f1a46 5c7add5 8f2192a 402ee04 ffe71b0 df575df ffe6cbc df575df ffe71b0 5c7add5 8f2192a 332a470 402ee04 8f2192a ffe71b0 30f568b ffe71b0 402ee04 ffe71b0 e8f1a46 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 |
import streamlit as st
import stable_whisper
import json
import torch
import soundfile as sf
import librosa
from io import BytesIO
# Model selection UI: let the user pick a Whisper checkpoint size.
model_options = ["base", "small", "medium", "large", "large-v2"]
default_model = "small"
model_name = st.selectbox("Select a model", options=model_options, index=model_options.index(default_model))


@st.cache_resource
def _load_model(name: str):
    """Load a stable-whisper model, cached per name across Streamlit reruns.

    Without caching, the (potentially multi-GB) model is reloaded on every
    widget interaction, since Streamlit re-executes the whole script.
    """
    return stable_whisper.load_model(name)


# Load the selected model (served from cache after the first run).
model = _load_model(model_name)
# Create a file uploader for the audio file
audiofile = st.file_uploader("Upload an audio file", type=["mp3", "wav"])
# Create a button to run the prediction
# Run the transcription when the user clicks the button.
if st.button('Transcribe'):
    if audiofile is not None:
        # Read the uploaded audio into a numpy array.
        audio_data, sample_rate = sf.read(BytesIO(audiofile.read()))
        # Whisper expects mono audio; soundfile returns shape
        # (frames, channels) for multi-channel files, so downmix by
        # averaging the channels.
        if audio_data.ndim > 1:
            audio_data = audio_data.mean(axis=1)
        # Resample to the 16 kHz rate Whisper models were trained on.
        expected_sample_rate = 16000
        if sample_rate != expected_sample_rate:
            audio_data = librosa.resample(audio_data, orig_sr=sample_rate, target_sr=expected_sample_rate)
        # Whisper operates on float32 tensors.
        audio_data = torch.from_numpy(audio_data).float()
        # Transcribe the audio file.
        result = model.transcribe(audio_data)
        # Normalize the result into a JSON-serializable dict for display.
        if isinstance(result, stable_whisper.WhisperResult):
            result_json = result.to_dict()
        elif isinstance(result, str):
            # Only parse when the result is actually a JSON string;
            # json.loads on a dict would raise TypeError.
            result_json = json.loads(result)
        else:
            # Already a dict-like payload; display as-is.
            result_json = result
        st.json(result_json)
    else:
        st.write("Please upload an audio file.")