# Source: Hugging Face Space by cadasme, commit 9234e12
# ("feat: incluye spinner loading icon y output block para los resultados")
# Import the required libraries
import streamlit as st
import whisper
import speech_recognition as sr
from pydub import AudioSegment
import os
# Function to transcribe audio using OpenAI Whisper
def transcribe_whisper(model_name, file_path):
model = whisper.load_model(model_name)
result = model.transcribe(file_path)
return result["text"]
# --- Transcription backend: Google Speech API via speech_recognition ---
def transcribe_speech_recognition(file_path):
    """Transcribe an audio file using Google's free Speech API.

    Args:
        file_path: Path to a file readable by sr.AudioFile (e.g. WAV).

    Returns:
        The recognized text as a string.
    """
    recognizer = sr.Recognizer()
    with sr.AudioFile(file_path) as audio_source:
        # Calibrate for background noise before capturing the audio.
        recognizer.adjust_for_ambient_noise(audio_source)
        captured = recognizer.record(audio_source)
    return recognizer.recognize_google(captured)
# Function to convert an mp3 file to wav
def convert_mp3_to_wav(mp3_path):
    """Convert an MP3 file to WAV, written next to the original.

    Uses os.path.splitext instead of str.replace so that a path containing
    '.mp3' somewhere other than the extension (e.g. 'song.mp3.old/a.mp3')
    only has its final extension swapped.

    Args:
        mp3_path: Path to the source .mp3 file.

    Returns:
        Path of the newly written .wav file.
    """
    audio = AudioSegment.from_mp3(mp3_path)
    wav_path = os.path.splitext(mp3_path)[0] + '.wav'
    audio.export(wav_path, format="wav")
    return wav_path
def main():
    """Streamlit entry point: upload an audio file and transcribe it.

    Lets the user pick between a local Whisper model and the Google Speech
    API, converts MP3 uploads to WAV when the Google backend is chosen
    (speech_recognition cannot read MP3 directly), and shows the result
    in a text area.
    """
    st.title('Transcriptor de Audio')

    uploaded_file = st.file_uploader("Sube tu archivo de audio para transcribir", type=['wav', 'mp3'])
    if uploaded_file is not None:
        file_details = {"FileName": uploaded_file.name, "FileType": uploaded_file.type, "FileSize": uploaded_file.size}
        st.write(file_details)

        # Persist the upload to a temp directory. Create the directory first:
        # without this, open() raises FileNotFoundError on a fresh deployment.
        os.makedirs("temp", exist_ok=True)
        file_path = os.path.join("temp", uploaded_file.name)
        with open(file_path, "wb") as f:
            f.write(uploaded_file.getbuffer())

        # NOTE: the original strings were mojibake (UTF-8 Spanish decoded as
        # GBK, e.g. 'm茅todo'); restored to the intended accented Spanish.
        st.write("Archivo de audio cargado correctamente. Por favor, selecciona el método de transcripción.")
        transcription_method = st.selectbox('Escoge el método de transcripción', ('OpenAI Whisper', 'Google Speech API'))

        model_name = None
        if transcription_method == 'OpenAI Whisper':
            model_name = st.selectbox('Escoge el modelo de Whisper', ('base', 'small', 'medium', 'large', 'tiny'))
        elif transcription_method == 'Google Speech API' and file_path.endswith('.mp3'):
            # Convert MP3 to WAV: speech_recognition's AudioFile only
            # accepts WAV/AIFF/FLAC input.
            file_path = convert_mp3_to_wav(file_path)

        if st.button('Transcribir'):
            with st.spinner('Transcribiendo...'):
                if transcription_method == 'OpenAI Whisper':
                    transcript = transcribe_whisper(model_name, file_path)
                else:
                    transcript = transcribe_speech_recognition(file_path)
            st.text_area('Resultado de la Transcripción:', transcript, height=200)


if __name__ == "__main__":
    main()