File size: 910 Bytes
4be1181
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
import streamlit as st
from transformers import pipeline
import torch

# Load the Whisper ASR pipeline once and reuse it across Streamlit reruns.
#
# Streamlit re-executes this entire script on every widget interaction, so
# constructing the pipeline at module level would reload the model each time.
# @st.cache_resource memoizes the loaded pipeline for the app's lifetime.
model_id = "openai/whisper-tiny.en"


@st.cache_resource
def _load_asr_pipeline(model_name: str):
    """Build the automatic-speech-recognition pipeline on GPU if available.

    Cached by Streamlit so the (potentially large) model is downloaded and
    instantiated only once per server process.
    """
    device = "cuda" if torch.cuda.is_available() else "cpu"
    return pipeline("automatic-speech-recognition", model=model_name, device=device)


# Module-level name kept so the rest of the script is unchanged.
pipe = _load_asr_pipeline(model_id)

def transcribe_audio(audio_file):
    """Transcribe an uploaded audio file with the cached Whisper pipeline.

    Args:
        audio_file: A file-like object (e.g. a Streamlit UploadedFile)
            whose raw bytes are fed to the ASR pipeline.

    Returns:
        The pipeline's result dict (notably the ``'text'`` key).
    """
    # The transformers ASR pipeline accepts raw audio bytes directly;
    # ffmpeg handles decoding of wav/mp3/ogg under the hood.
    raw_bytes = audio_file.read()
    return pipe(raw_bytes)

# --- Streamlit interface -------------------------------------------------
# Page flow: title -> file uploader -> explicit "Transcribe" button so the
# (relatively slow) model call only runs on user request, not on upload.
st.title("Speech to Text with Whisper")
audio_file = st.file_uploader("Upload an audio file", type=['wav', 'mp3', 'ogg'])

if audio_file is not None:
    # Display a button to transcribe the audio
    if st.button('Transcribe'):
        # Plain string here: the original used an f-string with no
        # placeholders (ruff F541) — same message, no f-prefix.
        with st.spinner('Transcribing audio...'):
            transcription = transcribe_audio(audio_file)
            # Pipeline result is a dict; 'text' holds the transcript.
            st.text_area("Transcription", transcription['text'], height=150)