import streamlit as st
import os
from pydub import AudioSegment
from pydub.silence import split_on_silence
from dotenv import load_dotenv
from tempfile import NamedTemporaryFile
from docx import Document
import whisper

# Load environment variables from .env file (if needed for other configurations)
load_dotenv()


@st.cache_resource
def load_whisper_model():
    """
    Load the Whisper model once and cache it for future use.
    You can choose the model size: "tiny", "base", "small", "medium", or "large".
    """
    model = whisper.load_model("base")
    return model


# Load the Whisper model globally so it's only loaded once.
model = load_whisper_model()


def split_audio_on_silence(audio_file_path, min_silence_len=500, silence_thresh=-40, keep_silence=250):
    """
    Split an audio file into chunks using silence detection.

    Args:
        audio_file_path (str): Path to the audio file.
        min_silence_len (int): Minimum length of silence (in ms) required for a split.
        silence_thresh (int): The volume (in dBFS) below which audio is considered silence.
        keep_silence (int): Amount of silence (in ms) to retain at the beginning and end of each chunk.

    Returns:
        list: List of AudioSegment chunks.
    """
    audio = AudioSegment.from_file(audio_file_path)
    chunks = split_on_silence(
        audio,
        min_silence_len=min_silence_len,
        silence_thresh=silence_thresh,
        keep_silence=keep_silence
    )
    return chunks


def transcribe(audio_file):
    """
    Transcribe an audio file using the locally loaded Whisper model.

    Args:
        audio_file (str): Path to the audio file.

    Returns:
        str: Transcribed text.
    """
    result = model.transcribe(audio_file, language="en")
    return result["text"]


def process_audio_chunks(audio_chunks):
    """
    Process and transcribe each audio chunk in sequence.

    Args:
        audio_chunks (list): List of AudioSegment chunks.

    Returns:
        str: Combined transcription from all chunks.
    """
    transcriptions = []
    min_length_ms = 100  # Minimum chunk length required for processing

    for i, chunk in enumerate(audio_chunks):
        if len(chunk) < min_length_ms:
            st.warning(f"Chunk {i} is too short to be processed.")
            continue

        # Save the chunk temporarily as a WAV file
        with NamedTemporaryFile(delete=False, suffix=".wav") as temp_audio_file:
            chunk.export(temp_audio_file.name, format="wav")
            temp_audio_file_path = temp_audio_file.name

        transcription = transcribe(temp_audio_file_path)
        if transcription:
            transcriptions.append(transcription)
            st.write(f"Transcription for chunk {i}: {transcription}")

        os.remove(temp_audio_file_path)

    return " ".join(transcriptions)


def save_transcription_to_docx(transcription, audio_file_path):
    """
    Save the transcription as a .docx file.

    Args:
        transcription (str): Transcribed text.
        audio_file_path (str): Path to the original audio file, used for naming the output.

    Returns:
        str: Path to the saved .docx file.
    """
    base_name = os.path.splitext(os.path.basename(audio_file_path))[0]
    output_file_name = f"{base_name}_full_transcription.docx"
    doc = Document()
    doc.add_paragraph(transcription)
    doc.save(output_file_name)
    return output_file_name


st.title("Audio Transcription with Whisper (Local)")

# Allow uploading of audio or video files
uploaded_file = st.file_uploader(
    "Upload an audio or video file",
    type=["wav", "mp3", "ogg", "m4a", "mp4", "mov"]
)

if 'transcription' not in st.session_state:
    st.session_state.transcription = None

if uploaded_file is not None and st.session_state.transcription is None:
    st.audio(uploaded_file)

    # Save the uploaded file temporarily
    file_extension = uploaded_file.name.split(".")[-1]
    temp_audio_file = f"temp_audio_file.{file_extension}"
    with open(temp_audio_file, "wb") as f:
        f.write(uploaded_file.getbuffer())

    # Split and process the audio using silence detection
    with st.spinner('Transcribing...'):
        audio_chunks = split_audio_on_silence(
            temp_audio_file,
            min_silence_len=500,   # adjust based on your audio
            silence_thresh=-40,    # adjust based on ambient noise level
            keep_silence=250       # retains a bit of silence at the edges
        )
        transcription = process_audio_chunks(audio_chunks)

    if transcription:
        st.session_state.transcription = transcription
        st.success('Transcription complete!')
        output_docx_file = save_transcription_to_docx(transcription, uploaded_file.name)
        st.session_state.output_docx_file = output_docx_file

    if os.path.exists(temp_audio_file):
        os.remove(temp_audio_file)

if st.session_state.transcription:
    st.text_area("Transcription", st.session_state.transcription, key="transcription_area_final")

    with open(st.session_state.output_docx_file, "rb") as docx_file:
        st.download_button(
            label="Download Transcription (.docx)",
            data=docx_file,
            file_name=st.session_state.output_docx_file,
            mime='application/vnd.openxmlformats-officedocument.wordprocessingml.document'
        )