# AudioBot / app.py
import streamlit as st
import os
import time
import librosa
import torch
from pydub import AudioSegment
from pydub.silence import split_on_silence
from dotenv import load_dotenv
from tempfile import NamedTemporaryFile
from docx import Document
from transformers import WhisperProcessor, WhisperForConditionalGeneration

# Load environment variables from .env file (if needed for other config)
load_dotenv()
# Create a placeholder for status messages
status_placeholder = st.empty()
# Display status while loading the model
status_placeholder.info("Loading Whisper model from Hugging Face...")

@st.cache_resource
def load_whisper_model():
    """
    Load the Whisper model and processor from Hugging Face.
    Change 'openai/whisper-base' to another variant if needed.
    """
    model_name = "openai/whisper-base"
    processor = WhisperProcessor.from_pretrained(model_name)
    model = WhisperForConditionalGeneration.from_pretrained(model_name)
    return processor, model

processor, model = load_whisper_model()
status_placeholder.info("Whisper model loaded successfully!")

def split_audio_on_silence(audio_file_path, min_silence_len=500, silence_thresh=-40, keep_silence=250):
    """
    Split an audio file into chunks using silence detection.

    min_silence_len: minimum stretch of silence (ms) that counts as a break.
    silence_thresh: loudness in dBFS below which audio is treated as silence.
    keep_silence: ms of silence to keep at each chunk boundary for context.
    """
    status_placeholder.info("Splitting audio on silence...")
    audio = AudioSegment.from_file(audio_file_path)
    chunks = split_on_silence(
        audio,
        min_silence_len=min_silence_len,
        silence_thresh=silence_thresh,
        keep_silence=keep_silence
    )
    status_placeholder.info(f"Audio split into {len(chunks)} chunks.")
    return chunks
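
# Example tuning (assumption: the right values depend on the recording): for
# quiet speakers, lower silence_thresh so soft speech is not classed as silence:
# chunks = split_audio_on_silence("interview.m4a", min_silence_len=700, silence_thresh=-50)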

def transcribe(audio_file):
    """
    Transcribe an audio file using the locally loaded Whisper model.
    librosa loads and resamples the audio to the 16 kHz mono input Whisper
    expects. Note: the processor pads/truncates inputs to Whisper's 30-second
    window, so any chunk longer than 30 s is cut off at that point.
    """
    speech, sr = librosa.load(audio_file, sr=16000)
    input_features = processor(speech, sampling_rate=16000, return_tensors="pt").input_features
    predicted_ids = model.generate(input_features)
    transcription = processor.batch_decode(predicted_ids, skip_special_tokens=True)[0]
    return transcription
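
# A minimal sketch of pinning the output language (assumption: you want to
# skip Whisper's language auto-detection; whisper-base is multilingual):
# forced_ids = processor.get_decoder_prompt_ids(language="english", task="transcribe")
# predicted_ids = model.generate(input_features, forced_decoder_ids=forced_ids)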

def transcribe_chunk(chunk, index, min_length_ms=100):
    """
    Transcribe an individual audio chunk, skipping any chunk shorter than
    min_length_ms.
    """
    if len(chunk) < min_length_ms:
        st.warning(f"Chunk {index} is too short to be processed.")
        return (index, "")
    # Export the chunk to a temporary WAV file so it can be re-loaded by librosa
    with NamedTemporaryFile(delete=False, suffix=".wav") as temp_audio_file:
        chunk.export(temp_audio_file.name, format="wav")
        temp_audio_file_path = temp_audio_file.name
    status_placeholder.info(f"Transcribing chunk {index}...")
    try:
        transcription = transcribe(temp_audio_file_path)
    finally:
        # Remove the temp file even if transcription raises
        os.remove(temp_audio_file_path)
    st.write(f"Transcription for chunk {index}: {transcription}")
    return (index, transcription)

def process_audio_chunks(audio_chunks):
    """
    Process and transcribe each audio chunk in sequence.
    Reports the total time taken and returns the combined text.
    """
    transcriptions = []
    min_length_ms = 100  # minimum chunk duration (ms) worth transcribing
    start_transcription = time.time()
    for i, chunk in enumerate(audio_chunks):
        index, text = transcribe_chunk(chunk, i, min_length_ms)
        transcriptions.append((index, text))
    # Sorting is a no-op for sequential processing, but keeps the order
    # correct if chunks are ever transcribed concurrently.
    transcriptions.sort(key=lambda x: x[0])
    total_time = time.time() - start_transcription
    status_placeholder.info(f"All chunks transcribed in {total_time:.2f} seconds.")
    combined = " ".join(text for idx, text in transcriptions)
    return combined
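
# A hedged fallback sketch (hypothetical helper, not wired in above): if
# silence detection returns one very long chunk, pydub's millisecond slicing
# can cap chunks at Whisper's 30 s window:
# def split_fixed(audio_segment, window_ms=30_000):
#     return [audio_segment[i:i + window_ms] for i in range(0, len(audio_segment), window_ms)]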

def save_transcription_to_docx(transcription, audio_file_path):
    """
    Save the transcription as a .docx file named after the source audio.
    """
    base_name = os.path.splitext(os.path.basename(audio_file_path))[0]
    output_file_name = f"{base_name}_full_transcription.docx"
    status_placeholder.info("Saving transcription to DOCX...")
    doc = Document()
    doc.add_paragraph(transcription)
    doc.save(output_file_name)
    status_placeholder.info("Transcription saved as DOCX.")
    return output_file_name
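
# A hedged variant (assumption: you would rather not write into the working
# directory): build the output path under the system temp dir instead, e.g.:
# import tempfile
# output_file_name = os.path.join(tempfile.gettempdir(), f"{base_name}_full_transcription.docx")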

st.title("Audio Transcription with Whisper (Local via Hugging Face)")

# Allow uploading of audio or video files
uploaded_file = st.file_uploader("Upload an audio or video file", type=["wav", "mp3", "ogg", "m4a", "mp4", "mov"])

if 'transcription' not in st.session_state:
    st.session_state.transcription = None

if uploaded_file is not None and st.session_state.transcription is None:
    st.audio(uploaded_file)
    # Save the uploaded file temporarily so pydub/ffmpeg can read it from disk
    file_extension = uploaded_file.name.split(".")[-1]
    temp_audio_file = f"temp_audio_file.{file_extension}"
    with open(temp_audio_file, "wb") as f:
        f.write(uploaded_file.getbuffer())

    processing_start = time.time()
    with st.spinner('Processing audio...'):
        audio_chunks = split_audio_on_silence(temp_audio_file)
        transcription = process_audio_chunks(audio_chunks)
        if transcription:
            st.session_state.transcription = transcription
            st.success('Transcription complete!')
            output_docx_file = save_transcription_to_docx(transcription, uploaded_file.name)
            st.session_state.output_docx_file = output_docx_file
    processing_duration = time.time() - processing_start
    status_placeholder.info(f"Total processing time: {processing_duration:.2f} seconds.")

    if os.path.exists(temp_audio_file):
        os.remove(temp_audio_file)

if st.session_state.transcription:
    st.text_area("Transcription", st.session_state.transcription, key="transcription_area_final")
    with open(st.session_state.output_docx_file, "rb") as docx_file:
        st.download_button(
            label="Download Transcription (.docx)",
            data=docx_file,
            file_name=st.session_state.output_docx_file,
            mime='application/vnd.openxmlformats-officedocument.wordprocessingml.document'
        )