import streamlit as st
import os
import librosa
import torch
from pydub import AudioSegment
from pydub.silence import split_on_silence
from dotenv import load_dotenv
from tempfile import NamedTemporaryFile
from docx import Document
from transformers import WhisperProcessor, WhisperForConditionalGeneration
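# A likely dependency set for this script (assumed; not taken from a requirements.txt):
#   pip install streamlit librosa torch pydub python-dotenv python-docx transformers
# pydub shells out to ffmpeg, so the ffmpeg binary must be on PATH to decode
# mp3/ogg/m4a/mp4/mov uploads.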
# Load environment variables from .env file (if needed)
load_dotenv()
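# Caching the model load keeps Streamlit from re-initializing Whisper on every
# rerun of the script. This assumes Streamlit >= 1.18, where st.cache_resource
# is available; on older versions, st.cache(allow_output_mutation=True) plays
# a similar role.
@st.cache_resource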
def load_whisper_model():
    """
    Load the Whisper model and processor from Hugging Face.
    You can change the model variant ("openai/whisper-base" is used here).
    """
    model_name = "openai/whisper-base"  # Options: "tiny", "base", "small", "medium", "large"
    processor = WhisperProcessor.from_pretrained(model_name)
    model = WhisperForConditionalGeneration.from_pretrained(model_name)
    return processor, model

processor, model = load_whisper_model()
def split_audio_on_silence(audio_file_path, min_silence_len=500, silence_thresh=-40, keep_silence=250):
    """
    Split an audio file into chunks using silence detection.

    Args:
        audio_file_path (str): Path to the audio file.
        min_silence_len (int): Minimum length of silence (in ms) required for a split.
        silence_thresh (int): Volume threshold (in dBFS); audio quieter than this is treated as silence.
        keep_silence (int): Amount of silence (in ms) to retain at the beginning and end of each chunk.

    Returns:
        list: List of AudioSegment chunks.
    """
    audio = AudioSegment.from_file(audio_file_path)
    chunks = split_on_silence(
        audio,
        min_silence_len=min_silence_len,
        silence_thresh=silence_thresh,
        keep_silence=keep_silence
    )
    return chunks
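# Tuning notes (rules of thumb, not from the original author): if the recording's
# noise floor sits above silence_thresh, split_on_silence finds no silence and
# returns the whole file as a single chunk; raising silence_thresh (toward 0 dBFS)
# above the noise floor restores splitting, while raising min_silence_len yields
# fewer, longer chunks.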
def transcribe(audio_file):
    """
    Transcribe an audio file using the locally loaded Whisper model from Hugging Face.

    Args:
        audio_file (str): Path to the audio file.

    Returns:
        str: Transcribed text.
    """
    # Load audio with librosa, resampling to 16000 Hz as required by Whisper
    speech, sr = librosa.load(audio_file, sr=16000)
    input_features = processor(speech, sampling_rate=16000, return_tensors="pt").input_features
    # Generate transcription
    predicted_ids = model.generate(input_features)
    transcription = processor.batch_decode(predicted_ids, skip_special_tokens=True)[0]
    return transcription
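# Note: WhisperProcessor pads or truncates each input to Whisper's 30-second
# log-mel window, so any chunk longer than ~30 s is silently cut off. The
# silence-based splitting above usually keeps chunks under that limit, but very
# long uninterrupted speech would need additional fixed-length slicing.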
def process_audio_chunks(audio_chunks):
    """
    Process and transcribe each audio chunk.

    Args:
        audio_chunks (list): List of AudioSegment chunks.

    Returns:
        str: Combined transcription from all chunks.
    """
    transcriptions = []
    min_length_ms = 100  # Minimum length required (0.1 seconds)
    for i, chunk in enumerate(audio_chunks):
        if len(chunk) < min_length_ms:
            st.warning(f"Chunk {i} is too short to be processed.")
            continue
        # delete=False lets librosa reopen the file later; on Windows an open
        # NamedTemporaryFile cannot be opened a second time.
        with NamedTemporaryFile(delete=False, suffix=".wav") as temp_audio_file:
            chunk.export(temp_audio_file.name, format="wav")
            temp_audio_file_path = temp_audio_file.name
        try:
            transcription = transcribe(temp_audio_file_path)
            if transcription:
                transcriptions.append(transcription)
                st.write(f"Transcription for chunk {i}: {transcription}")
        finally:
            # Remove the temp file even if transcription fails
            os.remove(temp_audio_file_path)
    return " ".join(transcriptions)
def save_transcription_to_docx(transcription, audio_file_path):
    """
    Save the transcription as a .docx file.

    Args:
        transcription (str): Transcribed text.
        audio_file_path (str): Path to the original audio file for naming purposes.

    Returns:
        str: Path to the saved .docx file.
    """
    base_name = os.path.splitext(os.path.basename(audio_file_path))[0]
    output_file_name = f"{base_name}_full_transcription.docx"
    doc = Document()
    doc.add_paragraph(transcription)
    doc.save(output_file_name)
    return output_file_name
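# The .docx above is written to the app's current working directory under a
# name derived from the uploaded file; everything below is the Streamlit page
# itself.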
st.title("Audio Transcription with Whisper (Local via Hugging Face)")

# Allow uploading of audio or video files
uploaded_file = st.file_uploader("Upload an audio or video file", type=["wav", "mp3", "ogg", "m4a", "mp4", "mov"])

if 'transcription' not in st.session_state:
    st.session_state.transcription = None

if uploaded_file is not None and st.session_state.transcription is None:
    st.audio(uploaded_file)
    # Save uploaded file temporarily
    file_extension = uploaded_file.name.split(".")[-1]
    temp_audio_file = f"temp_audio_file.{file_extension}"
    with open(temp_audio_file, "wb") as f:
        f.write(uploaded_file.getbuffer())

    # Split and process audio using silence detection
    with st.spinner('Transcribing...'):
        audio_chunks = split_audio_on_silence(
            temp_audio_file,
            min_silence_len=500,
            silence_thresh=-40,
            keep_silence=250
        )
        transcription = process_audio_chunks(audio_chunks)

    if transcription:
        st.session_state.transcription = transcription
        st.success('Transcription complete!')
        output_docx_file = save_transcription_to_docx(transcription, uploaded_file.name)
        st.session_state.output_docx_file = output_docx_file

    if os.path.exists(temp_audio_file):
        os.remove(temp_audio_file)

if st.session_state.transcription:
    st.text_area("Transcription", st.session_state.transcription, key="transcription_area_final")
    with open(st.session_state.output_docx_file, "rb") as docx_file:
        st.download_button(
            label="Download Transcription (.docx)",
            data=docx_file,
            file_name=st.session_state.output_docx_file,
            mime='application/vnd.openxmlformats-officedocument.wordprocessingml.document'
        )
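# To run the app locally (assuming the script is saved as app.py):
#   streamlit run app.py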