import gradio as gr
from transformers import pipeline, WhisperProcessor, WhisperForConditionalGeneration
import torch
import torchaudio
import soundfile as sf
# Load Whisper model and processor
processor = WhisperProcessor.from_pretrained("openai/whisper-large")
model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-large")
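# Note: "openai/whisper-large" downloads several GB of weights; a smaller
# checkpoint such as "openai/whisper-base" is a drop-in swap if resources are tight.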
# Load the Hugging Face emotion classifier
emotion_classifier = pipeline("text-classification", model="SamLowe/roberta-base-go_emotions", top_k=None)
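# top_k=None makes the pipeline return a score for every GoEmotions label
# (27 emotions plus neutral) rather than only the single best match.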
# Define a function to transcribe audio and analyze emotions
def transcribe_and_analyze(audio_path):
    # Load audio from the provided file
    audio, sample_rate = sf.read(audio_path)
    # Collapse stereo recordings to mono; Whisper expects a single channel
    if audio.ndim > 1:
        audio = audio.mean(axis=1)
    # Whisper's feature extractor requires 16 kHz input, so resample if needed
    if sample_rate != 16000:
        audio = torchaudio.functional.resample(
            torch.from_numpy(audio).float(), sample_rate, 16000
        ).numpy()
        sample_rate = 16000
    # Process audio with Whisper
    input_features = processor(audio, sampling_rate=sample_rate, return_tensors="pt").input_features
    with torch.no_grad():
        predicted_ids = model.generate(input_features)
    transcription = processor.batch_decode(predicted_ids, skip_special_tokens=True)[0]
    # Analyze emotions in the transcription
    emotions = emotion_classifier(transcription)
    return transcription, emotions
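# Quick sanity check outside Gradio (assuming a local recording "sample.wav" exists):
#   text, scores = transcribe_and_analyze("sample.wav")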
# Create Gradio interface
interface = gr.Interface(
    fn=transcribe_and_analyze,
    inputs=gr.Audio(type="filepath"),  # Accept audio input
    outputs=[
        gr.Textbox(label="Transcription"),  # Display transcription
        gr.JSON(label="Emotion Analysis"),  # Display emotion analysis
    ],
    title="Audio to Emotion Analysis",
)
# Launch the Gradio app
interface.launch()
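# Pass share=True to launch() to get a temporary public URL when running locally.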