# app.py — Speech Sentiment Analysis (Gradio Space)
# NOTE(review): the following lines are Hugging Face web-UI residue captured
# by scraping, preserved here as a comment so the file parses as Python:
#   Boltz79's picture / Update app.py / defc213 verified / raw / history blame / 4.35 kB
import gradio as gr
import numpy as np
from textblob import TextBlob
import speech_recognition as sr
class SentimentAnalyzer:
    """Transcribes audio (Google Web Speech API) and scores sentiment with TextBlob."""

    def __init__(self):
        # Shared recognizer instance reused across requests.
        self.recognizer = sr.Recognizer()

    def audio_to_text(self, audio):
        """Convert a Gradio audio input to text.

        Parameters:
            audio: tuple of (sample_rate, numpy array) as produced by
                   gr.Audio(type="numpy").

        Returns:
            The recognized transcript, or a string starting with
            "Error in speech recognition:" on any failure (callers
            dispatch on that prefix rather than on exceptions).
        """
        try:
            sample_rate, samples = audio

            import io
            import scipy.io.wavfile as wav

            # BUG FIX: Gradio often delivers float32 samples in [-1.0, 1.0]
            # (uploads in particular). A bare astype(np.int16) truncates
            # those to all zeros, so recognition silently received silence.
            # Scale float audio to the int16 range before casting.
            if np.issubdtype(samples.dtype, np.floating):
                samples = (np.clip(samples, -1.0, 1.0) * 32767.0).astype(np.int16)
            else:
                samples = samples.astype(np.int16)

            # Serialize to an in-memory WAV that speech_recognition can read.
            byte_io = io.BytesIO()
            wav.write(byte_io, sample_rate, samples)
            byte_io.seek(0)

            with sr.AudioFile(byte_io) as source:
                captured = self.recognizer.record(source)
            # Network call to Google's free Web Speech endpoint.
            return self.recognizer.recognize_google(captured)
        except Exception as e:
            return f"Error in speech recognition: {str(e)}"

    def analyze_sentiment(self, text):
        """Score *text* with TextBlob.

        Returns:
            (results_text, plot_data) where results_text is a human-readable
            summary and plot_data maps "labels"/"values" for the bar chart
            (scores scaled to percentages). On failure returns
            (error_message, None).
        """
        try:
            blob = TextBlob(text)
            # TextBlob gives polarity in [-1, 1] and subjectivity in [0, 1].
            polarity = blob.sentiment.polarity
            subjectivity = blob.sentiment.subjectivity

            # Bucket the polarity into a coarse label.
            if polarity > 0:
                sentiment = "Positive"
            elif polarity < 0:
                sentiment = "Negative"
            else:
                sentiment = "Neutral"

            results_text = f"""
Detected Text: "{text}"
Analysis Results:
- Overall Sentiment: {sentiment}
- Polarity Score: {polarity:.2f} (-1 to +1)
- Subjectivity Score: {subjectivity:.2f} (0 to 1)
"""
            # Percentages for the BarPlot output.
            plot_data = {
                "labels": ["Polarity", "Subjectivity"],
                "values": [polarity * 100, subjectivity * 100]
            }
            return results_text, plot_data
        except Exception as e:
            return f"Error in sentiment analysis: {str(e)}", None
def create_interface():
    """Assemble and return the Gradio Blocks app (caller launches it)."""
    analyzer = SentimentAnalyzer()

    def handle_request(audio):
        """Click handler: audio -> transcript -> (summary text, plot data)."""
        # Guard: nothing recorded or uploaded yet.
        if audio is None:
            return "Please provide an audio input.", None
        transcript = analyzer.audio_to_text(audio)
        # audio_to_text signals failure via an "Error..."-prefixed string.
        if transcript.startswith("Error"):
            return transcript, None
        return analyzer.analyze_sentiment(transcript)

    with gr.Blocks() as ui:
        gr.Markdown("# 🎤 Speech Sentiment Analysis")
        gr.Markdown("""
Speak or upload an audio file to analyze its emotional content.
The system will convert speech to text and analyze the sentiment.
""")
        with gr.Row():
            with gr.Column():
                # Left column: input controls.
                audio_in = gr.Audio(
                    label="Upload or Record Audio",
                    type="numpy",
                    sources=["microphone", "upload"]
                )
                run_button = gr.Button("Analyze Sentiment")
            with gr.Column():
                # Right column: outputs.
                summary_box = gr.Textbox(label="Analysis Results", lines=8)
                score_chart = gr.BarPlot(
                    title="Sentiment Scores",
                    x_title="Metrics",
                    y_title="Score (%)"
                )
        run_button.click(
            fn=handle_request,
            inputs=[audio_in],
            outputs=[summary_box, score_chart]
        )
        gr.Markdown("""
### How to Use:
1. Click the microphone button to record or upload an audio file
2. Click "Analyze Sentiment" to process
3. View the results showing:
- Detected text from speech
- Overall sentiment (Positive/Negative/Neutral)
- Polarity score (-100% to +100%)
- Subjectivity score (0% to 100%)
""")
    return ui
if __name__ == "__main__":
    # Build the UI and serve it with a public share link enabled.
    app = create_interface()
    app.launch(share=True)