# EMOTIV / app.py
import cv2
import time
import csv
import gradio as gr
import numpy as np
import sounddevice as sd
import soundfile as sf
from deepface import DeepFace
import tempfile
import os
from scipy.io.wavfile import write as write_wav
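
# Gradio demo: records ~30 seconds of microphone audio, then scores facial
# expressions from the webcam once per second with DeepFace, and returns the
# derived emotional metrics as a downloadable CSV.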
# === Audio recording config ===
SAMPLE_RATE = 16000
DURATION = 30 # seconds
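
# Record DURATION seconds of mono, 16 kHz audio from the default input device
# and save it to `filename` as a 16-bit WAV.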
def record_audio(filename="audio_recording.wav"):
    print(f"Recording audio for {DURATION} seconds...")
    audio = sd.rec(int(SAMPLE_RATE * DURATION), samplerate=SAMPLE_RATE, channels=1, dtype='int16')
    sd.wait()
    write_wav(filename, SAMPLE_RATE, audio)
    return filename
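
# Heuristic mapping from DeepFace emotion scores (percentages, 0-100) to rough
# affective metrics on a 0-1 scale.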
def map_emotions_to_metrics(emotion):
    valence = emotion.get("happy", 0) / 100
    arousal = (emotion.get("angry", 0) + emotion.get("surprise", 0)) / 200
    stress_index = emotion.get("fear", 0) / 100
    engagement_level = 1 - emotion.get("neutral", 0) / 100
    dominance = (emotion.get("angry", 0) + emotion.get("disgust", 0)) / 200
    return valence, arousal, dominance, stress_index, engagement_level
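
# Capture webcam frames for DURATION seconds (about one per second), score each
# frame's emotions, and write the derived metrics to a temporary CSV file whose
# path is returned.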
def analyze_inputs():
    cap = cv2.VideoCapture(0)
    start_time = time.time()
    data = []
    # Record audio first: record_audio() blocks until DURATION seconds are done,
    # so audio capture finishes before the webcam loop starts. The WAV is saved
    # to the temp directory but is not analyzed further in this demo.
    audio_file_path = os.path.join(tempfile.gettempdir(), "temp_audio.wav")
    record_audio(audio_file_path)
    # Grab roughly one webcam frame per second and score its emotions.
    for _ in range(DURATION):
        ret, frame = cap.read()
        if not ret:
            continue
        try:
            result = DeepFace.analyze(frame, actions=['emotion'], enforce_detection=False)
            emotion = result[0]['emotion']
            valence, arousal, dominance, stress_index, engagement_level = map_emotions_to_metrics(emotion)
            data.append([
                round(time.time() - start_time, 2),
                round(valence, 3),
                round(arousal, 3),
                round(dominance, 3),
                round(stress_index, 3),
                round(engagement_level, 3)
            ])
        except Exception as e:
            print("Error analyzing frame:", e)
        time.sleep(1)
    cap.release()
    # Write the per-second metrics to a temporary CSV file.
    csv_path = os.path.join(tempfile.gettempdir(), "emotional_metrics.csv")
    with open(csv_path, mode='w', newline='') as f:
        writer = csv.writer(f)
        writer.writerow(["timestamp", "valence", "arousal", "dominance", "stress_index", "engagement_level"])
        writer.writerows(data)
    return csv_path
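
# Gradio callback. The campaign description is accepted but not used by the
# analysis; it simply runs the capture pipeline and returns the CSV path.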
def demo_interface(campaign_text):
    csv_output = analyze_inputs()
    return csv_output
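
# Single-page Gradio UI: campaign-description text box in, CSV file out.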
demo = gr.Interface(
    fn=demo_interface,
    inputs=gr.Textbox(label="Enter Campaign Description", lines=2, placeholder="Describe your campaign..."),
    outputs=gr.File(label="Download Emotional Metrics (CSV Output)"),
    title="📊 Complex Campaign Emotion Response Engine",
    description="This demo records 30 seconds of microphone audio, then analyzes facial expressions from the webcam for 30 seconds using DeepFace. The output is a downloadable CSV of emotional metrics over time."
)

if __name__ == "__main__":
    demo.launch()