File size: 2,982 Bytes
9c2bcdf
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
# Standard library
import csv
import os
import tempfile
import threading
import time

# Third-party
import cv2
import gradio as gr
import numpy as np
import sounddevice as sd
import soundfile as sf
from deepface import DeepFace
from scipy.io.wavfile import write as write_wav


# === Audio recording config ===
SAMPLE_RATE = 16000
DURATION = 30  # seconds


def record_audio(filename="audio_recording.wav"):
    print("Recording audio for 30 seconds...")
    audio = sd.rec(int(SAMPLE_RATE * DURATION), samplerate=SAMPLE_RATE, channels=1, dtype='int16')
    sd.wait()
    write_wav(filename, SAMPLE_RATE, audio)
    return filename


def map_emotions_to_metrics(emotion):
    valence = emotion.get("happy", 0) / 100
    arousal = (emotion.get("angry", 0) + emotion.get("surprise", 0)) / 200
    stress_index = emotion.get("fear", 0) / 100
    engagement_level = 1 - emotion.get("neutral", 0) / 100
    dominance = (emotion.get("angry", 0) + emotion.get("disgust", 0)) / 200
    return valence, arousal, dominance, stress_index, engagement_level


def analyze_inputs():
    cap = cv2.VideoCapture(0)
    start_time = time.time()
    data = []

    # Start audio recording in parallel
    audio_file_path = os.path.join(tempfile.gettempdir(), "temp_audio.wav")
    record_audio(audio_file_path)

    for i in range(DURATION):
        ret, frame = cap.read()
        if not ret:
            continue

        try:
            result = DeepFace.analyze(frame, actions=['emotion'], enforce_detection=False)
            emotion = result[0]['emotion']
            valence, arousal, dominance, stress_index, engagement_level = map_emotions_to_metrics(emotion)

            data.append([
                round(time.time() - start_time, 2),
                round(valence, 3),
                round(arousal, 3),
                round(dominance, 3),
                round(stress_index, 3),
                round(engagement_level, 3)
            ])

        except Exception as e:
            print("Error analyzing frame:", e)
        time.sleep(1)

    cap.release()

    # Write to CSV
    csv_path = os.path.join(tempfile.gettempdir(), "emotional_metrics.csv")
    with open(csv_path, mode='w', newline='') as f:
        writer = csv.writer(f)
        writer.writerow(["timestamp", "valence", "arousal", "dominance", "stress_index", "engagement_level"])
        writer.writerows(data)

    return csv_path


def demo_interface(campaign_text):
    csv_output = analyze_inputs()
    return csv_output


demo = gr.Interface(
    fn=demo_interface,
    inputs=gr.Textbox(label="Enter Campaign Description", lines=2, placeholder="Describe your campaign..."),
    outputs=gr.File(label="Download Emotional Metrics (CSV Output)"),
    title="📊 Complex Campaign Emotion Response Engine",
    description="This demo captures webcam and microphone input for 30 seconds. It analyzes facial expressions using DeepFace and records audio. The output is a downloadable CSV of emotional metrics over time."
)

if __name__ == "__main__":
    demo.launch()