ans123 committed on
Commit
9c2bcdf
·
verified ·
1 Parent(s): 00fd07c

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +94 -0
app.py ADDED
@@ -0,0 +1,94 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import cv2
2
+ import time
3
+ import csv
4
+ import gradio as gr
5
+ import numpy as np
6
+ import sounddevice as sd
7
+ import soundfile as sf
8
+ from deepface import DeepFace
9
+ import tempfile
10
+ import os
11
+ from scipy.io.wavfile import write as write_wav
12
+
13
+
14
# === Audio recording config ===
SAMPLE_RATE = 16000  # Hz — capture rate for the mono microphone recording
DURATION = 30 # seconds — length of the audio recording and of the per-second video sampling loop
17
+
18
+
19
def record_audio(filename="audio_recording.wav", duration=DURATION, sample_rate=SAMPLE_RATE):
    """Record mono 16-bit audio from the default input device and save it as WAV.

    Blocks until the recording is complete (``sd.wait()``).

    Args:
        filename: Path of the WAV file to write.
        duration: Recording length in seconds (defaults to module ``DURATION``).
        sample_rate: Sampling rate in Hz (defaults to module ``SAMPLE_RATE``).

    Returns:
        The path of the written WAV file (same as ``filename``).
    """
    # Report the actual duration instead of the previously hard-coded "30 seconds".
    print(f"Recording audio for {duration} seconds...")
    audio = sd.rec(int(sample_rate * duration), samplerate=sample_rate, channels=1, dtype='int16')
    sd.wait()  # block until the capture buffer is fully filled
    write_wav(filename, sample_rate, audio)
    return filename
25
+
26
+
27
def map_emotions_to_metrics(emotion):
    """Translate a DeepFace emotion-score dict into five affect metrics.

    Args:
        emotion: Mapping of emotion name -> percentage score (0-100), e.g.
            the ``result[0]['emotion']`` dict returned by ``DeepFace.analyze``.

    Returns:
        Tuple ``(valence, arousal, dominance, stress_index, engagement_level)``,
        each derived from the raw percentages and scaled to roughly 0-1.
    """
    def score(name):
        # Absent emotions contribute 0, mirroring dict.get(name, 0).
        return emotion.get(name, 0)

    valence = score("happy") / 100
    arousal = (score("angry") + score("surprise")) / 200
    stress_index = score("fear") / 100
    engagement_level = 1 - score("neutral") / 100
    dominance = (score("angry") + score("disgust")) / 200
    return valence, arousal, dominance, stress_index, engagement_level
34
+
35
+
36
def analyze_inputs():
    """Capture webcam frames and microphone audio for DURATION seconds and
    derive per-second emotional metrics.

    Bug fix: the original called ``record_audio`` inline, but that function
    blocks until recording finishes (``sd.wait()``), so audio and video ran
    sequentially (~2x DURATION total) despite the "in parallel" comment. The
    recording now runs on a background thread so the two genuinely overlap.

    Each frame is analyzed with DeepFace; per-frame analysis failures are
    logged and skipped (best-effort, the session continues).

    Returns:
        Path of a CSV file in the temp directory with columns
        timestamp, valence, arousal, dominance, stress_index, engagement_level.
    """
    import threading  # local import: only this function needs it

    cap = cv2.VideoCapture(0)
    start_time = time.time()
    data = []

    # Start audio recording in parallel on a worker thread; record_audio
    # blocks for DURATION seconds, so calling it inline would serialize
    # audio capture before video capture.
    audio_file_path = os.path.join(tempfile.gettempdir(), "temp_audio.wav")
    audio_thread = threading.Thread(target=record_audio, args=(audio_file_path,))
    audio_thread.start()

    try:
        for _ in range(DURATION):
            ret, frame = cap.read()
            if not ret:
                continue

            try:
                result = DeepFace.analyze(frame, actions=['emotion'], enforce_detection=False)
                emotion = result[0]['emotion']
                valence, arousal, dominance, stress_index, engagement_level = map_emotions_to_metrics(emotion)

                data.append([
                    round(time.time() - start_time, 2),
                    round(valence, 3),
                    round(arousal, 3),
                    round(dominance, 3),
                    round(stress_index, 3),
                    round(engagement_level, 3)
                ])
            except Exception as e:
                # Best-effort: one bad frame must not abort the whole session.
                print("Error analyzing frame:", e)

            # Sample roughly once per second across the recording window.
            time.sleep(1)
    finally:
        cap.release()  # always free the camera, even if the loop raises
        audio_thread.join()  # ensure the WAV file is fully written

    # Write to CSV
    csv_path = os.path.join(tempfile.gettempdir(), "emotional_metrics.csv")
    with open(csv_path, mode='w', newline='') as f:
        writer = csv.writer(f)
        writer.writerow(["timestamp", "valence", "arousal", "dominance", "stress_index", "engagement_level"])
        writer.writerows(data)

    return csv_path
78
+
79
+
80
def demo_interface(campaign_text):
    """Gradio handler: run the 30-second capture/analysis session.

    The campaign description is collected by the UI but is not consumed by
    the analysis; the handler simply triggers a session and returns the
    resulting CSV path for the file output component.
    """
    return analyze_inputs()
83
+
84
+
85
# Gradio UI: one free-text input (not used by the analysis itself — see
# demo_interface) and a file output that serves the generated metrics CSV.
demo = gr.Interface(
    fn=demo_interface,
    inputs=gr.Textbox(label="Enter Campaign Description", lines=2, placeholder="Describe your campaign..."),
    outputs=gr.File(label="Download Emotional Metrics (CSV Output)"),
    title="📊 Complex Campaign Emotion Response Engine",
    description="This demo captures webcam and microphone input for 30 seconds. It analyzes facial expressions using DeepFace and records audio. The output is a downloadable CSV of emotional metrics over time."
)

# Launch the web app only when run as a script, not when imported.
if __name__ == "__main__":
    demo.launch()