Sagnik1750 committed on
Commit ae9c4b0 · verified · 1 Parent(s): 1692af6

Update app.py

Files changed (1): app.py (+65 -9)
app.py CHANGED
@@ -9,6 +9,7 @@ from facenet_pytorch import MTCNN
 from transformers import AutoFeatureExtractor, AutoModelForImageClassification
 from PIL import Image
 import os
+import time
 
 # Load models
 device = 'cuda' if torch.cuda.is_available() else 'cpu'
@@ -36,7 +37,7 @@ def detect_emotion(frame):
     return model.config.id2label[torch.argmax(probs).item()]
 
 # Process Video
-def process_video(video_path):
+def process_video(video_path, progress=gr.Progress()):
     cap = cv2.VideoCapture(video_path)
     fps = int(cap.get(cv2.CAP_PROP_FPS))
     frame_width, frame_height = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)), int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
@@ -44,6 +45,8 @@ def process_video(video_path):
     out = cv2.VideoWriter(out_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (frame_width, frame_height))
 
     emotion_counts = {}
+    total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
+    processed_frames = 0
 
     while cap.isOpened():
         ret, frame = cap.read()
@@ -61,6 +64,9 @@ def process_video(video_path):
 
         out.write(frame)
 
+        processed_frames += 1
+        progress(processed_frames / total_frames)
+
     cap.release()
     out.release()
 
@@ -74,12 +80,62 @@ def process_video(video_path):
 
     return emotion, "emotion_distribution.jpg", out_path
 
-# Gradio Interface
-gr.Interface(
-    fn=process_video,
-    inputs=[gr.File(type="filepath")],
-    outputs=["text", "image", "file"],
-    title="Emotion Analysis from Video",
-    description="Upload a video to analyze frame-by-frame emotion changes, get an emotion distribution pie chart, and a processed video with emotions.",
-).launch()
+# Custom CSS for styling
+css = """
+h1 {
+    text-align: center;
+    color: #ffffff;
+    font-size: 32px;
+    font-weight: bold;
+}
+
+.gradio-container {
+    background-color: #1E1E1E;
+    color: #ffffff;
+    padding: 20px;
+    font-family: 'Arial', sans-serif;
+}
+
+button {
+    font-size: 18px !important;
+    padding: 10px 15px !important;
+    background-color: #00BFFF !important;
+    color: white !important;
+    border-radius: 10px !important;
+}
+
+.gr-text-input {
+    background-color: #2E2E2E;
+    color: white;
+    border: 1px solid #00BFFF;
+}
+"""
+
+# Gradio Interface with Enhanced UI
+with gr.Blocks(css=css) as demo:
+    with gr.Row():
+        gr.Markdown("<h1>🎭 Emotion Analysis from Video 🎥</h1>")
+
+    with gr.Row():
+        video_input = gr.File(label="📤 Upload your video", type="filepath")
+
+    with gr.Row():
+        analyze_button = gr.Button("🚀 Analyze Video")
+
+    with gr.Row():
+        result_text = gr.Textbox(label="Detected Emotion", interactive=False)
+
+    with gr.Row():
+        emotion_chart = gr.Image(label="📊 Emotion Distribution", interactive=False)
+
+    with gr.Row():
+        processed_video = gr.Video(label="🎞 Processed Video with Emotion Detection")
+
+    analyze_button.click(
+        process_video,
+        inputs=[video_input],
+        outputs=[result_text, emotion_chart, processed_video]
+    )
 
+# Launch Gradio app
+demo.launch()
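The core of this update is Gradio's progress-tracking hook: declaring a keyword argument with a `gr.Progress()` default makes Gradio inject a tracker that the handler calls with a completion fraction between 0 and 1. A minimal, self-contained sketch of the same pattern (the `slow_task` function and its sleep are stand-ins for the per-frame emotion model):

```python
import time
import gradio as gr

def slow_task(steps, progress=gr.Progress()):
    # Gradio injects a Progress tracker for any parameter whose
    # default is gr.Progress(); it is not treated as a UI input.
    steps = int(steps)
    for i in range(steps):
        time.sleep(0.05)                              # stand-in for real work
        progress((i + 1) / steps, desc="Processing")  # fraction in [0, 1]
    return f"Finished {steps} steps"

demo = gr.Interface(
    fn=slow_task,
    inputs=gr.Slider(1, 100, value=50, step=1),
    outputs="text",
)

if __name__ == "__main__":
    demo.launch()
```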
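`total_frames` is read from `cv2.CAP_PROP_FRAME_COUNT`, which comes from container metadata and can be 0 or inaccurate for some codecs and streams. A hedged sketch of guarding it before it becomes a progress denominator (`input.mp4` is a placeholder path):

```python
import cv2

cap = cv2.VideoCapture("input.mp4")
total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))  # may be 0 or wrong
processed = 0

while cap.isOpened():
    ret, frame = cap.read()
    if not ret:
        break
    processed += 1
    if total_frames > 0:
        # clamp in case the metadata undercounts the real stream
        print(f"progress: {min(processed / total_frames, 1.0):.0%}")

cap.release()
```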
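One caveat the diff leaves untouched: the writer still uses the `mp4v` FourCC, and MPEG-4 Part 2 output often will not play inline in browsers, so the new `gr.Video` preview may fall back to a bare download. Whether an H.264 (`avc1`) writer is available depends on the local OpenCV/FFmpeg build; a sketch that tries it first and falls back (file name, fps, and frame size are placeholders):

```python
import cv2

def open_writer(path, fps, size):
    # Try H.264 first for browser playback, then the commit's mp4v codec.
    for fourcc in ("avc1", "mp4v"):
        writer = cv2.VideoWriter(path, cv2.VideoWriter_fourcc(*fourcc), fps, size)
        if writer.isOpened():
            return writer
    raise RuntimeError("no usable codec found")

out = open_writer("annotated.mp4", 30, (1280, 720))
```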