reab5555 committed on
Commit 0c0a9ed · verified · 1 Parent(s): 5a4c973

Update app.py

Files changed (1):
  1. app.py +26 -13
app.py CHANGED
@@ -3,11 +3,14 @@ import time
 from video_processing import process_video
 from PIL import Image
 import matplotlib
+import threading
 
 matplotlib.rcParams['figure.dpi'] = 300
 matplotlib.rcParams['savefig.dpi'] = 300
 
 def process_and_show_completion(video_input_path, anomaly_threshold_input, fps, progress=gr.Progress()):
+    global processing
+    processing = True
     start_time = time.time()
 
     try:
@@ -17,6 +20,7 @@ def process_and_show_completion(video_input_path, anomaly_threshold_input, fps,
 
         if isinstance(results[0], str) and results[0].startswith("Error"):
             print(f"Error occurred: {results[0]}")
+            processing = False
             return [results[0]] + [None] * 27
 
         exec_time, results_summary, df, mse_embeddings, mse_posture, mse_voice, \
@@ -37,7 +41,7 @@ def process_and_show_completion(video_input_path, anomaly_threshold_input, fps,
         total_exec_time = end_time - start_time
 
         output = [
-            f"Execution time: {total_exec_time:.2f} seconds", results_summary,
+            f"Total execution time: {total_exec_time:.2f} seconds", results_summary,
             df, mse_embeddings, mse_posture, mse_voice,
             mse_plot_embeddings, mse_plot_posture, mse_plot_voice,
             mse_histogram_embeddings, mse_histogram_posture, mse_histogram_voice,
@@ -49,6 +53,7 @@ def process_and_show_completion(video_input_path, anomaly_threshold_input, fps,
             heatmap_video_path, combined_mse_plot, correlation_heatmap
         ]
 
+        processing = False
         return output
 
     except Exception as e:
@@ -56,21 +61,23 @@ def process_and_show_completion(video_input_path, anomaly_threshold_input, fps,
         print(error_message)
         import traceback
         traceback.print_exc()
+        processing = False
         return [error_message] + [None] * 27
 
 def show_results():
     return [gr.update(visible=True) for _ in range(4)]
 
 def start_execution_timer():
-    return gr.update(value="Execution time: 0 seconds", visible=True)
+    return gr.update(visible=True), gr.update(visible=False)
 
-def update_execution_time(start_time):
+def update_execution_time():
     current_time = time.time() - start_time
     return f"Execution time: {current_time:.2f} seconds"
 
-with gr.Blocks() as iface:
-    start_time = gr.State(0)
+processing = False
+start_time = 0
 
+with gr.Blocks() as iface:
     with gr.Row():
         video_input = gr.Video(label="Input Video")
 
@@ -88,6 +95,7 @@ with gr.Blocks() as iface:
     This tool detects anomalies in facial expressions, body language, and voice over the timeline of a video.
    It extracts faces, postures, and voice from video frames, and analyzes them to identify anomalies using time series analysis and a variational autoencoder (VAE) approach.
     """)
+    execution_time_md = gr.Markdown(visible=False)
 
     facial_features_tab = gr.Tab("Facial Features", visible=False)
     with facial_features_tab:
@@ -127,19 +135,24 @@ with gr.Blocks() as iface:
     mse_heatmap_posture_store = gr.State()
     mse_heatmap_voice_store = gr.State()
 
+    def start_processing():
+        global start_time, processing
+        start_time = time.time()
+        processing = True
+
     process_btn.click(
-        lambda: time.time(),
+        start_processing,
         inputs=None,
-        outputs=start_time
+        outputs=None
     ).then(
         start_execution_timer,
         inputs=None,
-        outputs=description_md
+        outputs=[execution_time_md, description_md]
     ).then(
         process_and_show_completion,
         inputs=[video_input, anomaly_threshold, fps_slider],
         outputs=[
-            description_md, results_text, df_store,
+            execution_time_md, results_text, df_store,
             mse_features_store, mse_posture_store, mse_voice_store,
             mse_features_plot, mse_posture_plot, mse_voice_plot,
             mse_features_hist, mse_posture_hist, mse_voice_hist,
@@ -156,11 +169,11 @@ with gr.Blocks() as iface:
         outputs=all_tabs.children[1:]
     )
 
-    description_md.change(
+    execution_time_md.change(
         update_execution_time,
-        inputs=[start_time],
-        outputs=description_md,
-        every=1
+        inputs=None,
+        outputs=execution_time_md,
+        every=0.1
     )
 
 if __name__ == "__main__":
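
Note on the timer wiring changed above: the commit moves the elapsed-time display off description_md onto a dedicated, initially hidden execution_time_md that is revealed on click and then refreshed via the event listener's every argument, with a module-level start_time. Below is a minimal, self-contained sketch of that pattern; it is illustrative only (names such as run_btn, timer_md, show_timer, and update_elapsed are not from app.py) and assumes a Gradio version whose event listeners accept every.

import time
import gradio as gr

start_time = 0  # module-level start timestamp, mirroring the global used in this commit

def start_processing():
    # record when the run button was pressed
    global start_time
    start_time = time.time()

def show_timer():
    # reveal the initially hidden timer text
    return gr.update(visible=True)

def update_elapsed():
    # report elapsed time since the button press
    return f"Execution time: {time.time() - start_time:.2f} seconds"

with gr.Blocks() as demo:
    run_btn = gr.Button("Run")             # hypothetical trigger button
    timer_md = gr.Markdown(visible=False)  # hidden until processing starts

    # click: store the start time, then reveal the timer component
    run_btn.click(start_processing, inputs=None, outputs=None).then(
        show_timer, inputs=None, outputs=timer_md
    )
    # once timer_md changes, keep refreshing it on a short interval
    timer_md.change(update_elapsed, inputs=None, outputs=timer_md, every=0.1)

if __name__ == "__main__":
    # queuing may be required for every=-based polling, depending on Gradio version
    demo.queue().launch()

Module-level globals keep the sketch short; for deployments with concurrent users, per-session state such as gr.State is generally the safer choice.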