Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
@@ -195,6 +195,29 @@ def process_clip_from_file(prompt, frames, chatbot, fps, video_path, id):
|
|
195 |
|
196 |
return chatbot
|
197 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
198 |
# Function to capture video frames
|
199 |
def analyze_stream(prompt, stream, chatbot):
|
200 |
global stop_capture
|
@@ -263,6 +286,49 @@ def analyze_video_file(prompt, video_path, chatbot):
|
|
263 |
yield result
|
264 |
return chatbot
|
265 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
266 |
|
267 |
# Function to stop video capture
|
268 |
def stop_capture_func():
|
@@ -291,5 +357,13 @@ with gr.Blocks(title="Conntour", fill_height=True) as demo:
|
|
291 |
stop_btn = gr.Button("Stop")
|
292 |
start_btn.click(analyze_stream, inputs=[prompt, stream, chatbot], outputs=[chatbot], queue=True)
|
293 |
stop_btn.click(stop_capture_func)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
294 |
|
295 |
-
demo.launch(favicon_path='favicon.ico', auth=(user_name, password))
|
|
|
195 |
|
196 |
return chatbot
|
197 |
|
198 |
+
# Synchronous worker: analyze one LENGTH-second chunk of frames and build an
# event record for it when the prompt condition is met.
def process_clip_from_file_sync(prompt, frames, fps, video_path, id):
    """Check one clip's frames against *prompt* and return an event on a hit.

    Args:
        prompt: Natural-language condition to evaluate on the frames.
        frames: Decoded frames for this clip (one LENGTH-second chunk).
        fps: Source frame rate; also used as the frame-sampling stride.
        video_path: Path of the source video, used to cut the clip segment.
        id: Zero-based chunk index; the clip starts at ``id * LENGTH`` seconds.

    Returns:
        dict | None: Event details when the condition is met, otherwise None
        (also None when capture has been stopped).
    """
    global stop_capture
    if stop_capture:  # analysis was cancelled — skip the work entirely
        return None

    tz = pytz.timezone('Asia/Jerusalem')
    start_time = datetime.now(tz).strftime('%H:%M:%S')
    print("[Start]:", start_time, len(frames))

    stride = int(fps)  # sample roughly one frame per second of video
    base64Frames = process_frames(frames, stride)
    _frames_count, _elapsed, api_response = check_condition(prompt, base64Frames)

    if not (api_response and api_response.get("condition_met", False)):
        return None

    clip_path = clip_video_segment_2(video_path, id * LENGTH, LENGTH)
    return {
        'event_id': id + 1,
        'video_clip_path': clip_path,
        'start_time': start_time,
        'details': api_response.get('details', ''),
    }
221 |
# Function to capture video frames
|
222 |
def analyze_stream(prompt, stream, chatbot):
|
223 |
global stop_capture
|
|
|
286 |
yield result
|
287 |
return chatbot
|
288 |
|
289 |
+
# Synchronously analyze a whole video file and return the captured events.
def analyze_video_file_sync(prompt, video_path):
    """Split *video_path* into LENGTH-second chunks, check each chunk against
    *prompt* on worker threads, and return the list of captured events.

    Args:
        prompt: Natural-language condition to evaluate on each chunk.
        video_path: Path to the video file to analyze.

    Returns:
        list[dict]: Events produced by ``process_clip_from_file_sync``, sorted
        by ``event_id`` so the result is deterministic regardless of which
        worker thread finishes first.
    """
    global stop_capture
    stop_capture = False  # reset the stop flag when analysis starts

    cap = cv2.VideoCapture(video_path)
    try:
        # Guard against 0 fps from an unreadable/broken file, which would make
        # frames_per_chunk 0 and submit a chunk per frame; fall back to 30.
        fps = int(cap.get(cv2.CAP_PROP_FPS)) or 30
        frames_per_chunk = fps * LENGTH  # frames per LENGTH-second chunk

        frames = []
        chunk = 0
        events = []

        # Thread pool lets chunk analysis overlap with video decoding.
        with ThreadPoolExecutor(max_workers=4) as executor:
            futures = []

            while not stop_capture:
                ret, frame = cap.read()
                if not ret:
                    break
                frames.append(frame)

                # Hand a full LENGTH-second chunk off to a worker thread.
                if len(frames) >= frames_per_chunk:
                    futures.append(executor.submit(
                        process_clip_from_file_sync,
                        prompt, frames, fps, video_path, chunk))
                    frames = []  # rebind (don't mutate) so the submitted list stays intact
                    chunk += 1

            # Process any trailing partial chunk as a final clip.
            if frames:
                futures.append(executor.submit(
                    process_clip_from_file_sync,
                    prompt, frames, fps, video_path, chunk))
                chunk += 1

            # Workers return None when the condition was not met (or capture
            # was stopped); keep only real events.
            for future in as_completed(futures):
                result = future.result()
                if result is not None:
                    events.append(result)
    finally:
        cap.release()  # always release the capture, even if a worker raised

    # as_completed yields in completion order; sort for a stable result.
    events.sort(key=lambda event: event['event_id'])
    return events
|
332 |
|
333 |
# Function to stop video capture
|
334 |
def stop_capture_func():
|
|
|
357 |
stop_btn = gr.Button("Stop")
|
358 |
start_btn.click(analyze_stream, inputs=[prompt, stream, chatbot], outputs=[chatbot], queue=True)
|
359 |
stop_btn.click(stop_capture_func)
|
360 |
+
# Expose the synchronous analysis as an API endpoint (no extra UI chrome).
# NOTE(review): the original called demo.add_component(api_interface), but
# gr.Blocks exposes no public add_component method, so startup would raise
# AttributeError.  Interface.render() is the documented way to embed an
# Interface's components into a Blocks layout; this statement must execute
# inside the `with gr.Blocks(...) as demo:` context — confirm placement.
api_interface = gr.Interface(
    fn=analyze_video_file_sync,
    inputs=[gr.Textbox(label="Prompt"), gr.Video(label="Video File")],
    outputs=gr.JSON(label="Captured Events"),
    live=False,
)
api_interface.render()
|
368 |
|
369 |
+
demo.launch(favicon_path='favicon.ico', auth=(user_name, password))
|