Update app.py
app.py CHANGED

@@ -234,7 +234,7 @@ def analyze_stream(prompt, stream, chatbot):
             break
         frames.append(frame)
 
-        # Sample the frames every
+        # Sample the frames every LENGTH seconds
         if time.time() - start_time >= LENGTH:
             # Start a new thread for processing the video clip
             Thread(target=process_clip, args=(prompt, frames.copy(), chatbot,)).start()
@@ -253,7 +253,7 @@ def analyze_video_file(prompt, video_path, chatbot):
 
     # Get video properties
     fps = int(cap.get(cv2.CAP_PROP_FPS))  # Frames per second
-    frames_per_chunk = fps * LENGTH  # Number of frames per
+    frames_per_chunk = fps * LENGTH  # Number of frames per LENGTH-second chunk
 
     frames = []
     chunk = 0
@@ -268,13 +268,13 @@ def analyze_video_file(prompt, video_path, chatbot):
             break
         frames.append(frame)
 
-        # Split the video into chunks of frames corresponding to
+        # Split the video into chunks of frames corresponding to LENGTH seconds
         if len(frames) >= frames_per_chunk:
             futures.append(executor.submit(process_clip_from_file, prompt, frames.copy(), chatbot, fps, video_path, chunk))
             frames = []
             chunk+=1
 
-    # If any remaining frames that are less than
+    # If any remaining frames that are less than LENGTH seconds, process them as a final chunk
     if len(frames) > 0:
         futures.append(executor.submit(process_clip_from_file, prompt, frames.copy(), chatbot, fps, video_path, chunk))
         chunk+=1
@@ -358,12 +358,12 @@ with gr.Blocks(title="Conntour", fill_height=True) as demo:
     start_btn.click(analyze_stream, inputs=[prompt, stream, chatbot], outputs=[chatbot], queue=True)
     stop_btn.click(stop_capture_func)
     # Add new API endpoint (without UI components)
-
-
-
-
-
-
-
+    with gr.Row(visible=False) as hidden_api:
+        api_prompt = gr.Textbox(label="Prompt")
+        api_video = gr.Video(label="Video File")
+        api_output = gr.JSON(label="Captured Events")
+        api_btn = gr.Button("Analyze Video File")
+
+        api_btn.click(analyze_video_file_sync, inputs=[api_prompt, api_video], outputs=[api_output])
 
 demo.launch(favicon_path='favicon.ico', auth=(user_name, password))
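For context on the chunking comments touched above: analyze_video_file groups decoded frames into LENGTH-second clips using the frame rate reported by OpenCV. A minimal sketch of that arithmetic with hypothetical values (neither LENGTH nor the file's actual fps appears in these hunks):

# Hypothetical numbers purely for illustration; LENGTH is defined elsewhere in app.py
# and fps comes from cv2.CAP_PROP_FPS for the real input file.
LENGTH = 10                      # assumed chunk length in seconds
fps = 30                         # assumed frame rate of the input video
frames_per_chunk = fps * LENGTH  # 300 frames handed to process_clip_from_file per chunk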
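The new gr.Row is created with visible=False, so the "Analyze Video File" button is intended to be driven programmatically rather than from the UI. A rough client-side sketch using gradio_client, assuming a recent version that supports auth= and handle_file(), and assuming the click event keeps Gradio's default api_name derived from the bound function; the URL, credentials, prompt text, and file below are placeholders, not part of this commit:

from gradio_client import Client, handle_file

# Placeholder URL and credentials for the password-protected Space.
client = Client("https://<space-url>", auth=("<user_name>", "<password>"))

events = client.predict(
    "Describe notable events in the clip",   # api_prompt (example text)
    handle_file("sample_clip.mp4"),          # api_video (example file)
    api_name="/analyze_video_file_sync",     # assumed auto-generated endpoint name
)
print(events)  # JSON payload that would populate api_output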