SpyC0der77 committed · Commit 2b55597 · verified · 1 Parent(s): 29b5ebe

Update app.py

Files changed (1)
  1. app.py +11 -11
app.py CHANGED
@@ -12,7 +12,7 @@ import threading
 # Global status and result dictionaries.
 status = {
     "logs": "",
-    "progress": 0,  # 0 to 100
+    "progress": 0,  # from 0 to 100
     "finished": False
 }
 result = {
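A side note on this hunk: status is written by the background worker thread and read by the Gradio callbacks at the same time, so guarding the shared dicts with a lock is the usual precaution. A minimal sketch, with a hypothetical status_lock that is not in app.py:

    import threading

    status = {"logs": "", "progress": 0, "finished": False}
    status_lock = threading.Lock()  # hypothetical guard, not part of app.py

    def append_log(msg):
        # Take the lock so writes from the worker thread and reads from the
        # polling callback never interleave mid-update.
        with status_lock:
            status["logs"] += msg + "\n"
        print(msg)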
@@ -24,7 +24,7 @@ result = {
 device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
 print(f"[INFO] Using device: {device}")
 
-# Try to load the RAFT model. If it fails, we fall back to Farneback.
+# Try to load the RAFT model. If it fails, fall back to OpenCV Farneback.
 try:
     print("[INFO] Attempting to load RAFT model from torch.hub...")
     raft_model = torch.hub.load("princeton-vl/RAFT", "raft_small", pretrained=True, trust_repo=True)
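The except branch and the Farneback fallback sit outside this hunk. For reference, the dense-flow call that fallback normally relies on looks like the sketch below; the parameter values are common OpenCV choices, not necessarily the ones in app.py:

    import cv2

    def farneback_flow(prev_gray, curr_gray):
        # Dense optical flow between two grayscale frames; returns an
        # H x W x 2 array of per-pixel (dx, dy) displacements.
        return cv2.calcOpticalFlowFarneback(
            prev_gray, curr_gray, None,
            pyr_scale=0.5, levels=3, winsize=15,
            iterations=3, poly_n=5, poly_sigma=1.2, flags=0)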
@@ -37,7 +37,7 @@ except Exception as e:
     raft_model = None
 
 def append_log(msg):
-    """Helper to append a log message to the global status."""
+    """Append a log message to the global status and print it."""
     global status
     status["logs"] += msg + "\n"
     print(msg)
@@ -45,7 +45,7 @@ def append_log(msg):
 def background_process(video_file, zoom):
     """
     Runs the full processing: generates a motion CSV using RAFT (or Farneback)
-    and then stabilizes the video. Updates the global status and result.
+    and then stabilizes the video. Updates global status and result.
     """
     global status, result
 
@@ -110,7 +110,7 @@ def background_process(video_file, zoom):
         prev_gray = curr_gray
 
         # Compute median magnitude and angle.
-        mag, ang = cv2.cartToPolar(flow[...,0], flow[...,1], angleInDegrees=True)
+        mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1], angleInDegrees=True)
         median_mag = np.median(mag)
         median_ang = np.median(ang)
         # Compute zoom factor: fraction of pixels moving away from the center.
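cv2.cartToPolar turns the per-pixel (dx, dy) flow into magnitudes and angles, and taking the median of each gives a single, outlier-resistant motion estimate per frame. A small illustration of converting those medians back into a Cartesian shift; the downstream CSV-writing code is not shown in this diff, so the variable names below are illustrative:

    import numpy as np
    import cv2

    # Stand-in flow field; in app.py this comes from RAFT or Farneback.
    flow = np.random.randn(480, 640, 2).astype(np.float32)

    mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1], angleInDegrees=True)
    median_mag = float(np.median(mag))
    median_ang = float(np.median(ang))

    # Median polar motion expressed as a per-frame (dx, dy) shift.
    dx = median_mag * np.cos(np.deg2rad(median_ang))
    dy = median_mag * np.sin(np.deg2rad(median_ang))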
@@ -203,7 +203,7 @@ def start_processing(video_file, zoom):
     """Starts background processing in a new thread."""
     thread = threading.Thread(target=background_process, args=(video_file, zoom), daemon=True)
     thread.start()
-    return "Processing started."
+    return "[INFO] Processing started..."
 
 def poll_status():
     """
@@ -218,7 +218,7 @@ poll_status():
 # Build the Gradio UI.
 with gr.Blocks() as demo:
     gr.Markdown("# AI-Powered Video Stabilization")
-    gr.Markdown("Upload a video and select a zoom factor. Click **Process Video** to start processing in the background. Then click **Refresh Status** to update the logs and progress (once processing finishes, the stabilized video will be shown).")
+    gr.Markdown("Upload a video and select a zoom factor. Processing starts automatically and progress/logs update every 2 seconds.")
 
     with gr.Row():
         with gr.Column():
@@ -230,11 +230,11 @@ with gr.Blocks() as demo:
             stabilized_video = gr.Video(label="Stabilized Video")
             logs_output = gr.Textbox(label="Logs", lines=15)
             progress_bar = gr.Slider(label="Progress", minimum=0, maximum=100, value=0, interactive=False)
-            refresh_button = gr.Button("Refresh Status")
 
-    # When "Process Video" is clicked, start processing.
+    # When "Process Video" is clicked, start processing in the background.
     start_button.click(fn=start_processing, inputs=[video_input, zoom_slider], outputs=[logs_output])
-    # When "Refresh Status" is clicked, update logs, progress, and videos.
-    refresh_button.click(fn=poll_status, inputs=[], outputs=[original_video, stabilized_video, logs_output, progress_bar])
+
+    # Automatically poll status every 2 seconds.
+    auto_poll = gr.Autoupdate(interval=2000, function=poll_status, outputs=[original_video, stabilized_video, logs_output, progress_bar])
 
 demo.launch()
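One caution on the new wiring: gr.Autoupdate does not appear to be part of Gradio's public API, so the added line is likely to fail at import time. Periodic polling is normally wired through a timed trigger instead. A sketch assuming Gradio 3.x/4.x, where demo.load accepts an every interval (newer releases expose gr.Timer with a .tick event for the same purpose):

    import gradio as gr

    def poll_status():
        # Stub with the same output shape as app.py's poll_status.
        return None, None, "polling...", 0

    with gr.Blocks() as demo:
        original_video = gr.Video(label="Original Video")
        stabilized_video = gr.Video(label="Stabilized Video")
        logs_output = gr.Textbox(label="Logs", lines=15)
        progress_bar = gr.Slider(label="Progress", minimum=0, maximum=100,
                                 value=0, interactive=False)

        # Re-run poll_status every 2 seconds and push its four return values
        # into the components, replacing the removed "Refresh Status" button.
        demo.load(fn=poll_status, inputs=None,
                  outputs=[original_video, stabilized_video, logs_output, progress_bar],
                  every=2)

    demo.launch()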
 