ahmedghani committed
Commit bed6951 · 1 Parent(s): 6933075

completed codebase

This view is limited to 50 files because it contains too many changes.
Files changed (50):
  1. .gitignore +2 -0
  2. {florence_sam/RAFT → RAFT}/__init__.py +0 -0
  3. {florence_sam/RAFT → RAFT}/corr.py +0 -0
  4. {florence_sam/RAFT → RAFT}/datasets.py +0 -0
  5. {florence_sam/RAFT → RAFT}/demo.py +0 -0
  6. {florence_sam/RAFT → RAFT}/extractor.py +0 -0
  7. {florence_sam/RAFT → RAFT}/raft.py +0 -0
  8. {florence_sam/RAFT → RAFT}/update.py +0 -0
  9. {florence_sam/RAFT → RAFT}/utils/__init__.py +0 -0
  10. {florence_sam/RAFT → RAFT}/utils/augmentor.py +0 -0
  11. {florence_sam/RAFT → RAFT}/utils/flow_viz.py +0 -0
  12. {florence_sam/RAFT → RAFT}/utils/flow_viz_pt.py +0 -0
  13. {florence_sam/RAFT → RAFT}/utils/frame_utils.py +0 -0
  14. {florence_sam/RAFT → RAFT}/utils/utils.py +0 -0
  15. florence_sam/app.py → app.py +9 -10
  16. {florence_sam/checkpoints → checkpoints}/sam2_hiera_base_plus.pt +0 -0
  17. {florence_sam/checkpoints → checkpoints}/sam2_hiera_large.pt +0 -0
  18. {florence_sam/checkpoints → checkpoints}/sam2_hiera_small.pt +0 -0
  19. {florence_sam/checkpoints → checkpoints}/sam2_hiera_tiny.pt +0 -0
  20. {florence_sam/configs → configs}/__init__.py +0 -0
  21. {florence_sam/configs → configs}/sam2_hiera_b+.yaml +0 -0
  22. {florence_sam/configs → configs}/sam2_hiera_l.yaml +0 -0
  23. {florence_sam/configs → configs}/sam2_hiera_s.yaml +0 -0
  24. {florence_sam/configs → configs}/sam2_hiera_t.yaml +0 -0
  25. {florence_sam/configs → configs}/train_flowcomp.json +0 -0
  26. {florence_sam/configs → configs}/train_propainter.json +0 -0
  27. {florence_sam/core → core}/dataset.py +0 -0
  28. {florence_sam/core → core}/dist.py +0 -0
  29. {florence_sam/core → core}/loss.py +0 -0
  30. {florence_sam/core → core}/lr_scheduler.py +0 -0
  31. {florence_sam/core → core}/metrics.py +0 -0
  32. {florence_sam/core → core}/prefetch_dataloader.py +0 -0
  33. {florence_sam/core → core}/trainer.py +0 -0
  34. {florence_sam/core → core}/trainer_flow_w_edge.py +0 -0
  35. {florence_sam/core → core}/utils.py +0 -0
  36. {florence_sam/datasets → datasets}/davis/test.json +0 -0
  37. {florence_sam/datasets → datasets}/davis/train.json +0 -0
  38. {florence_sam/datasets → datasets}/youtube-vos/test.json +0 -0
  39. {florence_sam/datasets → datasets}/youtube-vos/train.json +0 -0
  40. florence_sam/experiment.ipynb → experiment.ipynb +0 -0
  41. florence_sam/florancesam_pipeline.py → florancesam_pipeline.py +14 -9
  42. florence_sam/README.md +0 -3
  43. florence_sam/requirements.txt +0 -12
  44. florence_sam/inference_propainter.py → inference_propainter.py +0 -0
  45. florence_sam/main.py → main.py +8 -6
  46. {florence_sam/model → model}/__init__.py +0 -0
  47. {florence_sam/model → model}/canny/canny_filter.py +0 -0
  48. {florence_sam/model → model}/canny/filter.py +0 -0
  49. {florence_sam/model → model}/canny/gaussian.py +0 -0
  50. {florence_sam/model → model}/canny/kernels.py +0 -0
.gitignore CHANGED
@@ -160,3 +160,5 @@ cython_debug/
 # and can be added to the global gitignore or merged into this file. For a more nuclear
 # option (not recommended) you can uncomment the following to ignore the entire idea folder.
 #.idea/
+
+.gradio/
{florence_sam/RAFT → RAFT}/__init__.py RENAMED
File without changes
{florence_sam/RAFT → RAFT}/corr.py RENAMED
File without changes
{florence_sam/RAFT → RAFT}/datasets.py RENAMED
File without changes
{florence_sam/RAFT → RAFT}/demo.py RENAMED
File without changes
{florence_sam/RAFT → RAFT}/extractor.py RENAMED
File without changes
{florence_sam/RAFT → RAFT}/raft.py RENAMED
File without changes
{florence_sam/RAFT → RAFT}/update.py RENAMED
File without changes
{florence_sam/RAFT → RAFT}/utils/__init__.py RENAMED
File without changes
{florence_sam/RAFT → RAFT}/utils/augmentor.py RENAMED
File without changes
{florence_sam/RAFT → RAFT}/utils/flow_viz.py RENAMED
File without changes
{florence_sam/RAFT → RAFT}/utils/flow_viz_pt.py RENAMED
File without changes
{florence_sam/RAFT → RAFT}/utils/frame_utils.py RENAMED
File without changes
{florence_sam/RAFT → RAFT}/utils/utils.py RENAMED
File without changes
florence_sam/app.py → app.py RENAMED
@@ -2,26 +2,25 @@ from main import infer
 import moviepy.editor as mp
 import gradio as gr
 import os
+import tempfile
 
 def pre_processor(video_path, scale_factor, prompt, crop_duration):
     video = mp.VideoFileClip(video_path)
-
     cropped_video = video.subclip(0, min(crop_duration, video.duration))
 
-    temp_output = "cropped_video.mp4"
-    cropped_video.write_videofile(temp_output, codec="libx264")
+    with tempfile.NamedTemporaryFile(suffix='.mp4', delete=False) as temp_file:
+        temp_output = temp_file.name
+    cropped_video.write_videofile(temp_output, codec="libx264")
 
     output = infer(temp_output, scale_factor, prompt)
 
-    # Clean up temporary files
-    if os.path.exists(temp_output):
-        os.remove(temp_output)
+    # Clean up temporary file
+    os.unlink(temp_output)
 
     return output
 
-
 demo = gr.Interface(
-    title="Text based video inpainting",
+    title="Text Based Video Inpainting 🔥 (SAM2+Florance2+ProPainter)",
     fn=pre_processor,
     inputs=[
        gr.Video(label="Upload Video"),
@@ -32,4 +31,4 @@ demo = gr.Interface(
     outputs="video"
 )
 
-demo.launch()
+demo.launch(server_port=7555)
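
Note on the change above: `delete=False` is what lets the temporary file outlive the `with` block so `write_videofile` can write to it, after which cleanup becomes manual. A minimal, self-contained sketch of that same pattern, using a plain byte write as a stand-in for moviepy (the stand-ins are assumptions for illustration only):

```python
import os
import tempfile

# Reserve a temporary path that survives the context manager (delete=False),
# hand it to a writer, then remove it manually once downstream work is done.
with tempfile.NamedTemporaryFile(suffix=".mp4", delete=False) as temp_file:
    temp_output = temp_file.name

try:
    # Stand-in for cropped_video.write_videofile(temp_output, codec="libx264")
    with open(temp_output, "wb") as f:
        f.write(b"fake video bytes")
    # Stand-in for output = infer(temp_output, scale_factor, prompt)
    result = os.path.getsize(temp_output)
finally:
    # Clean up the temporary file even if processing fails
    os.unlink(temp_output)

print(result)
```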
{florence_sam/checkpoints → checkpoints}/sam2_hiera_base_plus.pt RENAMED
File without changes
{florence_sam/checkpoints → checkpoints}/sam2_hiera_large.pt RENAMED
File without changes
{florence_sam/checkpoints → checkpoints}/sam2_hiera_small.pt RENAMED
File without changes
{florence_sam/checkpoints → checkpoints}/sam2_hiera_tiny.pt RENAMED
File without changes
{florence_sam/configs → configs}/__init__.py RENAMED
File without changes
{florence_sam/configs → configs}/sam2_hiera_b+.yaml RENAMED
File without changes
{florence_sam/configs → configs}/sam2_hiera_l.yaml RENAMED
File without changes
{florence_sam/configs → configs}/sam2_hiera_s.yaml RENAMED
File without changes
{florence_sam/configs → configs}/sam2_hiera_t.yaml RENAMED
File without changes
{florence_sam/configs → configs}/train_flowcomp.json RENAMED
File without changes
{florence_sam/configs → configs}/train_propainter.json RENAMED
File without changes
{florence_sam/core → core}/dataset.py RENAMED
File without changes
{florence_sam/core → core}/dist.py RENAMED
File without changes
{florence_sam/core → core}/loss.py RENAMED
File without changes
{florence_sam/core → core}/lr_scheduler.py RENAMED
File without changes
{florence_sam/core → core}/metrics.py RENAMED
File without changes
{florence_sam/core → core}/prefetch_dataloader.py RENAMED
File without changes
{florence_sam/core → core}/trainer.py RENAMED
File without changes
{florence_sam/core → core}/trainer_flow_w_edge.py RENAMED
File without changes
{florence_sam/core → core}/utils.py RENAMED
File without changes
{florence_sam/datasets → datasets}/davis/test.json RENAMED
File without changes
{florence_sam/datasets → datasets}/davis/train.json RENAMED
File without changes
{florence_sam/datasets → datasets}/youtube-vos/test.json RENAMED
File without changes
{florence_sam/datasets → datasets}/youtube-vos/train.json RENAMED
File without changes
florence_sam/experiment.ipynb → experiment.ipynb RENAMED
File without changes
florence_sam/florancesam_pipeline.py → florancesam_pipeline.py RENAMED
@@ -52,15 +52,15 @@ class VideoProcessor:
         self.scale_factor = scale_factor
 
         # Process video based on the prompt
-        output_video_path, session_path = self._process_prompt(video_path, prompt)
+        output_video_path, session_path, input_frames_dir, output_directory_path = self._process_prompt(video_path, prompt)
 
         # Create frames from the output video
-        fps = self._create_frames(output_video_path, os.path.join(session_path, "output_frames"))
+        fps = self._create_frames(output_video_path, output_directory_path)
 
         # Delete the output video
         os.remove(output_video_path)
 
-        return session_path, fps
+        return session_path, fps, input_frames_dir, output_directory_path
 
     def _create_frames(self, video_path, output_dir):
         create_directory(output_dir)
@@ -119,10 +119,14 @@ class VideoProcessor:
 
         # Generate unique name for video processing
         name = generate_unique_name()
-        session_path = os.path.join("tmp", name)
-        create_directory(session_path)
-        frame_directory_path = os.path.join(session_path, "input_frames")
-        create_directory(frame_directory_path)
+        # session_path = os.path.join("tmp", name)
+        # create_directory(session_path)
+        # frame_directory_path = os.path.join(session_path, "input_frames")
+        # create_directory(frame_directory_path)
+        import tempfile
+        session_path = tempfile.mkdtemp(prefix="video_processing_")
+        frame_directory_path = tempfile.mkdtemp(prefix="input_frames_", dir=session_path)
+        output_directory_path = tempfile.mkdtemp(prefix="output_frames_", dir=session_path)
 
         frames_sink = sv.ImageSink(
             target_dir_path=frame_directory_path,
@@ -157,7 +161,8 @@ class VideoProcessor:
         )
 
         # Create output video path
-        output_video_path = os.path.join("tmp", f"{name}.mp4")
+        # output_video_path = os.path.join("tmp", f"{name}.mp4")
+        output_video_path = os.path.join(session_path, f"{name}.mp4")
         frames_generator = sv.get_video_frames_generator(video_path)
         masks_generator = self.sam_video_model.propagate_in_video(inference_state)
 
@@ -185,7 +190,7 @@ class VideoProcessor:
         annotated_frame = (annotated_frame > 0).astype(np.uint8) * 255
         sink.write_frame(annotated_frame)
 
-        return output_video_path, session_path
+        return output_video_path, session_path, frame_directory_path, output_directory_path
 
     # Example usage
     # output_video = video_processor.process_video(
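
Note on the change above: unlike `tempfile.TemporaryDirectory`, `tempfile.mkdtemp` never cleans up after itself, so the session directories created here persist until the caller removes them (which `main.py` does further down with `shutil.rmtree`). A minimal standalone sketch of the same session layout, with hypothetical placeholder frame files for illustration:

```python
import os
import shutil
import tempfile

# Build the same nested session layout used by the pipeline:
# one session directory holding separate input-frame and output-frame directories.
session_path = tempfile.mkdtemp(prefix="video_processing_")
frame_directory_path = tempfile.mkdtemp(prefix="input_frames_", dir=session_path)
output_directory_path = tempfile.mkdtemp(prefix="output_frames_", dir=session_path)

# Hypothetical placeholder files standing in for extracted frames and masks.
open(os.path.join(frame_directory_path, "00000.png"), "wb").close()
open(os.path.join(output_directory_path, "00000.png"), "wb").close()

print(session_path, os.listdir(session_path))

# mkdtemp never deletes anything on its own; the caller owns cleanup.
shutil.rmtree(session_path)
```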
florence_sam/README.md DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:031c6f979f28b8b0f3b42af389fd1f741046ca89de9bc05a33912db7e30e8741
-size 258
florence_sam/requirements.txt DELETED
@@ -1,12 +0,0 @@
-tqdm
-einops
-spaces
-timm
-transformers
-samv2
-gradio
-supervision
-opencv-python
-pytest
-imageio
-moviepy
florence_sam/inference_propainter.py → inference_propainter.py RENAMED
File without changes
florence_sam/main.py → main.py RENAMED
@@ -3,6 +3,7 @@ import shutil
 import torch
 from propainter_pipeline import process_video
 from florancesam_pipeline import VideoProcessor
+import tempfile
 
 video_processor = VideoProcessor()
 video_processor._enable_mixed_precision()
@@ -12,7 +13,7 @@ def infer(video_path, scale_factor, prompt):
     video_processor._enable_mixed_precision()
     # --------------------------------------------------------------------------------
     print("Processing video with FlorenceSam...")
-    session_path, fps = video_processor.process_video(
+    session_path, fps, input_frames_dir, output_frames_dir = video_processor.process_video(
         video_path=video_path,
         scale_factor=scale_factor,
         prompt=prompt
@@ -26,13 +27,14 @@ def infer(video_path, scale_factor, prompt):
     torch.cuda.empty_cache()
 
     print("Processing video with ProPainter...")
-    process_video(video=os.path.join(session_path, "input_frames"), mask=os.path.join(session_path, "output_frames"), save_fps=int(fps), fp16=True)
+    result_path = tempfile.mkdtemp()
+    inpainted_video = process_video(video=input_frames_dir, mask=output_frames_dir, save_fps=int(fps), fp16=True, output=result_path)
 
-    # remove intermediate files
-    shutil.rmtree("tmp")
+    shutil.rmtree(input_frames_dir)
+    shutil.rmtree(output_frames_dir)
     torch.cuda.empty_cache()
 
-    return "results/input_frames/inpaint_out.mp4"
+    return inpainted_video
 
 if __name__ == "__main__":
     infer("/home/ubuntu/ahmedghani/clip-07-camera-2.mp4", 0.5, "players, basketball, rim, players shadow")
{florence_sam/model → model}/__init__.py RENAMED
File without changes
{florence_sam/model → model}/canny/canny_filter.py RENAMED
File without changes
{florence_sam/model → model}/canny/filter.py RENAMED
File without changes
{florence_sam/model → model}/canny/gaussian.py RENAMED
File without changes
{florence_sam/model → model}/canny/kernels.py RENAMED
File without changes