sanghan committed on
Commit
bae7189
·
1 Parent(s): 734ce72

add model loading into the function

Browse files
Files changed (2) hide show
  1. .gitignore +1 -0
  2. app.py +4 -3
.gitignore CHANGED
@@ -1,2 +1,3 @@
1
  com.mp4
2
  flagged/
 
 
1
  com.mp4
2
  flagged/
3
+ __pycache__/
app.py CHANGED
@@ -90,6 +90,10 @@ def inference(video):
90
  temp_directories.append(temp_dir)
91
  output_composition = temp_dir + "/matted_video.mp4"
92
 
 
 
 
 
93
  convert_video(
94
  model, # The loaded model, can be on any device (cpu or cuda).
95
  input_source=video, # A video file or an image sequence directory.
@@ -112,14 +116,11 @@ if __name__ == "__main__":
112
  temp_directories = []
113
  atexit.register(cleanup_temp_directories)
114
 
115
- model = torch.hub.load("PeterL1n/RobustVideoMatting", "mobilenetv3")
116
-
117
  if torch.cuda.is_available():
118
  free_memory = get_free_memory_gb()
119
  concurrency_count = int(free_memory // 7)
120
  print(f"Using GPU with concurrency: {concurrency_count}")
121
  print(f"Available video memory: {free_memory} GB")
122
- model = model.cuda()
123
  else:
124
  print("Using CPU")
125
  concurrency_count = 1
 
90
  temp_directories.append(temp_dir)
91
  output_composition = temp_dir + "/matted_video.mp4"
92
 
93
+ model = torch.hub.load("PeterL1n/RobustVideoMatting", "mobilenetv3")
94
+ if torch.cuda.is_available():
95
+ model = model.cuda()
96
+
97
  convert_video(
98
  model, # The loaded model, can be on any device (cpu or cuda).
99
  input_source=video, # A video file or an image sequence directory.
 
116
  temp_directories = []
117
  atexit.register(cleanup_temp_directories)
118
 
 
 
119
  if torch.cuda.is_available():
120
  free_memory = get_free_memory_gb()
121
  concurrency_count = int(free_memory // 7)
122
  print(f"Using GPU with concurrency: {concurrency_count}")
123
  print(f"Available video memory: {free_memory} GB")
 
124
  else:
125
  print("Using CPU")
126
  concurrency_count = 1