Charbel Malo committed on
Commit
8a556b1
·
verified ·
1 Parent(s): 8648922

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +13 -27
app.py CHANGED
@@ -1,3 +1,6 @@
 
 
 
1
  import os
2
  import spaces
3
  import cv2
@@ -28,9 +31,9 @@ from utils import trim_video, StreamerThread, ProcessBar, open_directory, split_
28
 
29
  ## ------------------------------ USER ARGS ------------------------------
30
 
31
- parser = argparse.ArgumentParser(description="Swap Face Swapper")
32
  parser.add_argument("--out_dir", help="Default Output directory", default=os.getcwd())
33
- parser.add_argument("--batch_size", help="Gpu batch size", default=1)
34
  parser.add_argument("--cuda", action="store_true", help="Enable cuda", default=False)
35
  parser.add_argument(
36
  "--colab", action="store_true", help="Enable colab mode", default=False
@@ -76,8 +79,6 @@ FACE_ENHANCER_LIST = ["NONE"]
76
  FACE_ENHANCER_LIST.extend(get_available_enhancer_names())
77
  FACE_ENHANCER_LIST.extend(cv2_interpolations)
78
 
79
- ## ------------------------------ SET EXECUTION PROVIDER ------------------------------
80
-
81
  ## ------------------------------ SET EXECUTION PROVIDER ------------------------------
82
  # Note: Non CUDA users may change settings here
83
 
@@ -88,21 +89,16 @@ if USE_CUDA:
88
  if "CUDAExecutionProvider" in available_providers:
89
  print("\n********** Running on CUDA **********\n")
90
  PROVIDER = ["CUDAExecutionProvider", "CPUExecutionProvider"]
91
- cv2.setNumThreads(32)
92
  else:
93
  USE_CUDA = False
94
  print("\n********** CUDA unavailable running on CPU **********\n")
95
- cv2.setNumThreads(1)
96
  else:
97
  USE_CUDA = False
98
  print("\n********** Running on CPU **********\n")
99
- cv2.setNumThreads(1)
100
  device = "cuda" if USE_CUDA else "cpu"
101
  EMPTY_CACHE = lambda: torch.cuda.empty_cache() if device == "cuda" else None
102
 
103
- ## ------------------------------ LOAD MODELS ------------------------------
104
-
105
-
106
  ## ------------------------------ LOAD MODELS ------------------------------
107
 
108
  def load_face_analyser_model(name="buffalo_l"):
@@ -138,7 +134,7 @@ load_face_swapper_model()
138
  ## ------------------------------ MAIN PROCESS ------------------------------
139
 
140
 
141
- @spaces.GPU(duration=200)
142
  def process(
143
  input_type,
144
  image_path,
@@ -169,22 +165,9 @@ def process(
169
  global WORKSPACE
170
  global OUTPUT_FILE
171
  global PREVIEW
172
- global USE_CUDA # Access global variables
173
- global device
174
- global PROVIDER
175
- global FACE_ANALYSER, FACE_SWAPPER, FACE_ENHANCER, FACE_PARSER, NSFW_DETECTOR
176
-
177
- # Set CUDA usage and device
178
- USE_CUDA = True
179
- device = "cuda"
180
- PROVIDER = ["CUDAExecutionProvider", "CPUExecutionProvider"]
181
-
182
- # Reset models to None to reload them with GPU
183
- FACE_ANALYSER = None
184
- FACE_SWAPPER = None
185
- FACE_ENHANCER = None
186
- FACE_PARSER = None
187
- NSFW_DETECTOR = None ## ------------------------------ GUI UPDATE FUNC ------------------------------
188
 
189
  def ui_before():
190
  return (
@@ -944,3 +927,6 @@ if __name__ == "__main__":
944
  print("Running in colab mode")
945
 
946
  interface.launch()
 
 
 
 
1
+
2
+ #### APP.PY CODE END ###
3
+
4
  import os
5
  import spaces
6
  import cv2
 
31
 
32
  ## ------------------------------ USER ARGS ------------------------------
33
 
34
+ parser = argparse.ArgumentParser(description="Swap-Mukham Face Swapper")
35
  parser.add_argument("--out_dir", help="Default Output directory", default=os.getcwd())
36
+ parser.add_argument("--batch_size", help="Gpu batch size", default=32)
37
  parser.add_argument("--cuda", action="store_true", help="Enable cuda", default=False)
38
  parser.add_argument(
39
  "--colab", action="store_true", help="Enable colab mode", default=False
 
79
  FACE_ENHANCER_LIST.extend(get_available_enhancer_names())
80
  FACE_ENHANCER_LIST.extend(cv2_interpolations)
81
 
 
 
82
  ## ------------------------------ SET EXECUTION PROVIDER ------------------------------
83
  # Note: Non CUDA users may change settings here
84
 
 
89
  if "CUDAExecutionProvider" in available_providers:
90
  print("\n********** Running on CUDA **********\n")
91
  PROVIDER = ["CUDAExecutionProvider", "CPUExecutionProvider"]
 
92
  else:
93
  USE_CUDA = False
94
  print("\n********** CUDA unavailable running on CPU **********\n")
 
95
  else:
96
  USE_CUDA = False
97
  print("\n********** Running on CPU **********\n")
98
+
99
  device = "cuda" if USE_CUDA else "cpu"
100
  EMPTY_CACHE = lambda: torch.cuda.empty_cache() if device == "cuda" else None
101
 
 
 
 
102
  ## ------------------------------ LOAD MODELS ------------------------------
103
 
104
  def load_face_analyser_model(name="buffalo_l"):
 
134
  ## ------------------------------ MAIN PROCESS ------------------------------
135
 
136
 
137
+ @spaces.GPU(duration=300, enable_queue=True)
138
  def process(
139
  input_type,
140
  image_path,
 
165
  global WORKSPACE
166
  global OUTPUT_FILE
167
  global PREVIEW
168
+ WORKSPACE, OUTPUT_FILE, PREVIEW = None, None, None
169
+
170
+ ## ------------------------------ GUI UPDATE FUNC ------------------------------
 
 
 
 
 
 
 
 
 
 
 
 
 
171
 
172
  def ui_before():
173
  return (
 
927
  print("Running in colab mode")
928
 
929
  interface.launch()
930
+
931
+
932
+ #### APP.PY CODE END ###