Charbel Malo committed
Commit 5ee28a7 · verified · 1 Parent(s): aed0c79

Update app.py

Files changed (1): app.py (+19, -3)
app.py CHANGED

@@ -30,7 +30,7 @@ from utils import trim_video, StreamerThread, ProcessBar, open_directory, split_
 
 parser = argparse.ArgumentParser(description="Swap Face Swapper")
 parser.add_argument("--out_dir", help="Default Output directory", default=os.getcwd())
-parser.add_argument("--batch_size", help="Gpu batch size", default=32)
+parser.add_argument("--batch_size", help="Gpu batch size", default=1)
 parser.add_argument("--cuda", action="store_true", help="Enable cuda", default=False)
 parser.add_argument(
     "--colab", action="store_true", help="Enable colab mode", default=False
@@ -76,14 +76,30 @@ FACE_ENHANCER_LIST = ["NONE"]
 FACE_ENHANCER_LIST.extend(get_available_enhancer_names())
 FACE_ENHANCER_LIST.extend(cv2_interpolations)
 
+## ------------------------------ SET EXECUTION PROVIDER ------------------------------
+
 ## ------------------------------ SET EXECUTION PROVIDER ------------------------------
 # Note: Non CUDA users may change settings here
 
-PROVIDER = ["CPUExecutionProvider"] # Default to CPU provider
-device = "cpu"
+PROVIDER = ["CPUExecutionProvider"]
+
+if USE_CUDA:
+    available_providers = onnxruntime.get_available_providers()
+    if "CUDAExecutionProvider" in available_providers:
+        print("\n********** Running on CUDA **********\n")
+        PROVIDER = ["CUDAExecutionProvider", "CPUExecutionProvider"]
+    else:
+        USE_CUDA = False
+        print("\n********** CUDA unavailable running on CPU **********\n")
+else:
+    USE_CUDA = False
+    print("\n********** Running on CPU **********\n")
 
+device = "cuda" if USE_CUDA else "cpu"
 EMPTY_CACHE = lambda: torch.cuda.empty_cache() if device == "cuda" else None
 
+## ------------------------------ LOAD MODELS ------------------------------
+
 
 ## ------------------------------ LOAD MODELS ------------------------------
 
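A side note on the first hunk: both the old default=32 and the new default=1 give --batch_size an integer default, but since the argument declares no type=int, a value supplied on the command line arrives as a string. A minimal sketch of the stricter form; the type=int below is an editorial suggestion, not part of this commit:

# Sketch: adding type=int so CLI-supplied values match the int default.
# type=int here is an assumption/suggestion, not code from this commit.
import argparse

parser = argparse.ArgumentParser(description="Swap Face Swapper")
parser.add_argument("--batch_size", help="Gpu batch size", default=1, type=int)

args = parser.parse_args(["--batch_size", "4"])
print(type(args.batch_size), args.batch_size)  # <class 'int'> 4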
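The second hunk replaces the hard-coded CPU-only setup with runtime provider detection: PROVIDER stays CPU-only unless --cuda was passed and onnxruntime actually reports CUDAExecutionProvider. Below is a minimal, self-contained sketch of the same pattern, including how such a provider list is typically handed to an ONNX Runtime session; the "model.onnx" path is a placeholder, not a file from this repo:

# Sketch of the provider-selection pattern this commit introduces.
# Requires onnxruntime (onnxruntime-gpu for the CUDA branch).
import onnxruntime

USE_CUDA = True  # stand-in for the --cuda argparse flag

PROVIDER = ["CPUExecutionProvider"]
if USE_CUDA and "CUDAExecutionProvider" in onnxruntime.get_available_providers():
    # CPU stays in the list as a fallback behind CUDA, as in app.py
    PROVIDER = ["CUDAExecutionProvider", "CPUExecutionProvider"]
else:
    USE_CUDA = False

device = "cuda" if USE_CUDA else "cpu"
print(f"Running on {device} with providers {PROVIDER}")

# A session built from this list (placeholder model path):
# session = onnxruntime.InferenceSession("model.onnx", providers=PROVIDER)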