Spaces · Running on Zero

Commit · a2d7ecf
1 Parent(s): aafdc1f

allocate 120s for move_to_gpu

Files changed:
- adaface/face_id_to_ada_prompt.py +4 -2
- app.py +15 -12
adaface/face_id_to_ada_prompt.py CHANGED
@@ -661,7 +661,8 @@ class Arc2Face_ID2AdaPrompt(FaceID2AdaPrompt):
                               providers=['CUDAExecutionProvider'],
                               provider_options=[{"device_id": device_id,
                                                  "cudnn_conv_algo_search": "HEURISTIC",
-                                                 "gpu_mem_limit":
+                                                 "gpu_mem_limit": 2 * 1024**3
+                                                 }])
         self.face_app.prepare(ctx_id=device_id, det_size=(256, 256))
 
         self.device = device
@@ -808,7 +809,8 @@ class ConsistentID_ID2AdaPrompt(FaceID2AdaPrompt):
                               providers=['CUDAExecutionProvider'],
                               provider_options=[{"device_id": device_id,
                                                  "cudnn_conv_algo_search": "HEURISTIC",
-                                                 "gpu_mem_limit":
+                                                 "gpu_mem_limit": 2 * 1024**3
+                                                 }])
         self.face_app.prepare(ctx_id=device_id, det_size=(256, 256))
 
         self.device = device
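For reference, "gpu_mem_limit" is one of the per-provider options that ONNX Runtime's CUDAExecutionProvider accepts; it caps the provider's GPU memory arena in bytes, so 2 * 1024**3 requests roughly 2 GiB. Below is a minimal, hedged sketch of how such an options dictionary reaches ONNX Runtime directly via InferenceSession; the model path is a placeholder, and the commit above passes an equivalent dictionary when constructing the face-analysis models.

import onnxruntime as ort

# Placeholder path, for illustration only.
model_path = "det_model.onnx"

# One options dict per entry in `providers`, in the same order.
cuda_options = {
    "device_id": 0,
    "cudnn_conv_algo_search": "HEURISTIC",  # avoid the slow exhaustive cuDNN conv search
    "gpu_mem_limit": 2 * 1024**3,           # cap the CUDA memory arena at ~2 GiB
}

session = ort.InferenceSession(
    model_path,
    providers=["CUDAExecutionProvider"],
    provider_options=[cuda_options],
)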
app.py CHANGED
@@ -23,6 +23,20 @@ def str2bool(v):
 def is_running_on_spaces():
     return os.getenv("SPACE_ID") is not None
 
+@spaces.GPU(duration=120)
+def move_to_gpu(adaface):
+    if is_running_on_spaces():
+        device = 'cuda:0'
+    else:
+        if args.gpu is None:
+            device = "cuda"
+        else:
+            device = f"cuda:{args.gpu}"
+
+    print(f"Device: {device}")
+
+    adaface.to(device)
+
 import argparse
 parser = argparse.ArgumentParser()
 parser.add_argument("--adaface_encoder_types", type=str, nargs="+", default=["consistentID", "arc2face"],
@@ -87,6 +101,7 @@ if not args.test_ui_only:
                               shrink_cross_attn=False,
                               q_lora_updates_query=args.q_lora_updates_query,
                               device='cpu')
+    move_to_gpu(adaface)
 
 def randomize_seed_fn(seed: int, randomize_seed: bool) -> int:
     if randomize_seed:
@@ -114,18 +129,6 @@ def generate_image(image_paths, image_paths2, guidance_scale, perturb_std,
 
     global adaface
 
-    if is_running_on_spaces():
-        device = 'cuda:0'
-    else:
-        if args.gpu is None:
-            device = "cuda"
-        else:
-            device = f"cuda:{args.gpu}"
-
-    print(f"Device: {device}")
-
-    adaface.to(device)
-
     if image_paths is None or len(image_paths) == 0:
         raise gr.Error(f"Cannot find any input face image! Please upload a face image.")
 
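The commit title refers to the duration argument of the ZeroGPU decorator: on a Space that is "Running on Zero", a function decorated with @spaces.GPU(duration=120) is granted up to 120 seconds of GPU time per call instead of the default allotment, and CUDA is generally only usable inside such calls. That is why app.py builds the pipeline with device='cpu' and moves it to the GPU inside the new move_to_gpu function rather than inside generate_image. A minimal, self-contained sketch of the pattern, with a placeholder model standing in for the real pipeline:

import spaces  # Hugging Face ZeroGPU helper, available on Spaces
import torch

model = torch.nn.Linear(4, 4)  # placeholder for the real pipeline

@spaces.GPU(duration=120)  # the GPU is attached only while this function runs
def run_on_gpu(x: torch.Tensor) -> torch.Tensor:
    model.to("cuda")  # safe here: CUDA is available inside the decorated call
    return model(x.to("cuda")).cpu()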