Spaces:
Running
on
Zero
Running
on
Zero
- app.py +3 -0
- scripts/anime.py +1 -1
- scripts/process_utils.py +2 -4
app.py
CHANGED
@@ -9,6 +9,9 @@ from scripts.anime import init_model
|
|
9 |
from datetime import datetime
|
10 |
from pytz import timezone
|
11 |
from scripts.survey import handle_form_submission, handle_visit_choice, handle_proceed, localize, script, generate_image, send_feedback
|
|
|
|
|
|
|
12 |
|
13 |
# 初期化
|
14 |
initialize(_use_local=False, use_gpu=True, use_dotenv=True)
|
|
|
9 |
from datetime import datetime
|
10 |
from pytz import timezone
|
11 |
from scripts.survey import handle_form_submission, handle_visit_choice, handle_proceed, localize, script, generate_image, send_feedback
|
12 |
+
import torch
|
13 |
+
|
14 |
+
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
|
15 |
|
16 |
# 初期化
|
17 |
initialize(_use_local=False, use_gpu=True, use_dotenv=True)
|
scripts/anime.py
CHANGED
@@ -27,7 +27,7 @@ def init_model(use_local=False):
|
|
27 |
model.eval()
|
28 |
|
29 |
# numpy配列の画像を受け取り、線画を生成してnumpy配列で返す
|
30 |
-
@spaces.GPU
|
31 |
def generate_sketch(image, clahe_clip=-1, load_size=512):
|
32 |
"""
|
33 |
Generate sketch image from input image
|
|
|
27 |
model.eval()
|
28 |
|
29 |
# numpy配列の画像を受け取り、線画を生成してnumpy配列で返す
|
30 |
+
# @spaces.GPU
|
31 |
def generate_sketch(image, clahe_clip=-1, load_size=512):
|
32 |
"""
|
33 |
Generate sketch image from input image
|
scripts/process_utils.py
CHANGED
@@ -39,10 +39,8 @@ def initialize(_use_local=False, use_gpu=False, use_dotenv=False):
|
|
39 |
if use_dotenv:
|
40 |
load_dotenv()
|
41 |
global model, sotai_gen_pipe, refine_gen_pipe, use_local, device, torch_dtype
|
42 |
-
|
43 |
-
|
44 |
-
device = torch.device('cuda')
|
45 |
-
torch_dtype = torch.float16
|
46 |
use_local = _use_local
|
47 |
|
48 |
print(f"\nDevice: {device}, Local model: {_use_local}\n")
|
|
|
39 |
if use_dotenv:
|
40 |
load_dotenv()
|
41 |
global model, sotai_gen_pipe, refine_gen_pipe, use_local, device, torch_dtype
|
42 |
+
device = "cuda" if use_gpu and torch.cuda.is_available() else "cpu"
|
43 |
+
torch_dtype = torch.float16 if device == "cuda" else torch.float32
|
|
|
|
|
44 |
use_local = _use_local
|
45 |
|
46 |
print(f"\nDevice: {device}, Local model: {_use_local}\n")
|