yeq6x committed on
Commit
2a43cdb
•
1 Parent(s): cec82ab
Files changed (3) hide show
  1. app.py +3 -0
  2. scripts/anime.py +1 -1
  3. scripts/process_utils.py +2 -4
app.py CHANGED
@@ -9,6 +9,9 @@ from scripts.anime import init_model
9
  from datetime import datetime
10
  from pytz import timezone
11
  from scripts.survey import handle_form_submission, handle_visit_choice, handle_proceed, localize, script, generate_image, send_feedback
 
 
 
12
 
13
  # ๅˆๆœŸๅŒ–
14
  initialize(_use_local=False, use_gpu=True, use_dotenv=True)
 
9
  from datetime import datetime
10
  from pytz import timezone
11
  from scripts.survey import handle_form_submission, handle_visit_choice, handle_proceed, localize, script, generate_image, send_feedback
12
+ import torch
13
+
14
+ device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
15
 
16
  # ๅˆๆœŸๅŒ–
17
  initialize(_use_local=False, use_gpu=True, use_dotenv=True)
scripts/anime.py CHANGED
@@ -27,7 +27,7 @@ def init_model(use_local=False):
27
  model.eval()
28
 
29
  # numpy้…ๅˆ—ใฎ็”ปๅƒใ‚’ๅ—ใ‘ๅ–ใ‚Šใ€็ทš็”ปใ‚’็”Ÿๆˆใ—ใฆnumpy้…ๅˆ—ใง่ฟ”ใ™
30
- @spaces.GPU
31
  def generate_sketch(image, clahe_clip=-1, load_size=512):
32
  """
33
  Generate sketch image from input image
 
27
  model.eval()
28
 
29
  # numpy้…ๅˆ—ใฎ็”ปๅƒใ‚’ๅ—ใ‘ๅ–ใ‚Šใ€็ทš็”ปใ‚’็”Ÿๆˆใ—ใฆnumpy้…ๅˆ—ใง่ฟ”ใ™
30
+ # @spaces.GPU
31
  def generate_sketch(image, clahe_clip=-1, load_size=512):
32
  """
33
  Generate sketch image from input image
scripts/process_utils.py CHANGED
@@ -39,10 +39,8 @@ def initialize(_use_local=False, use_gpu=False, use_dotenv=False):
39
  if use_dotenv:
40
  load_dotenv()
41
  global model, sotai_gen_pipe, refine_gen_pipe, use_local, device, torch_dtype
42
- # device = "cuda" if use_gpu and torch.cuda.is_available() else "cpu"
43
- # torch_dtype = torch.float16 if device == "cuda" else torch.float32
44
- device = torch.device('cuda')
45
- torch_dtype = torch.float16
46
  use_local = _use_local
47
 
48
  print(f"\nDevice: {device}, Local model: {_use_local}\n")
 
39
  if use_dotenv:
40
  load_dotenv()
41
  global model, sotai_gen_pipe, refine_gen_pipe, use_local, device, torch_dtype
42
+ device = "cuda" if use_gpu and torch.cuda.is_available() else "cpu"
43
+ torch_dtype = torch.float16 if device == "cuda" else torch.float32
 
 
44
  use_local = _use_local
45
 
46
  print(f"\nDevice: {device}, Local model: {_use_local}\n")