yeq6x committed on
Commit e19c0d1 • 1 Parent(s): 2a43cdb
Files changed (2)
  1. app.py +0 -3
  2. scripts/process_utils.py +4 -4
app.py CHANGED
@@ -9,9 +9,6 @@ from scripts.anime import init_model
 from datetime import datetime
 from pytz import timezone
 from scripts.survey import handle_form_submission, handle_visit_choice, handle_proceed, localize, script, generate_image, send_feedback
-import torch
-
-device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
 
 # Initialization
 initialize(_use_local=False, use_gpu=True, use_dotenv=True)
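The removed lines take the module-level torch import and the CUDA/CPU device pick out of app.py; the "Device: {device}" message printed by initialize() in scripts/process_utils.py suggests that selection now happens there instead. A minimal sketch of that pattern, assuming process_utils.py keeps a module-level device and honours the use_gpu flag (the exact placement is an assumption, not shown in this commit):

# Hedged sketch, not the commit's actual code: device selection handled by the
# initializer instead of app.py. torch.device and torch.cuda.is_available are
# real PyTorch APIs; everything else here is illustrative.
import torch

device = torch.device("cpu")  # module-level default, overwritten by initialize()

def initialize(_use_local=False, use_gpu=False, use_dotenv=False):
    global device
    # Use CUDA only when requested and actually available, otherwise fall back to CPU.
    device = torch.device("cuda" if use_gpu and torch.cuda.is_available() else "cpu")
    print(f"\nDevice: {device}, Local model: {_use_local}\n")

Under this sketch, app.py only calls initialize(_use_local=False, use_gpu=True, use_dotenv=True), as in the unchanged line above.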
scripts/process_utils.py CHANGED
@@ -45,10 +45,10 @@ def initialize(_use_local=False, use_gpu=False, use_dotenv=False):
 
     print(f"\nDevice: {device}, Local model: {_use_local}\n")
 
-    init_model(use_local)
-    model = load_wd14_tagger_model()
-    sotai_gen_pipe = initialize_sotai_model()
-    refine_gen_pipe = initialize_refine_model()
+    # init_model(use_local)
+    # model = load_wd14_tagger_model()
+    # sotai_gen_pipe = initialize_sotai_model()
+    # refine_gen_pipe = initialize_refine_model()
 
 def load_lora(pipeline, lora_path, adapter_name, alpha=0.75):
     pipeline.load_lora_weights(lora_path, adapter_name)
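The added lines comment out the eager model setup, so initialize() no longer builds the WD14 tagger or the sotai/refine pipelines at startup. A common follow-up for this kind of change is to load them lazily on first use; the sketch below is a hypothetical pattern, not part of this commit, reusing the global names and loader functions from the commented-out lines (assumed to be defined elsewhere in scripts/process_utils.py) plus an invented ensure_models_loaded() helper:

# Hypothetical lazy-loading sketch; this commit only comments the calls out.
# init_model, load_wd14_tagger_model, initialize_sotai_model and
# initialize_refine_model are the loaders from the diff, assumed to exist
# elsewhere in this module; ensure_models_loaded() is an assumed helper.
model = None
sotai_gen_pipe = None
refine_gen_pipe = None

def ensure_models_loaded(use_local=False):
    """Build the tagger and generation pipelines the first time they are needed."""
    global model, sotai_gen_pipe, refine_gen_pipe
    if model is None:
        init_model(use_local)
        model = load_wd14_tagger_model()
    if sotai_gen_pipe is None:
        sotai_gen_pipe = initialize_sotai_model()
    if refine_gen_pipe is None:
        refine_gen_pipe = initialize_refine_model()

Call sites that previously relied on initialize() having built these objects would call ensure_models_loaded() before using them.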