eyal.benaroche committed on
Commit
29a59f2
β€’
1 Parent(s): 7b0a20d

update loading lora

Browse files
Files changed (1) hide show
  1. app.py +4 -6
app.py CHANGED
@@ -29,11 +29,10 @@ sdxl_loras_raw = sorted(sdxl_loras_raw, key=lambda x: x["likes"], reverse=True)
29
  DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
30
  model_id = "stabilityai/stable-diffusion-xl-base-1.0"
31
 
32
- if gr.NO_RELOAD:
33
- pipe = DiffusionPipeline.from_pretrained(model_id, variant="fp16")
34
- pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config)
35
- pipe.load_lora_weights("jasperai/flash-sdxl", adapter_name="lora")
36
- pipe.to(device=DEVICE, dtype=torch.float16)
37
 
38
 
39
  MAX_SEED = np.iinfo(np.int32).max
@@ -43,7 +42,6 @@ MAX_IMAGE_SIZE = 1024
43
  def check_and_load_lora_user(user_lora_selector, user_lora_weight, gr_lora_loaded):
44
  flash_sdxl_id = "jasperai/flash-sdxl"
45
 
46
- global pipe
47
  if user_lora_selector == "" or user_lora_selector == "":
48
  raise gr.Error("Please select a LoRA before running the inference.")
49
 
 
29
  DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
30
  model_id = "stabilityai/stable-diffusion-xl-base-1.0"
31
 
32
+ pipe = DiffusionPipeline.from_pretrained(model_id, variant="fp16")
33
+ pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config)
34
+ pipe.load_lora_weights("jasperai/flash-sdxl", adapter_name="lora")
35
+ pipe.to(device=DEVICE, dtype=torch.float16)
 
36
 
37
 
38
  MAX_SEED = np.iinfo(np.int32).max
 
42
  def check_and_load_lora_user(user_lora_selector, user_lora_weight, gr_lora_loaded):
43
  flash_sdxl_id = "jasperai/flash-sdxl"
44
 
 
45
  if user_lora_selector == "" or user_lora_selector == "":
46
  raise gr.Error("Please select a LoRA before running the inference.")
47