Spaces: Running on Zero
Commit: Update app.py (Browse files)
app.py CHANGED
@@ -10,12 +10,8 @@ from diffusers import DiffusionPipeline, StableDiffusionXLPipeline, EDMEulerSche
 from huggingface_hub import hf_hub_download
 from huggingface_hub import InferenceClient
 
-device = "cuda" if torch.cuda.is_available() else "cpu"
-dtype = torch.float16
 vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16)
-
-repo = "fluently/Fluently-XL-Final"
-pipe = StableDiffusionXLPipeline.from_pretrained(repo, torch_dtype=torch.float16, vae=vae)
+pipe = StableDiffusionXLPipeline.from_pretrained("RunDiffusion/Juggernaut-XL-v9", torch_dtype=torch.float16, vae=vae)
 pipe.load_lora_weights("KingNish/Better-Image-XL-Lora", weight_name="example-03.safetensors", adapter_name="lora")
 pipe.set_adapters("lora")
 pipe.to("cuda")