Update app.py
app.py CHANGED
@@ -62,6 +62,10 @@ def generate(

    if not use_img2img:
        pipe = DiffusionPipeline.from_pretrained(model, torch_dtype=torch.float16)
+
+   if use_vae:
+       vae = AutoencoderKL.from_pretrained(vaecall, torch_dtype=torch.float16)
+       pipe = DiffusionPipeline.from_pretrained(model, vae=vae, torch_dtype=torch.float16)

    if use_img2img:
        pipe = AutoPipelineForImage2Image.from_pretrained(model, torch_dtype=torch.float16)
@@ -70,6 +74,10 @@ def generate(
        init_image = Image.open(BytesIO(response.content)).convert("RGB")
        init_image = init_image.resize((width, height))

+        if use_vae:
+            vae = AutoencoderKL.from_pretrained(vaecall, torch_dtype=torch.float16)
+            pipe = AutoPipelineForImage2Image.from_pretrained(model, torch_dtype=torch.float16)
+
    if use_lora:
        pipe.load_lora_weights(lora)
        pipe.fuse_lora(lora_scale)
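
Taken together, the two hunks make the custom VAE optional: when use_vae is set, an AutoencoderKL is loaded from vaecall and passed to the text-to-image DiffusionPipeline, while the image-to-image branch loads the VAE but, as written in this diff, does not forward it to AutoPipelineForImage2Image. The sketch below is a minimal, consolidated version of that logic under the same flags and names (model, vaecall, use_img2img, use_vae); the build_pipe helper is hypothetical and not part of app.py, and unlike the diff it forwards vae=vae in the img2img branch as well.

import torch
from diffusers import AutoencoderKL, AutoPipelineForImage2Image, DiffusionPipeline


def build_pipe(model, use_img2img=False, use_vae=False, vaecall=None):
    # Optionally load a standalone VAE checkpoint (hypothetical helper,
    # mirroring the use_vae branches added in the diff above).
    vae = None
    if use_vae:
        vae = AutoencoderKL.from_pretrained(vaecall, torch_dtype=torch.float16)

    if use_img2img:
        # Image-to-image pipeline; forward the custom VAE when one was loaded.
        if vae is not None:
            return AutoPipelineForImage2Image.from_pretrained(
                model, vae=vae, torch_dtype=torch.float16
            )
        return AutoPipelineForImage2Image.from_pretrained(
            model, torch_dtype=torch.float16
        )

    # Text-to-image pipeline, with or without the custom VAE.
    if vae is not None:
        return DiffusionPipeline.from_pretrained(
            model, vae=vae, torch_dtype=torch.float16
        )
    return DiffusionPipeline.from_pretrained(model, torch_dtype=torch.float16)

A call such as build_pipe(model, use_img2img=True, use_vae=True, vaecall=vaecall) would then cover both branch-specific blocks in generate with a single code path.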