Update app.py
app.py CHANGED
@@ -60,24 +60,19 @@ def generate(
 ):
     if torch.cuda.is_available():
 
+        if use_vae:
+            vae = AutoencoderKL.from_pretrained(vaecall, torch_dtype=torch.float16)
+
         if not use_img2img:
-            pipe = DiffusionPipeline.from_pretrained(model, torch_dtype=torch.float16)
-
-            if use_vae:
-                vae = AutoencoderKL.from_pretrained(vaecall, torch_dtype=torch.float16)
-                pipe = DiffusionPipeline.from_pretrained(model, vae=vae, torch_dtype=torch.float16)
+            pipe = DiffusionPipeline.from_pretrained(model, vae=vae, torch_dtype=torch.float16)
 
         if use_img2img:
-            pipe = AutoPipelineForImage2Image.from_pretrained(model, torch_dtype=torch.float16)
+            pipe = AutoPipelineForImage2Image.from_pretrained(model, vae=vae, torch_dtype=torch.float16)
 
             response = requests.get(url)
             init_image = Image.open(BytesIO(response.content)).convert("RGB")
             init_image = init_image.resize((width, height))
 
-            if use_vae:
-                vae = AutoencoderKL.from_pretrained(vaecall, torch_dtype=torch.float16)
-                pipe = AutoPipelineForImage2Image.from_pretrained(model, torch_dtype=torch.float16)
-
         if use_lora:
             pipe.load_lora_weights(lora)
             pipe.fuse_lora(lora_scale)
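
The commit hoists the optional VAE load above both branches and passes it into each pipeline constructor instead of reloading the pipeline inside a nested use_vae check. Below is a minimal sketch of the resulting logic as a standalone helper, not the app's actual generate() body. Parameter names (model, vaecall, use_vae, use_img2img, use_lora, lora, lora_scale, url, width, height) come from the diff; build_pipeline and its defaults are hypothetical. One assumption: the hunk binds vae only when use_vae is true, so the sketch forwards the vae override conditionally rather than passing an unbound or None value, unless vae is already defined earlier in the function outside this hunk.

import torch
import requests
from io import BytesIO
from PIL import Image
from diffusers import AutoencoderKL, AutoPipelineForImage2Image, DiffusionPipeline

def build_pipeline(model, use_img2img=False, use_vae=False, vaecall=None,
                   use_lora=False, lora=None, lora_scale=1.0,
                   url=None, width=1024, height=1024):
    # Load the custom VAE once, up front, as the commit now does.
    overrides = {}
    if use_vae:
        overrides["vae"] = AutoencoderKL.from_pretrained(vaecall, torch_dtype=torch.float16)

    init_image = None
    if not use_img2img:
        # Text-to-image path: hand the optional VAE straight to the pipeline.
        pipe = DiffusionPipeline.from_pretrained(model, torch_dtype=torch.float16, **overrides)
    else:
        # Image-to-image path: same VAE override, plus fetching and resizing the init image.
        pipe = AutoPipelineForImage2Image.from_pretrained(model, torch_dtype=torch.float16, **overrides)
        response = requests.get(url)
        init_image = Image.open(BytesIO(response.content)).convert("RGB")
        init_image = init_image.resize((width, height))

    if use_lora:
        # LoRA handling is unchanged by the commit.
        pipe.load_lora_weights(lora)
        pipe.fuse_lora(lora_scale=lora_scale)

    return pipe, init_image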