Spaces: Running on Zero
tori29umai committed • Commit 2d9bada • Parent(s): 872f570
Update app.py
app.py CHANGED

@@ -1,7 +1,7 @@
 import spaces
 import gradio as gr
 import torch
-from diffusers import ControlNetModel, StableDiffusionXLControlNetImg2ImgPipeline, AutoencoderKL
+from diffusers import ControlNetModel, StableDiffusionXLControlNetImg2ImgPipeline, AutoencoderKL
 from PIL import Image
 import os
 import time
@@ -41,20 +41,14 @@ def load_model(lora_model):
     pipe = StableDiffusionXLControlNetImg2ImgPipeline.from_pretrained(
         "cagliostrolab/animagine-xl-3.1", controlnet=controlnet, vae=vae, torch_dtype=dtype
     )
-    pipe.scheduler = TCDScheduler.from_config(pipe.scheduler.config)
-
     pipe.enable_model_cpu_offload()
 
     # LoRAモデルの設定
     if lora_model == "とりにく風":
-        pipe.load_lora_weights(lora_dir, weight_name="
-        pipe.
-        pipe.set_adapters(["tcd-animaginexl-3_1", "tori29umai_line"], adapter_weights=[1.0, 1.4])
-        pipe.fuse_lora()
+        pipe.load_lora_weights(lora_dir, weight_name="tori29umai_line.safetensors", adapter_name="tori29umai_line")
+        pipe.set_adapters(["tori29umai_line"], adapter_weights=[1.0])
     elif lora_model == "プレーン":
-
-        pipe.set_adapters(["tcd-animaginexl-3_1"], adapter_weights=[1.0])
-        pipe.fuse_lora()
+        pass # プレーンの場合はLoRAを読み込まない
 
     # 現在のLoRAモデルを保存
     current_lora_model = lora_model
@@ -91,9 +85,8 @@ def predict(lora_model, input_image_path, prompt, negative_prompt, controlnet_sc
         negative_prompt=negative_prompt,
         controlnet_conditioning_scale=float(controlnet_scale),
         generator=generator,
-        num_inference_steps=
-
-        eta=0.3,
+        num_inference_steps=30,
+        eta=1.0,
     ).images[0]
     print(f"Time taken: {time.time() - last_time}")
     output_image = output_image.resize(input_image.size, Image.LANCZOS)
@@ -169,4 +162,4 @@ class Img2Img:
 
 img2img = Img2Img()
 img2img.demo.queue()
-img2img.demo.launch(share=True)
+img2img.demo.launch(share=True)
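After this commit, load_model no longer swaps in TCDScheduler or fuses a TCD LoRA: for "とりにく風" it only attaches the tori29umai_line style LoRA, for "プレーン" it leaves the pipeline untouched, and predict now runs 30 inference steps with eta=1.0. The sketch below is a rough assembly of those pieces from the new side of the diff, not the Space's full app.py; the ControlNet and VAE repo ids, lora_dir, prompts, and input images are placeholders that this commit does not show.

    import torch
    from diffusers import ControlNetModel, StableDiffusionXLControlNetImg2ImgPipeline, AutoencoderKL
    from PIL import Image

    dtype = torch.float16
    lora_dir = "./lora"  # assumed location of the LoRA weights (not shown in this commit)

    # Placeholder repos: the actual ControlNet and VAE used by the Space are not part of this diff.
    controlnet = ControlNetModel.from_pretrained("diffusers/controlnet-canny-sdxl-1.0", torch_dtype=dtype)
    vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=dtype)

    pipe = StableDiffusionXLControlNetImg2ImgPipeline.from_pretrained(
        "cagliostrolab/animagine-xl-3.1", controlnet=controlnet, vae=vae, torch_dtype=dtype
    )
    # The default scheduler is kept (the TCDScheduler line was removed in this commit).
    pipe.enable_model_cpu_offload()

    # "とりにく風" branch after the change: attach only the style LoRA, no fuse_lora().
    pipe.load_lora_weights(lora_dir, weight_name="tori29umai_line.safetensors", adapter_name="tori29umai_line")
    pipe.set_adapters(["tori29umai_line"], adapter_weights=[1.0])

    input_image = Image.open("input.png").convert("RGB")  # placeholder input image
    control_image = input_image                           # placeholder control image
    generator = torch.Generator(device="cpu").manual_seed(0)

    output_image = pipe(
        prompt="1girl, lineart",              # placeholder prompt
        negative_prompt="lowres, bad anatomy",  # placeholder negative prompt
        image=input_image,
        control_image=control_image,
        controlnet_conditioning_scale=1.0,
        generator=generator,
        num_inference_steps=30,  # previously truncated in the old side; now fixed at 30
        eta=1.0,                 # raised from 0.3
    ).images[0]
    output_image = output_image.resize(input_image.size, Image.LANCZOS)

Because eta is only consumed by schedulers whose step() accepts it (DDIM-style), passing eta=1.0 is harmless with other samplers; the visible effect of this commit is mainly the switch from the TCD setup to a plain 30-step run with a single un-fused LoRA adapter.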