Spaces: Runtime error
Update app.py
app.py CHANGED
@@ -5,23 +5,26 @@ from safetensors.torch import load_file
 import gradio as gr
 import spaces
 
-
 base = "stabilityai/stable-diffusion-xl-base-1.0"
 repo = "ByteDance/SDXL-Lightning"
-ckpt = "
+ckpt = "sdxl_lightning_4step_unet.safetensors" # Use the correct ckpt for your step setting!
+
+# Load model.
 unet = UNet2DConditionModel.from_config(base, subfolder="unet").to("cuda", torch.float16)
 unet.load_state_dict(load_file(hf_hub_download(repo, ckpt), device="cuda"))
 pipe = StableDiffusionXLPipeline.from_pretrained(base, unet=unet, torch_dtype=torch.float16, variant="fp16").to("cuda")
+
 # Ensure sampler uses "trailing" timesteps and "sample" prediction type.
-pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config, timestep_spacing="trailing"
+pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config, timestep_spacing="trailing")
+
 # Load model.
 @spaces.GPU
-def generate(prompt):
-    image = pipe(prompt, num_inference_steps=
+def generate(prompt, steps):
+    image = pipe(prompt, num_inference_steps=steps, guidance_scale=0).images[0]
     return image
 
 output_image = gr.Image(type="pil")
-demo = gr.Interface(fn=generate, inputs=
+demo = gr.Interface(fn=generate, inputs=[gr.Text, gr.slider], outputs=output_image)
 
 if __name__ == "__main__":
     unet = UNet2DConditionModel.from_config(base, subfolder="unet").to("cuda", torch.float16)
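Note: several of the removed lines above are truncated in this capture of the diff (the old ckpt, scheduler, pipe call, and gr.Interface lines are cut off), so only the added versions are shown in full. For reference, below is a minimal runnable sketch of what the updated app.py plausibly looks like after this commit. It is not the committed file: the imports above the hunk (before line 5) are inferred from the APIs used, the committed inputs=[gr.Text, gr.slider] is swapped for component instances (Gradio exposes gr.Slider, not gr.slider, and gr.Interface expects instances or strings), demo.launch() is assumed for the __main__ block in place of the repeated unet line, and the slider range, labels, and default step count are illustrative. The lowercase gr.slider attribute and the missing launch call would be consistent with the Space's "Runtime error" status.

import torch
from diffusers import StableDiffusionXLPipeline, UNet2DConditionModel, EulerDiscreteScheduler
from huggingface_hub import hf_hub_download
from safetensors.torch import load_file
import gradio as gr
import spaces

base = "stabilityai/stable-diffusion-xl-base-1.0"
repo = "ByteDance/SDXL-Lightning"
ckpt = "sdxl_lightning_4step_unet.safetensors"  # Use the correct ckpt for your step setting!

# Load model: distilled SDXL-Lightning UNet weights into the SDXL base pipeline.
unet = UNet2DConditionModel.from_config(base, subfolder="unet").to("cuda", torch.float16)
unet.load_state_dict(load_file(hf_hub_download(repo, ckpt), device="cuda"))
pipe = StableDiffusionXLPipeline.from_pretrained(base, unet=unet, torch_dtype=torch.float16, variant="fp16").to("cuda")

# Ensure sampler uses "trailing" timesteps.
pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config, timestep_spacing="trailing")

# Generate on the Space's GPU.
@spaces.GPU
def generate(prompt, steps):
    # SDXL-Lightning is distilled for CFG-free sampling, hence guidance_scale=0.
    image = pipe(prompt, num_inference_steps=int(steps), guidance_scale=0).images[0]
    return image

# Component instances (not classes): a textbox for the prompt, a slider for the step count.
output_image = gr.Image(type="pil")
demo = gr.Interface(
    fn=generate,
    inputs=[gr.Textbox(label="Prompt"), gr.Slider(1, 8, value=4, step=1, label="Steps")],
    outputs=output_image,
)

if __name__ == "__main__":
    demo.launch()

The guidance_scale=0 and the 4-step checkpoint follow the added lines in the diff itself; if the step count is changed, the ckpt filename should be changed to the matching SDXL-Lightning checkpoint, as the added comment warns.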