Update app.py
app.py CHANGED
@@ -9,15 +9,14 @@ import spaces
 base = "stabilityai/stable-diffusion-xl-base-1.0"
 repo = "ByteDance/SDXL-Lightning"
 ckpt = "sdxl_lightning_1step_unet_x0.safetensors" # Use the correct ckpt for your step setting!
-
+unet = UNet2DConditionModel.from_config(base, subfolder="unet").to("cuda", torch.float16)
+unet.load_state_dict(load_file(hf_hub_download(repo, ckpt), device="cuda"))
+pipe = StableDiffusionXLPipeline.from_pretrained(base, unet=unet, torch_dtype=torch.float16, variant="fp16").to("cuda")
+# Ensure sampler uses "trailing" timesteps and "sample" prediction type.
+pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config, timestep_spacing="trailing", prediction_type="sample")
 # Load model.
 @spaces.GPU
 def generate(prompt):
-    unet = UNet2DConditionModel.from_config(base, subfolder="unet").to("cuda", torch.float16)
-    unet.load_state_dict(load_file(hf_hub_download(repo, ckpt), device="cuda"))
-    pipe = StableDiffusionXLPipeline.from_pretrained(base, unet=unet, torch_dtype=torch.float16, variant="fp16").to("cuda")
-    # Ensure sampler uses "trailing" timesteps and "sample" prediction type.
-    pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config, timestep_spacing="trailing", prediction_type="sample")
     image = pipe(prompt, num_inference_steps=1, guidance_scale=0).images[0]
     return image
 # Ensure using the same inference steps as the loaded model and CFG set to 0.
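This commit hoists the model setup (UNet construction, checkpoint download, pipeline creation, and scheduler swap) out of the @spaces.GPU-decorated generate() function into module scope, so the SDXL-Lightning weights load once when the Space starts instead of being rebuilt on every request. For orientation, here is a minimal sketch of what the full app.py could look like after the change; the import block and the Gradio wiring at the bottom are assumptions (the diff only shows the hunk starting at line 9), while the model-setup and generate() lines are taken from the change itself.

import gradio as gr  # assumed: typical UI layer for a Space; not visible in the diff
import spaces
import torch
from diffusers import StableDiffusionXLPipeline, UNet2DConditionModel, EulerDiscreteScheduler
from huggingface_hub import hf_hub_download
from safetensors.torch import load_file

base = "stabilityai/stable-diffusion-xl-base-1.0"
repo = "ByteDance/SDXL-Lightning"
ckpt = "sdxl_lightning_1step_unet_x0.safetensors"  # Use the correct ckpt for your step setting!

# Model setup now happens once at import time (module scope), not on every call.
unet = UNet2DConditionModel.from_config(base, subfolder="unet").to("cuda", torch.float16)
unet.load_state_dict(load_file(hf_hub_download(repo, ckpt), device="cuda"))
pipe = StableDiffusionXLPipeline.from_pretrained(base, unet=unet, torch_dtype=torch.float16, variant="fp16").to("cuda")

# Ensure sampler uses "trailing" timesteps and "sample" prediction type.
pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config, timestep_spacing="trailing", prediction_type="sample")

# Only inference runs under the GPU decorator.
@spaces.GPU
def generate(prompt):
    # Ensure using the same inference steps as the loaded model and CFG set to 0.
    image = pipe(prompt, num_inference_steps=1, guidance_scale=0).images[0]
    return image

# Assumed wiring: a minimal Gradio interface exposing generate().
demo = gr.Interface(fn=generate, inputs=gr.Textbox(label="Prompt"), outputs=gr.Image(label="Generated image"))
demo.launch()

Keeping only the pipe(...) call inside the decorated function leaves just inference as the per-request GPU work, which is the usual pattern for Spaces that use @spaces.GPU.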