Quick start:
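The snippet below loads Stable Diffusion 1.5 with the fine-tuned MSE VAE and a DPM-Solver++ (Karras) scheduler, renders the same prompt and seed with the more_details LoRA fused at several scales, and tiles the results into one image for comparison.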
from diffusers import AutoencoderKL, DPMSolverMultistepScheduler, StableDiffusionPipeline
from PIL import Image
import torch

DPM_SOLVER_MULTI_STEP_SCHEDULER_CONFIG = {
    "algorithm_type": "dpmsolver++",
    "beta_end": 0.012,
    "beta_schedule": "scaled_linear",
    "beta_start": 0.00085,
    "clip_sample": False,
    "dynamic_thresholding_ratio": 0.995,
    "euler_at_final": False,
    "final_sigmas_type": "zero",
    "lambda_min_clipped": float("-inf"),
    "lower_order_final": True,
    "num_train_timesteps": 1000,
    "prediction_type": "epsilon",
    "sample_max_value": 1.0,
    "set_alpha_to_one": False,
    "solver_order": 2,
    "solver_type": "midpoint",
    "steps_offset": 1,
    "thresholding": False,
    "timestep_spacing": "linspace",
    "trained_betas": None,
    "use_karras_sigmas": True,
    "use_lu_lambdas": False,
    "variance_type": None,
}

if __name__ == "__main__":
    width = 512
    # 1.25 aspect ratio, rounded down to a multiple of 8 (640); SD works in 8-pixel latent blocks.
    height = int((width * 1.25 // 8) * 8)

    # Swap in the fine-tuned MSE VAE for cleaner decodes.
    vae = AutoencoderKL.from_pretrained("stabilityai/sd-vae-ft-mse")
    pipe = StableDiffusionPipeline.from_pretrained(
        "runwayml/stable-diffusion-v1-5",
        use_safetensors=True,
        safety_checker=None,
        vae=vae,
    ).to("cuda")
    pipe.scheduler = DPMSolverMultistepScheduler.from_config(
        DPM_SOLVER_MULTI_STEP_SCHEDULER_CONFIG
    )

    prompt = "a cute robot digital illustration, full pose"
    seed = 2544574284

    # Load the LoRA once up front; re-loading it on every iteration would try to
    # register a duplicate adapter. fuse_lora()/unfuse_lora() applies each scale below.
    pipe.load_lora_weights("scenario-labs/more_details", weight_name="more_details.safetensors")

    images = []
    scales = [-1, 0, 1, 1.5]
    for scale in scales:
        # Re-seed each run so every scale starts from identical noise.
        generator = torch.Generator(device="cpu").manual_seed(seed)
        pipe.fuse_lora(lora_scale=scale)
        image = pipe(
            prompt,
            generator=generator,
            num_inference_steps=25,
            num_images_per_prompt=1,
            width=width,
            height=height,
        ).images[0]
        pipe.unfuse_lora()
        images.append(image)

    # Combine the images into a single row
    combined_image = Image.new('RGB', (width * len(images), height))
    x_offset = 0
    for image in images:
        combined_image.paste(image, (x_offset, 0))
        x_offset += width
    # Save the combined image
    combined_image.save("demo.png")
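
Running the script writes demo.png, a 2048x640 strip showing the same prompt and seed rendered at LoRA scales -1, 0, 1, and 1.5, which makes the effect of the detail slider easy to compare side by side.

If you would rather not fuse and unfuse the weights on every iteration, diffusers can also apply the LoRA scale at call time via cross_attention_kwargs. A minimal sketch of the loop body under that approach, assuming the LoRA has been loaded once as above and is left unfused:

    for scale in scales:
        generator = torch.Generator(device="cpu").manual_seed(seed)
        # Pass the scale per call instead of baking it into the weights.
        image = pipe(
            prompt,
            generator=generator,
            num_inference_steps=25,
            width=width,
            height=height,
            cross_attention_kwargs={"scale": scale},
        ).images[0]
        images.append(image)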