File size: 2,491 Bytes
d187fae
 
 
 
3cec6df
d187fae
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
---
library_name: diffusers
---

<img src="demo.png" alt="a cute robot digital illustration, full pose"/>


## Quick start

```python
from diffusers.models import AutoencoderKL
from diffusers import StableDiffusionPipeline
from diffusers.schedulers.scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler
from PIL import Image
import torch

# DPM-Solver++ (2nd order, midpoint) scheduler config with Karras sigmas,
# using the beta schedule Stable Diffusion 1.5 was trained with.
DPM_SOLVER_MULTI_STEP_SCHEDULER_CONFIG = {
    "algorithm_type": "dpmsolver++",
    "beta_end": 0.012,
    "beta_schedule": "scaled_linear",
    "beta_start": 0.00085,
    "clip_sample": False,
    "dynamic_thresholding_ratio": 0.995,
    "euler_at_final": False,
    "final_sigmas_type": "zero",
    "lambda_min_clipped": float("-inf"),
    "lower_order_final": True,
    "num_train_timesteps": 1000,
    "prediction_type": "epsilon",
    "sample_max_value": 1.0,
    "set_alpha_to_one": False,
    "solver_order": 2,
    "solver_type": "midpoint",
    "steps_offset": 1,
    "thresholding": False,
    "timestep_spacing": "linspace",
    "trained_betas": None,
    "use_karras_sigmas": True,
    "use_lu_lambdas": False,
    "variance_type": None,
}

if __name__ == "__main__":
    width = 512
    # 4:5 portrait; rounded down to a multiple of 8 as required by the VAE.
    height = int((width * 1.25 // 8) * 8)

    # The ft-MSE VAE gives cleaner decodes than the SD 1.5 default VAE.
    vae = AutoencoderKL.from_pretrained("stabilityai/sd-vae-ft-mse")
    pipe = StableDiffusionPipeline.from_pretrained(
        "runwayml/stable-diffusion-v1-5",
        use_safetensors=True,
        safety_checker=None,
        vae=vae,
    ).to("cuda")
    pipe.scheduler = DPMSolverMultistepScheduler.from_config(
        DPM_SOLVER_MULTI_STEP_SCHEDULER_CONFIG,
    )

    prompt = "a cute robot digital illustration, full pose"
    seed = 2544574284

    # Load the LoRA ONCE; only fuse/unfuse per scale inside the loop.
    # (The original re-loaded the weights on every iteration, which is
    # wasteful and can raise an adapter-name conflict in diffusers.)
    pipe.load_lora_weights(
        "scenario-labs/more_details", weight_name="more_details.safetensors"
    )

    images = []
    scales = [-1, 0, 1, 1.5]

    for scale in scales:
        # Re-seed per image so every scale starts from identical noise.
        generator = torch.Generator(device="cpu").manual_seed(seed)
        pipe.fuse_lora(lora_scale=scale)
        image = pipe(
            prompt,
            generator=generator,
            num_inference_steps=25,
            # Fixed: `num_samples` is not a StableDiffusionPipeline.__call__
            # parameter; the correct kwarg is `num_images_per_prompt`.
            num_images_per_prompt=1,
            width=width,
            height=height,
        ).images[0]
        pipe.unfuse_lora()
        images.append(image)

    # Paste the per-scale outputs side by side into one horizontal strip.
    combined_image = Image.new("RGB", (width * len(images), height))
    x_offset = 0
    for image in images:
        combined_image.paste(image, (x_offset, 0))
        x_offset += width

    # Save the comparison strip shown at the top of this card.
    combined_image.save("demo.png")

```