hervenivon committed
Commit d187fae · verified · 1 Parent(s): 5d63978

docs: update README.md

Files changed (1):
  1. README.md (+89, -0)
README.md ADDED
@@ -0,0 +1,89 @@
---
library_name: diffusers
---

Quick start: the example below loads Stable Diffusion v1.5 with the `more_details` LoRA, renders the same prompt and seed at LoRA scales of -1, 0, 1, and 1.5, and tiles the four results into a single comparison image (`demo.png`).

```python
from diffusers.models import AutoencoderKL
from diffusers import StableDiffusionPipeline
from diffusers.schedulers.scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler
from PIL import Image
import torch

# DPM++ 2M Karras scheduler settings
DPM_SOLVER_MULTI_STEP_SCHEDULER_CONFIG = {
    "algorithm_type": "dpmsolver++",
    "beta_end": 0.012,
    "beta_schedule": "scaled_linear",
    "beta_start": 0.00085,
    "clip_sample": False,
    "dynamic_thresholding_ratio": 0.995,
    "euler_at_final": False,
    "final_sigmas_type": "zero",
    "lambda_min_clipped": float("-inf"),
    "lower_order_final": True,
    "num_train_timesteps": 1000,
    "prediction_type": "epsilon",
    "sample_max_value": 1.0,
    "set_alpha_to_one": False,
    "solver_order": 2,
    "solver_type": "midpoint",
    "steps_offset": 1,
    "thresholding": False,
    "timestep_spacing": "linspace",
    "trained_betas": None,
    "use_karras_sigmas": True,
    "use_lu_lambdas": False,
    "variance_type": None,
}

if __name__ == "__main__":
    width = 512
    height = int((width * 1.25 // 8) * 8)  # 4:5 portrait, rounded to a multiple of 8

    # Fine-tuned MSE VAE for cleaner decoding
    vae = AutoencoderKL.from_pretrained("stabilityai/sd-vae-ft-mse")
    pipe = StableDiffusionPipeline.from_pretrained(
        # "scenario-labs/Flat-2D_Animerge_v45-sharp",
        "runwayml/stable-diffusion-v1-5",
        use_safetensors=True,
        safety_checker=None,
        vae=vae,
    ).to("cuda")
    pipe.scheduler = DPMSolverMultistepScheduler.from_config(
        DPM_SOLVER_MULTI_STEP_SCHEDULER_CONFIG,
    )

    prompt = "a cute robot digital illustration, full pose"
    seed = 2544574284

    images = []
    scales = [-1, 0, 1, 1.5]

    # Generate the same prompt/seed once per LoRA scale
    for scale in scales:
        generator = torch.Generator(device="cpu").manual_seed(seed)
        pipe.load_lora_weights("scenario-labs/more_details", weight_name="more_details.safetensors")
        pipe.fuse_lora(lora_scale=scale)
        image = pipe(
            prompt,
            generator=generator,
            num_inference_steps=25,
            num_images_per_prompt=1,
            width=width,
            height=height,
        ).images[0]
        pipe.unfuse_lora()
        images.append(image)

    # Combine the images into a single row
    combined_image = Image.new("RGB", (width * len(images), height))
    x_offset = 0
    for image in images:
        combined_image.paste(image, (x_offset, 0))
        x_offset += width

    # Save the combined image
    combined_image.save("demo.png")
```
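
As a variation, the LoRA strength can also be changed per call without fusing and unfusing, using diffusers' PEFT-backed adapter API. The following is a minimal sketch, not part of the repository's example: it assumes the `pipe` object from the quick start above, the `peft` package installed, and an illustrative adapter name and weight.

```python
# Sketch (assumes `pipe` from the quick-start example and `peft` installed).
# Load the LoRA once under an explicit adapter name, then scale it per call.
pipe.load_lora_weights(
    "scenario-labs/more_details",
    weight_name="more_details.safetensors",
    adapter_name="more_details",  # illustrative adapter label
)
pipe.set_adapters(["more_details"], adapter_weights=[1.2])  # roughly analogous to lora_scale=1.2

image = pipe(
    "a cute robot digital illustration, full pose",
    num_inference_steps=25,
    width=512,
    height=640,
).images[0]
image.save("demo_adapter_weight.png")
```

Fusing (as in the quick start) bakes the LoRA into the base weights before sampling, whereas `set_adapters` keeps the adapter separate so its weight can be changed again without reloading the file.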