patrickvonplaten committed on
Commit
3863db3
·
2 Parent(s): c56864f a1fd59a

Merge branch 'main' of https://huggingface.co/diffusers/tools

Browse files
Files changed (3) hide show
  1. run_local_xl.py +1 -1
  2. run_local_xl_inpaint.py +67 -0
  3. run_xl_ediffi.py +24 -10
run_local_xl.py CHANGED
@@ -17,7 +17,7 @@ api = HfApi()
17
  start_time = time.time()
18
 
19
  use_refiner = bool(int(sys.argv[1]))
20
- use_diffusers = True
21
 
22
  vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16, force_upcast=True)
23
  if use_diffusers:
 
17
  start_time = time.time()
18
 
19
  use_refiner = bool(int(sys.argv[1]))
20
+ use_diffusers = False
21
 
22
  vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16, force_upcast=True)
23
  if use_diffusers:
run_local_xl_inpaint.py ADDED
@@ -0,0 +1,67 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python3
2
+ from diffusers import DiffusionPipeline, EulerDiscreteScheduler, StableDiffusionPipeline, KDPM2DiscreteScheduler, StableDiffusionImg2ImgPipeline, HeunDiscreteScheduler, KDPM2AncestralDiscreteScheduler, DDIMScheduler
3
+ from diffusers import StableDiffusionXLPipeline, StableDiffusionXLImg2ImgPipeline, StableDiffusionXLInpaintPipeline, AutoencoderKL
4
+ import time
5
+ from pytorch_lightning import seed_everything
6
+ import os
7
+ from huggingface_hub import HfApi
8
+ # from compel import Compel
9
+ import torch
10
+ import sys
11
+ from pathlib import Path
12
+ import requests
13
+ from PIL import Image
14
+ from io import BytesIO
15
+
16
+ api = HfApi()
17
+ start_time = time.time()
18
+
19
+ use_refiner = bool(int(sys.argv[1]))
20
+ use_diffusers = True
21
+
22
+ vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16, force_upcast=True)
23
+ pipe = StableDiffusionXLInpaintPipeline.from_pretrained("stabilityai/stable-diffusion-xl-base-0.9", vae=vae, torch_dtype=torch.float16, variant="fp16", use_safetensors=True, local_files_only=True)
24
+ print(time.time() - start_time)
25
+ pipe.to("cuda")
26
+
27
+ def download_image(url):
28
+ response = requests.get(url)
29
+ return Image.open(BytesIO(response.content)).convert("RGB")
30
+
31
+
32
+ img_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo.png"
33
+ mask_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo_mask.png"
34
+
35
+ init_image = download_image(img_url).resize((1024, 1024))
36
+ mask_image = download_image(mask_url).resize((1024, 1024))
37
+
38
+ if use_refiner:
39
+ start_time = time.time()
40
+ refiner = StableDiffusionXLInpaintPipeline.from_pretrained("stabilityai/stable-diffusion-xl-refiner-0.9", vae=vae, torch_dtype=torch.float16, use_safetensors=True, variant="fp16")
41
+ refiner.to("cuda")
42
+ # refiner.enable_sequential_cpu_offload()
43
+
44
+ prompt = "A majestic tiger sitting on a bench"
45
+ steps = 50
46
+ seed = 3
47
+ denoising_end = None
48
+
49
+ seed_everything(seed)
50
+ start_time = time.time()
51
+ image = pipe(prompt=prompt, image=init_image, mask_image=mask_image, num_inference_steps=steps, denoising_end=denoising_end, strength=0.80, output_type="latent").images
52
+ print(time.time() - start_time)
53
+
54
+ if use_refiner:
55
+ image = refiner(prompt=prompt, image=image, mask_image=mask_image, num_inference_steps=steps, denoising_start=denoising_end).images[0]
56
+
57
+ file_name = f"aaa_1"
58
+ path = os.path.join(Path.home(), "images", "ediffi_sdxl", f"{file_name}.png")
59
+ image.save(path)
60
+
61
+ api.upload_file(
62
+ path_or_fileobj=path,
63
+ path_in_repo=path.split("/")[-1],
64
+ repo_id="patrickvonplaten/images",
65
+ repo_type="dataset",
66
+ )
67
+ print(f"https://huggingface.co/datasets/patrickvonplaten/images/blob/main/{file_name}.png")
run_xl_ediffi.py CHANGED
@@ -1,5 +1,5 @@
1
  #!/usr/bin/env python3
2
- from diffusers import DiffusionPipeline, EulerDiscreteScheduler, StableDiffusionPipeline, KDPM2DiscreteScheduler, StableDiffusionImg2ImgPipeline, HeunDiscreteScheduler, KDPM2AncestralDiscreteScheduler, DDIMScheduler
3
  from diffusers import DiffusionPipeline
4
  import time
5
  from pytorch_lightning import seed_everything
@@ -18,25 +18,39 @@ from torch.nn.functional import fractional_max_pool2d_with_indices
18
  api = HfApi()
19
  start_time = time.time()
20
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
21
  model_id = "stabilityai/stable-diffusion-xl-base-0.9"
22
- model_id = "runwayml/stable-diffusion-v1-5"
23
  pipe_high_noise = DiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16, variant="fp16", use_safetensors=True, local_files_only=True)
24
- pipe_high_noise.scheduler = EulerDiscreteScheduler.from_config(pipe_high_noise.scheduler.config)
25
  pipe_high_noise.to("cuda")
26
 
27
  pipe_low_noise = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-xl-refiner-0.9", torch_dtype=torch.float16, use_safetensors=True, variant="fp16")
28
- pipe_low_noise.scheduler = EulerDiscreteScheduler.from_config(pipe_low_noise.scheduler.config)
29
  pipe_low_noise.to("cuda")
30
 
31
- prompt = "A majestic lion jumping from a big stone at night"
32
 
33
- num_inference_steps = 40
34
- high_noise_frac = 0.8
35
 
36
- # seed = 0
37
- # seed_everything(seed)
 
 
 
38
 
39
- image = pipe_high_noise(prompt=prompt, num_inference_steps=num_inference_steps, denoising_end=high_noise_frac, output_type="pt").images
40
  image = pipe_low_noise(prompt=prompt, num_inference_steps=num_inference_steps, denoising_start=high_noise_frac, image=image).images[0]
41
 
42
  file_name = f"aaa_1"
 
1
  #!/usr/bin/env python3
2
+ from diffusers import DPMSolverMultistepScheduler, DiffusionPipeline, EulerDiscreteScheduler, StableDiffusionPipeline, KDPM2DiscreteScheduler, StableDiffusionImg2ImgPipeline, HeunDiscreteScheduler, KDPM2AncestralDiscreteScheduler, DDIMScheduler, HeunDiscreteScheduler, DEISMultistepScheduler
3
  from diffusers import DiffusionPipeline
4
  import time
5
  from pytorch_lightning import seed_everything
 
18
  api = HfApi()
19
  start_time = time.time()
20
 
21
+ scheduler = DPMSolverMultistepScheduler(
22
+ beta_start=0.00085,
23
+ beta_end=0.012,
24
+ beta_schedule="scaled_linear",
25
+ prediction_type="epsilon",
26
+ num_train_timesteps=1000,
27
+ trained_betas=None,
28
+ thresholding=False,
29
+ algorithm_type="dpmsolver++",
30
+ solver_type="midpoint",
31
+ lower_order_final=True,
32
+ use_karras_sigmas=True,
33
+ )
34
+
35
  model_id = "stabilityai/stable-diffusion-xl-base-0.9"
 
36
  pipe_high_noise = DiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16, variant="fp16", use_safetensors=True, local_files_only=True)
37
+ pipe_high_noise.scheduler = scheduler
38
  pipe_high_noise.to("cuda")
39
 
40
  pipe_low_noise = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-xl-refiner-0.9", torch_dtype=torch.float16, use_safetensors=True, variant="fp16")
41
+ pipe_low_noise.scheduler = scheduler
42
  pipe_low_noise.to("cuda")
43
 
44
+ prompt = "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k"
45
 
 
 
46
 
47
+ random_generator = torch.Generator()
48
+ random_generator.manual_seed(0)
49
+
50
+ num_inference_steps = 100
51
+ high_noise_frac = 0.8
52
 
53
+ image = pipe_high_noise(prompt=prompt, num_inference_steps=num_inference_steps, denoising_end=high_noise_frac, output_type="latent").images
54
  image = pipe_low_noise(prompt=prompt, num_inference_steps=num_inference_steps, denoising_start=high_noise_frac, image=image).images[0]
55
 
56
  file_name = f"aaa_1"