patrickvonplaten committed on
Commit
a67f088
·
1 Parent(s): e6c75db

merge conflict

Browse files
Files changed (2) hide show
  1. 1 +66 -0
  2. run_local_xl.py +20 -16
1 ADDED
@@ -0,0 +1,66 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
#!/usr/bin/env python3
"""Sweep the SDXL base (+ optional refiner) pipeline over a grid of step
counts and base/refiner denoising split points, saving each result under
~/images/ediffi_sdxl/ and uploading it to the patrickvonplaten/images
dataset repo.

Usage: script.py <use_refiner: 0|1>
"""
from diffusers import DiffusionPipeline, EulerDiscreteScheduler, StableDiffusionPipeline, KDPM2DiscreteScheduler, StableDiffusionImg2ImgPipeline, HeunDiscreteScheduler, KDPM2AncestralDiscreteScheduler, DDIMScheduler
from diffusers import StableDiffusionXLPipeline, StableDiffusionXLImg2ImgPipeline
import time
from pytorch_lightning import seed_everything
import os
from huggingface_hub import HfApi
# from compel import Compel
import torch
import sys
from pathlib import Path
import requests
from PIL import Image
from io import BytesIO

api = HfApi()
start_time = time.time()

# CLI flag: 1 -> run base in latent mode and hand off to the refiner, 0 -> base only.
use_refiner = bool(int(sys.argv[1]))
# Toggle between diffusers-format checkpoints and single-file .safetensors loading.
use_diffusers = True

if use_diffusers:
    start_time = time.time()
    pipe = StableDiffusionXLPipeline.from_pretrained("stabilityai/stable-diffusion-xl-base-0.9", torch_dtype=torch.float16, variant="fp16", use_safetensors=True, local_files_only=True)
    pipe.to("cuda")

    if use_refiner:
        refiner = StableDiffusionXLImg2ImgPipeline.from_pretrained("stabilityai/stable-diffusion-xl-refiner-0.9", torch_dtype=torch.float16, use_safetensors=True, variant="fp16")
        refiner.to("cuda")
        # refiner.enable_sequential_cpu_offload()
else:
    pipe = StableDiffusionXLPipeline.from_single_file("https://huggingface.co/stabilityai/stable-diffusion-xl-base-0.9/blob/main/sd_xl_base_0.9.safetensors", torch_dtype=torch.float16, use_safetensors=True)
    pipe.to("cuda")

    if use_refiner:
        refiner = StableDiffusionXLImg2ImgPipeline.from_single_file("https://huggingface.co/stabilityai/stable-diffusion-xl-refiner-0.9/blob/main/sd_xl_refiner_0.9.safetensors", torch_dtype=torch.float16, use_safetensors=True)
        refiner.to("cuda")


prompt = "An astronaut riding a green horse on Mars"
for steps in [24, 27, 31]:
    for denoising_end in [0.63, 0.66, 0.67, 0.71]:
        seed = 0
        seed_everything(seed)
        # FIX: the sweep variables were previously ignored — the call hard-coded
        # num_inference_steps=40, denoising_end=0.675, so every grid point
        # rendered the identical image. Use the loop values instead.
        image = pipe(prompt=prompt, num_inference_steps=steps, denoising_end=denoising_end, output_type="latent" if use_refiner else "pil").images[0]
        # image = pipe(prompt=prompt, output_type="latent" if use_refiner else "pil").images[0]

        if use_refiner:
            # The refiner resumes exactly where the base stopped: its
            # denoising_start must equal the base's denoising_end.
            image = refiner(prompt=prompt, num_inference_steps=steps, denoising_start=denoising_end, image=image[None, :]).images[0]

        # pipe.unet.to(memory_format=torch.channels_last)
        # pipe(prompt=prompt, num_inference_steps=2).images[0]

        # image = pipe(prompt=prompt, num_images_per_prompt=1, num_inference_steps=40, output_type="latent").images

        # FIX: the name was previously f"aaa_{seed}" with seed fixed at 0, so
        # every iteration overwrote the same file; key on the sweep parameters
        # so all grid points survive.
        file_name = f"{steps}_{denoising_end}"
        path = os.path.join(Path.home(), "images", "ediffi_sdxl", f"{file_name}.png")
        image.save(path)

        api.upload_file(
            path_or_fileobj=path,
            path_in_repo=path.split("/")[-1],
            repo_id="patrickvonplaten/images",
            repo_type="dataset",
        )
        print(f"https://huggingface.co/datasets/patrickvonplaten/images/blob/main/ediffi_sdxl/{file_name}.png")
run_local_xl.py CHANGED
@@ -37,27 +37,31 @@ else:
37
  refiner.to("cuda")
38
 
39
 
40
- prompt = "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k"
41
- seed_everything(0)
42
- image = pipe(prompt=prompt, num_inference_steps=2, output_type="latent" if use_refiner else "pil").images[0]
 
 
 
43
  # image = pipe(prompt=prompt, output_type="latent" if use_refiner else "pil").images[0]
44
 
45
- if use_refiner:
46
- image = refiner(prompt=prompt, num_inference_steps=5, image=image[None, :]).images[0]
 
47
 
48
  # pipe.unet.to(memory_format=torch.channels_last)
49
  # pipe(prompt=prompt, num_inference_steps=2).images[0]
50
 
51
  # image = pipe(prompt=prompt, num_images_per_prompt=1, num_inference_steps=40, output_type="latent").images
52
 
53
- file_name = f"aaa"
54
- path = os.path.join(Path.home(), "images", f"{file_name}.png")
55
- image.save(path)
56
-
57
- api.upload_file(
58
- path_or_fileobj=path,
59
- path_in_repo=path.split("/")[-1],
60
- repo_id="patrickvonplaten/images",
61
- repo_type="dataset",
62
- )
63
- print(f"https://huggingface.co/datasets/patrickvonplaten/images/blob/main/{file_name}.png")
 
37
  refiner.to("cuda")
38
 
39
 
40
+ prompt = "An astronaut riding a green horse on Mars"
41
+ for steps in [24, 27, 31]:
42
+ for denoising_end_t2i in [0.63, 0.66, 0.67, 0.71]:
43
+ seed = 0
44
+ seed_everything(seed)
45
+ image = pipe(prompt=prompt, num_inference_steps=steps, denoising_end=denoising_end_t2i, output_type="latent" if use_refiner else "pil").images[0]
46
  # image = pipe(prompt=prompt, output_type="latent" if use_refiner else "pil").images[0]
47
 
48
+ if use_refiner:
49
+ denoising_start = denoising_end_t2i # denoising_start is denoising_end_t2i
50
+ image = refiner(prompt=prompt, num_inference_steps=steps, denoising_start=denoising_start, image=image[None, :]).images[0]
51
 
52
  # pipe.unet.to(memory_format=torch.channels_last)
53
  # pipe(prompt=prompt, num_inference_steps=2).images[0]
54
 
55
  # image = pipe(prompt=prompt, num_images_per_prompt=1, num_inference_steps=40, output_type="latent").images
56
 
57
+ file_name = f"{steps}_{denoising_end_t2i}"
58
+ path = os.path.join(Path.home(), "images", "ediffi_sdxl", f"{file_name}.png")
59
+ image.save(path)
60
+
61
+ api.upload_file(
62
+ path_or_fileobj=path,
63
+ path_in_repo=path.split("/")[-1],
64
+ repo_id="patrickvonplaten/images",
65
+ repo_type="dataset",
66
+ )
67
+ print(f"https://huggingface.co/datasets/patrickvonplaten/images/blob/main/ediffi_sdxl/{file_name}.png")