#!/usr/bin/env python3
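"""Quick SDXL 0.9 test script.

Loads the Stable Diffusion XL 0.9 base pipeline (and optionally the refiner),
either via `from_pretrained` or from a single .safetensors checkpoint, generates
one image, saves it locally, and uploads it to the `patrickvonplaten/images`
dataset repo on the Hugging Face Hub.

Expects one positional CLI argument: 0 or 1 to toggle the refiner.
"""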
from diffusers import (
    DiffusionPipeline, EulerDiscreteScheduler, StableDiffusionPipeline, KDPM2DiscreteScheduler,
    StableDiffusionImg2ImgPipeline, HeunDiscreteScheduler, KDPM2AncestralDiscreteScheduler, DDIMScheduler,
)
from diffusers import StableDiffusionXLPipeline, StableDiffusionXLImg2ImgPipeline, AutoencoderKL
import time
from pytorch_lightning import seed_everything
import os
from huggingface_hub import HfApi
# from compel import Compel
import torch
import sys
from pathlib import Path
import requests
from PIL import Image
from io import BytesIO
api = HfApi()
start_time = time.time()
# CLI toggle: pass 1 to also run the SDXL refiner, 0 for the base model only.
use_refiner = bool(int(sys.argv[1]))
# True: load the diffusers-format repo; False: load a single .safetensors checkpoint.
use_diffusers = False
# fp16-friendly SDXL VAE (the stock SDXL VAE tends to overflow in float16).
vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16, force_upcast=True)
if use_diffusers:
    # Base pipeline from the diffusers-format repo (cache only, no download).
    pipe = StableDiffusionXLPipeline.from_pretrained("stabilityai/stable-diffusion-xl-base-0.9", vae=vae, torch_dtype=torch.float16, variant="fp16", use_safetensors=True, local_files_only=True)
    print(time.time() - start_time)
    pipe.to("cuda")

    if use_refiner:
        start_time = time.time()
        refiner = StableDiffusionXLImg2ImgPipeline.from_pretrained("stabilityai/stable-diffusion-xl-refiner-0.9", vae=vae, torch_dtype=torch.float16, use_safetensors=True, variant="fp16")
        print(time.time() - start_time)
        refiner.to("cuda")
        # refiner.enable_sequential_cpu_offload()
else:
    # Same checkpoints, loaded from single .safetensors files instead.
    start_time = time.time()
    pipe = StableDiffusionXLPipeline.from_single_file("https://huggingface.co/stabilityai/stable-diffusion-xl-base-0.9/blob/main/sd_xl_base_0.9.safetensors", torch_dtype=torch.float16, use_safetensors=True)
    print(time.time() - start_time)
    pipe.to("cuda")

    if use_refiner:
        start_time = time.time()
        refiner = StableDiffusionXLImg2ImgPipeline.from_single_file("https://huggingface.co/stabilityai/stable-diffusion-xl-refiner-0.9/blob/main/sd_xl_refiner_0.9.safetensors", torch_dtype=torch.float16, use_safetensors=True)
        print(time.time() - start_time)
        refiner.to("cuda")
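
# Prompt, step count, and a fixed seed for reproducible runs.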
prompt = "An astronaut riding a green horse on Mars"
steps = 20
seed = 0
seed_everything(seed)
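
# Text-to-image pass with the base pipeline; print the wall-clock time.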
start_time = time.time()
image = pipe(prompt=prompt, num_inference_steps=steps, output_type="pil").images[0]
print(time.time() - start_time)
if use_refiner:
    # Refine the base output with an img2img pass through the refiner.
    image = refiner(prompt=prompt, num_inference_steps=steps - 10, image=image).images[0]
file_name = "aaa"
# Save locally (assumes ~/images/ediffi_sdxl already exists), then push to the Hub.
path = os.path.join(Path.home(), "images", "ediffi_sdxl", f"{file_name}.png")
image.save(path)
api.upload_file(
    path_or_fileobj=path,
    path_in_repo=path.split("/")[-1],
    repo_id="patrickvonplaten/images",
    repo_type="dataset",
)
print(f"https://huggingface.co/datasets/patrickvonplaten/images/blob/main/{file_name}.png")