#!/usr/bin/env python3
import os
import sys
import time
from io import BytesIO
from pathlib import Path

import requests
import torch
from diffusers import AutoencoderKL, StableDiffusionXLInpaintPipeline
from huggingface_hub import HfApi
from PIL import Image
from pytorch_lightning import seed_everything
# from compel import Compel

api = HfApi()
start_time = time.time()

use_refiner = bool(int(sys.argv[1]))  # CLI flag: 1 runs the refiner as a second stage, 0 uses only the base model
use_diffusers = True

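# madebyollin/sdxl-vae-fp16-fix is an SDXL VAE fine-tuned to stay numerically stable in float16,
# avoiding the NaN/black-image issue of the stock VAE; the base pipeline reuses it and is loaded
# from the local cache only.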
vae = AutoencoderKL.from_pretrained(
    "madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16, force_upcast=True
)
pipe = StableDiffusionXLInpaintPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-0.9",
    vae=vae,
    torch_dtype=torch.float16,
    variant="fp16",
    use_safetensors=True,
    local_files_only=True,
)
print(time.time() - start_time)  # seconds spent loading the pipeline
pipe.to("cuda")

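# Small helper: fetch an image over HTTP and convert it to 3-channel RGB for the pipeline.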
def download_image(url):
    response = requests.get(url)
    return Image.open(BytesIO(response.content)).convert("RGB")


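# Standard inpainting example from the CompVis latent-diffusion repo: a source photo and a binary mask
# marking the region to repaint, both resized to SDXL's native 1024x1024 resolution.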
img_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo.png"
mask_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo_mask.png"

init_image = download_image(img_url).resize((1024, 1024))
mask_image = download_image(mask_url).resize((1024, 1024))

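# Optionally load the SDXL 0.9 refiner as a second inpainting stage; it shares the fp16-fix VAE so
# latents produced by the base pipeline decode consistently. The commented-out CPU offload call below
# is a lower-VRAM alternative to moving the whole refiner to the GPU.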
if use_refiner:
    start_time = time.time()
    refiner = StableDiffusionXLInpaintPipeline.from_pretrained(
        "stabilityai/stable-diffusion-xl-refiner-0.9",
        vae=vae,
        torch_dtype=torch.float16,
        use_safetensors=True,
        variant="fp16",
    )
    refiner.to("cuda")
    # refiner.enable_sequential_cpu_offload()

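# Generation settings. denoising_end=None lets the base model run the whole noise schedule; setting it
# to e.g. 0.8 would hand the final 20% of the schedule to the refiner (via denoising_start below).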
prompt = "A majestic tiger sitting on a bench"
steps = 50
seed = 3
denoising_end = None

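# Seed all RNGs (Python, NumPy, torch) for a reproducible sample, then time the base inpainting pass.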
seed_everything(seed)
start_time = time.time()
# Keep the output as latents only when the refiner will decode them; otherwise return a PIL image directly.
output_type = "latent" if use_refiner else "pil"
image = pipe(
    prompt=prompt,
    image=init_image,
    mask_image=mask_image,
    num_inference_steps=steps,
    denoising_end=denoising_end,
    strength=0.80,
    output_type=output_type,
).images
print(time.time() - start_time)  # seconds spent on the base inpainting pass

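# Second stage: the refiner picks up the base model's latents and decodes the final image.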
if use_refiner:
    image = refiner(
        prompt=prompt,
        image=image,
        mask_image=mask_image,
        num_inference_steps=steps,
        denoising_start=denoising_end,
    ).images[0]
else:
    image = image[0]  # single decoded PIL image from the base pipeline

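# Save the result under ~/images/ediffi_sdxl/ before uploading it.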
file_name = "aaa_1"
path = os.path.join(Path.home(), "images", "ediffi_sdxl", f"{file_name}.png")
os.makedirs(os.path.dirname(path), exist_ok=True)  # make sure the output directory exists
image.save(path)

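# Push the rendered image to the Hub dataset repo and print a direct link for inspection.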
api.upload_file(
    path_or_fileobj=path,
    path_in_repo=os.path.basename(path),
    repo_id="patrickvonplaten/images",
    repo_type="dataset",
)
print(f"https://huggingface.co/datasets/patrickvonplaten/images/blob/main/{file_name}.png")