import gradio as gr
import spaces

from diffusers import StableDiffusionXLPipeline, DDIMScheduler
import torch
import sa_handler
import math

# init models

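# DDIM with SDXL's scaled-linear beta schedule; a deterministic scheduler is
# needed so the DDIM inversion below can be replayed exactly during sampling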
scheduler = DDIMScheduler(
    beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear",
    clip_sample=False, set_alpha_to_one=False)

pipeline = StableDiffusionXLPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16, variant="fp16",
    use_safetensors=True,
    scheduler=scheduler
)

# model CPU offload manages device placement itself, so the pipeline must not
# be moved to CUDA beforehand; VAE slicing trims peak memory when decoding a batch
pipeline.enable_model_cpu_offload()
pipeline.enable_vae_slicing()

# DDIM inversion

from diffusers.utils import load_image
import inversion
import numpy as np

src_style = "medieval painting"
src_prompt = f'Man laying in a bed, {src_style}.'
image_path = './example_image/medieval-bed.jpeg'

num_inference_steps = 50
x0 = np.array(load_image(image_path).resize((1024, 1024)))
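# invert the reference image back to noise along the DDIM trajectory; the
# trailing 2 is presumably the guidance scale used during inversion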
zts = inversion.ddim_inversion(pipeline, x0, src_prompt, num_inference_steps, 2)
#mediapy.show_image(x0, title="input reference image", height=256)

# run StyleAligned
prompts = [
    src_prompt,
    "A man working on a laptop",
    "A man eats pizza",
    "A woman playig on saxophone",
]

# some parameters you can adjust to control fidelity to reference
shared_score_shift = np.log(2)  # higher value induces higher fidelity, set 0 for no shift
shared_score_scale = 1.0  # higher value induces higher fidelity, set 1 for no rescale

# for very famous images consider suppressing attention to the reference, here is a configuration example:
# shared_score_shift = np.log(1)
# shared_score_scale = 0.5

for i in range(1, len(prompts)):
    prompts[i] = f'{prompts[i]}, {src_style}.'

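# register the StyleAligned hooks: every image in the batch shares attention
# with the first (reference) image, with AdaIN applied to queries and keys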
handler = sa_handler.Handler(pipeline)
sa_args = sa_handler.StyleAlignedArgs(
    share_group_norm=True, share_layer_norm=True, share_attention=True,
    adain_queries=True, adain_keys=True, adain_values=False,
    shared_score_shift=shared_score_shift, shared_score_scale=shared_score_scale,)
handler.register(sa_args)

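# zT is the fully inverted noise for the reference; the callback re-injects the
# stored trajectory latents at each denoising step so slot 0 stays the reference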
zT, inversion_callback = inversion.make_inversion_callback(zts, offset=5)

g_cpu = torch.Generator(device='cpu')
g_cpu.manual_seed(10)

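# SDXL denoises in a 4-channel 128x128 latent space for 1024x1024 outputs;
# seeding on CPU keeps the initial noise reproducible across GPUs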
latents = torch.randn(len(prompts), 4, 128, 128, device='cpu', generator=g_cpu,
                      dtype=pipeline.unet.dtype,).to('cuda:0')
latents[0] = zT

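# all prompts are denoised together in one batch so the shared-attention hooks
# can align every image's style with the reference in slot 0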
images_a = pipeline(prompts, latents=latents,
                    callback_on_step_end=inversion_callback,
                    num_inference_steps=num_inference_steps, guidance_scale=10.0).images

handler.remove()
# mediapy.show_images(images_a, titles=[p[:-(len(src_style) + 3)] for p in prompts])  # notebook-only preview; mediapy is not imported here


@spaces.GPU
def infer(prompts):
    # run the pipeline on the submitted prompt (the StyleAligned handler was
    # removed above, so this generates with plain SDXL)
    images = pipeline(prompts).images
    return images

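# minimal Gradio UI: a single textbox prompt in, a gallery of generated images out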
gr.Interface(
    fn=infer,
    inputs=[
        gr.Textbox(value="Hit submit button to test")
    ],
    outputs=[
        gr.Gallery()
    ],
    title="Style Aligned Image Generation"
).launch()