import gradio as gr
from diffusers import StableDiffusionXLPipeline, DDIMScheduler
import torch
import sa_handler
from diffusers.utils import load_image
import inversion
import numpy as np
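
# Note: sa_handler and inversion are local modules from Google's StyleAligned
# reference implementation (github.com/google/style-aligned); they are not
# pip-installable packages and are expected to sit next to this file.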
# init models
scheduler = DDIMScheduler(
    beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear",
    clip_sample=False, set_alpha_to_one=False)
pipeline = StableDiffusionXLPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16, variant="fp16",
    use_safetensors=True,
    scheduler=scheduler,
).to("cuda")
pipeline.enable_model_cpu_offload()
pipeline.enable_vae_slicing()
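# Note: enable_model_cpu_offload() manages device placement itself; combining it
# with the explicit .to("cuda") above is typically redundant, and some diffusers
# versions warn or error on the combination.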

# DDIM inversion of the reference image
num_inference_steps = 50  # shared by load_refs() and run()
zts, src_style, src_prompt = None, None, None

def load_refs(ref_path, ref_style, ref_prompt):
    global zts, src_style, src_prompt
    src_style = ref_style
    src_prompt = f"{ref_prompt}, {src_style}."
    x0 = np.array(load_image(ref_path).resize((1024, 1024)))
    zts = inversion.ddim_inversion(pipeline, x0, src_prompt, num_inference_steps, 2)
    return "ready"
def run(prompt1, prompt2, prompt3):
    # run StyleAligned
    prompts = [
        src_prompt,
        prompt1,
        prompt2,
        prompt3,
    ]
    # parameters you can adjust to control fidelity to the reference
    shared_score_shift = np.log(2)  # higher value induces higher fidelity, set 0 for no shift
    shared_score_scale = 1.0  # higher value induces higher fidelity, set 1 for no rescale
    # for very famous images consider suppressing attention to the reference, e.g.:
    # shared_score_shift = np.log(1)
    # shared_score_scale = 0.5
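    # Roughly speaking, shared_score_shift adds a constant to the attention
    # logits of the reference's keys (np.log(2) about doubles their pre-softmax
    # weight) and shared_score_scale rescales them; see sa_handler for the
    # exact formulation.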
    for i in range(1, len(prompts)):
        prompts[i] = f'{prompts[i]}, {src_style}.'
    handler = sa_handler.Handler(pipeline)
    sa_args = sa_handler.StyleAlignedArgs(
        share_group_norm=True, share_layer_norm=True, share_attention=True,
        adain_queries=True, adain_keys=True, adain_values=False,
        shared_score_shift=shared_score_shift, shared_score_scale=shared_score_scale,)
    handler.register(sa_args)
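    # From register() until remove(), every pipeline call shares attention
    # (with AdaIN-normalized queries/keys) across the batch, which is what
    # aligns the generated styles with the reference.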
    zT, inversion_callback = inversion.make_inversion_callback(zts, offset=5)

    generator = torch.Generator(device='cuda')
    generator.manual_seed(10)  # fixed seed for reproducible outputs
    latents = torch.randn(len(prompts), 4, 128, 128, device='cuda',
                          generator=generator, dtype=pipeline.unet.dtype)
    latents[0] = zT  # the reference slot starts from its inverted latent

    images_a = pipeline(prompts, latents=latents,
                        callback_on_step_end=inversion_callback,
                        num_inference_steps=num_inference_steps, guidance_scale=10.0).images
    handler.remove()
    return images_a
with gr.Blocks() as demo:
    with gr.Column():
        gr.HTML(
            """
            <h2 style="text-align: center;">Google's StyleAligned Transfer</h2>
            """
        )
        with gr.Row():
            with gr.Column():
                ref_path = gr.Image(type="filepath", value="./example_image/medieval-bed.jpeg")
                ref_style = gr.Textbox(label="Reference style", value="medieval painting")
                ref_prompt = gr.Textbox(label="Reference prompt", value="Man laying on bed")
                load_ref_btn = gr.Button("Load Reference")
            with gr.Column():
                is_ready = gr.Textbox(label="Ref status")
                prompt1 = gr.Textbox(label="Prompt1", value="A man working on a laptop")
                prompt2 = gr.Textbox(label="Prompt2", value="A man eating pizza")
                prompt3 = gr.Textbox(label="Prompt3", value="A woman playing the saxophone")
                run_button = gr.Button("Submit")
            with gr.Column():
                results = gr.Gallery()
    load_ref_btn.click(
        fn=load_refs,
        inputs=[ref_path, ref_style, ref_prompt],
        outputs=[is_ready],
        queue=False,
    )
    run_button.click(
        fn=run,
        inputs=[prompt1, prompt2, prompt3],
        outputs=[results],
    )
demo.queue().launch()
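
# A minimal sketch of driving the two steps without the UI (hypothetical paths
# and prompts, for local testing only):
#
#   load_refs("./example_image/medieval-bed.jpeg", "medieval painting",
#             "Man laying on bed")
#   images = run("A man working on a laptop", "A man eating pizza",
#                "A woman playing the saxophone")
#   images[1].save("laptop_medieval.png")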