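"""Gradio demo for SDXS-512-DreamShaper-Sketch: one-step sketch-to-image
generation with a ControlNet.

Paper: SDXS: Real-Time One-Step Latent Diffusion Models with Image Conditions
(https://arxiv.org/abs/2403.16627) | GitHub: https://github.com/IDKiro/sdxs
"""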
import spaces  # Hugging Face Spaces helper (ZeroGPU hardware)
import random

import numpy as np
from PIL import Image
import torch
import torchvision.transforms.functional as F
from diffusers import ControlNetModel, StableDiffusionControlNetPipeline

import gradio as gr

device = "cuda"
weight_type = torch.float16
controlnet = ControlNetModel.from_pretrained(
    "IDKiro/sdxs-512-dreamshaper-sketch", torch_dtype=weight_type
).to(device)
pipe = StableDiffusionControlNetPipeline.from_pretrained(
    "IDKiro/sdxs-512-dreamshaper", controlnet=controlnet, torch_dtype=weight_type
)
pipe.to(device)
style_list = [
    {
        "name": "No Style",
        "prompt": "{prompt}",
    },
    {
        "name": "Cinematic",
        "prompt": "cinematic still {prompt} . emotional, harmonious, vignette, highly detailed, high budget, bokeh, cinemascope, moody, epic, gorgeous, film grain, grainy",
    },
    {
        "name": "3D Model",
        "prompt": "professional 3d model {prompt} . octane render, highly detailed, volumetric, dramatic lighting",
    },
    {
        "name": "Anime",
        "prompt": "anime artwork {prompt} . anime style, key visual, vibrant, studio anime, highly detailed",
    },
    {
        "name": "Digital Art",
        "prompt": "concept art {prompt} . digital artwork, illustrative, painterly, matte painting, highly detailed",
    },
    {
        "name": "Photographic",
        "prompt": "cinematic photo {prompt} . 35mm photograph, film, bokeh, professional, 4k, highly detailed",
    },
    {
        "name": "Pixel art",
        "prompt": "pixel-art {prompt} . low-res, blocky, pixel art style, 8-bit graphics",
    },
    {
        "name": "Fantasy art",
        "prompt": "ethereal fantasy concept art of {prompt} . magnificent, celestial, ethereal, painterly, epic, majestic, magical, fantasy art, cover art, dreamy",
    },
    {
        "name": "Neonpunk",
        "prompt": "neonpunk style {prompt} . cyberpunk, vaporwave, neon, vibes, vibrant, stunningly beautiful, crisp, detailed, sleek, ultramodern, magenta highlights, dark purple shadows, high contrast, cinematic, ultra detailed, intricate, professional",
    },
    {
        "name": "Manga",
        "prompt": "manga style {prompt} . vibrant, high-energy, detailed, iconic, Japanese comic style",
    },
]
styles = {k["name"]: k["prompt"] for k in style_list}
STYLE_NAMES = list(styles.keys())
DEFAULT_STYLE_NAME = "No Style"
MAX_SEED = np.iinfo(np.int32).max
# Helper for optional seed randomization (not wired into the UI below).
def randomize_seed_fn(seed: int, randomize_seed: bool) -> int:
    if randomize_seed:
        seed = random.randint(0, MAX_SEED)
    return seed
def run(
    image,
    prompt,
    prompt_template,
    style_name,
    controlnet_conditioning_scale,
    device_type="GPU",
    param_dtype="torch.float16",
):
    if device_type == "CPU":
        device = "cpu"
        param_dtype = "torch.float32"
    else:
        device = "cuda"

    pipe.to(
        torch_device=device,
        torch_dtype=torch.float16 if param_dtype == "torch.float16" else torch.float32,
    )

    print(f"prompt: {prompt}")
    print("sketch updated")

    # No sketch yet: return a blank white canvas instead of running the pipeline.
    if image is None:
        ones = Image.new("L", (512, 512), 255)
        return ones

    prompt = prompt_template.replace("{prompt}", prompt)

    # The Sketchpad composite is RGBA; invert its alpha channel so drawn strokes
    # become dark lines on a white background for the ControlNet condition.
    control_image = Image.fromarray(255 - np.array(image["composite"])[:, :, -1])

    # SDXS is a one-step model, so a single inference step with no CFG suffices.
    output_pil = pipe(
        prompt=prompt,
        image=control_image,
        width=512,
        height=512,
        guidance_scale=0.0,
        num_inference_steps=1,
        num_images_per_prompt=1,
        output_type="pil",
        controlnet_conditioning_scale=float(controlnet_conditioning_scale),
    ).images[0]
    return output_pil
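
# Hypothetical programmatic call, bypassing the Gradio UI. gr.Sketchpad with
# type="pil" returns a dict whose "composite" entry is the drawn RGBA image;
# an empty transparent canvas therefore yields an all-white control image.
# sketch = {"composite": Image.new("RGBA", (512, 512), (0, 0, 0, 0))}
# result = run(sketch, "a small cabin in the woods", styles["No Style"], "No Style", 0.8)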
with gr.Blocks() as demo:
    gr.Markdown("# SDXS-512-DreamShaper-Sketch")
    gr.Markdown("[SDXS: Real-Time One-Step Latent Diffusion Models with Image Conditions](https://arxiv.org/abs/2403.16627) | [GitHub](https://github.com/IDKiro/sdxs)")

    with gr.Row(elem_id="main_row"):
        with gr.Column(elem_id="column_input"):
            gr.Markdown("## INPUT", elem_id="input_header")
            image = gr.Sketchpad(
                type="pil",
                image_mode="RGBA",
                brush=gr.Brush(colors=["#000000"], color_mode="fixed", default_size=8),
                crop_size=(512, 512),
            )

            # gr.Markdown("## Prompt", elem_id="tools_header")
            prompt = gr.Textbox(label="Prompt", value="", show_label=True)

            with gr.Row():
                style = gr.Dropdown(
                    label="Style", choices=STYLE_NAMES, value=DEFAULT_STYLE_NAME, scale=1
                )
                prompt_temp = gr.Textbox(
                    label="Prompt Style Template",
                    value=styles[DEFAULT_STYLE_NAME],
                    scale=2,
                    max_lines=1,
                )

            controlnet_conditioning_scale = gr.Slider(
                label="Control Strength", minimum=0, maximum=1, step=0.01, value=0.8
            )

            device_choices = ["GPU", "CPU"]
            device_type = gr.Radio(
                device_choices,
                label="Device",
                value=device_choices[0],
                interactive=True,
                info="Many thanks to the community for the GPU!",
            )

            dtype_choices = ["torch.float16", "torch.float32"]
            param_dtype = gr.Radio(
                dtype_choices,
                label="torch.weight_type",
                value=dtype_choices[0],
                interactive=True,
                info="To save GPU memory, use torch.float16. For better quality, use torch.float32.",
            )

        with gr.Column(elem_id="column_output"):
            gr.Markdown("## OUTPUT", elem_id="output_header")
            result = gr.Image(
                label="Result",
                height=512,
                width=512,
                elem_id="output_image",
                show_label=False,
                show_download_button=True,
            )
    inputs = [
        image,
        prompt,
        prompt_temp,
        style,
        controlnet_conditioning_scale,
        device_type,
        param_dtype,
    ]
    outputs = [result]

    # Re-run generation whenever the prompt, style, sketch, or control strength changes;
    # picking a style also swaps the prompt template before regenerating.
    prompt.change(fn=run, inputs=inputs, outputs=outputs)
    style.change(lambda x: styles[x], inputs=[style], outputs=[prompt_temp]).then(
        fn=run, inputs=inputs, outputs=outputs
    )
    image.change(run, inputs=inputs, outputs=outputs)
    controlnet_conditioning_scale.change(run, inputs=inputs, outputs=outputs)

if __name__ == "__main__":
    demo.queue().launch()