from diffusers import AutoencoderKL, UNet2DConditionModel, StableDiffusionPipeline, StableDiffusionImg2ImgPipeline, DPMSolverMultistepScheduler
import gradio as gr
import torch
from PIL import Image
import utils
is_colab = utils.is_google_colab()
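
# Holder for one fine-tuned checkpoint: display name, Hub path, the prompt
# prefix (trigger token) it was trained with, and lazily attached pipelines.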
class Model:
    def __init__(self, name, path, prefix):
        self.name = name
        self.path = path
        self.prefix = prefix
        self.pipe_t2i = None
        self.pipe_i2i = None
models = [
    Model("Beeple", "riccardogiorato/beeple-diffusion", "beeple style "),
    Model("Avatar", "riccardogiorato/avatar-diffusion", "avatartwow style "),
    Model("Beksinski", "s3nh/beksinski-style-stable-diffusion", "beksinski style "),
    Model("Poolsuite", "prompthero/poolsuite", "poolsuite style "),
    Model("Robo Diffusion", "nousr/robo-diffusion", ""),
    Model("Guohua", "Langboat/Guohua-Diffusion", "guohua style "),
    Model("JWST", "dallinmackay/JWST-Deep-Space-diffusion", "JWST "),
]
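
# DPM-Solver++ (multistep) configured with Stable Diffusion's training beta
# schedule; it typically produces good samples in ~25 steps. predict_epsilon
# is the older diffusers spelling of prediction_type="epsilon".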
scheduler = DPMSolverMultistepScheduler(
    beta_start=0.00085,
    beta_end=0.012,
    beta_schedule="scaled_linear",
    num_train_timesteps=1000,
    trained_betas=None,
    predict_epsilon=True,
    thresholding=False,
    algorithm_type="dpmsolver++",
    solver_type="midpoint",
    lower_order_final=True,
)
custom_model = None
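# On Colab, expose a "Custom model" slot so users can paste any Hub model path.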
if is_colab:
    models.insert(0, Model("Custom model", "", ""))
    custom_model = models[0]
last_mode = "txt2img"
current_model = models[1] if is_colab else models[0]
current_model_path = current_model.path
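
# Loading strategy: on Colab only the selected model is loaded (others are
# fetched on demand); locally every pipeline is pre-built in RAM, all sharing
# one VAE, and only the active pipeline is moved to the GPU.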
if is_colab:
    pipe = StableDiffusionPipeline.from_pretrained(current_model.path, torch_dtype=torch.float16, scheduler=scheduler)
else:  # download all models up front
    vae = AutoencoderKL.from_pretrained(current_model.path, subfolder="vae", torch_dtype=torch.float16)
    for model in models.copy():  # iterate over a copy so failed models can be removed safely
        try:
            unet = UNet2DConditionModel.from_pretrained(model.path, subfolder="unet", torch_dtype=torch.float16)
            model.pipe_t2i = StableDiffusionPipeline.from_pretrained(model.path, unet=unet, vae=vae, torch_dtype=torch.float16, scheduler=scheduler)
            model.pipe_i2i = StableDiffusionImg2ImgPipeline.from_pretrained(model.path, unet=unet, vae=vae, torch_dtype=torch.float16, scheduler=scheduler)
        except Exception:
            models.remove(model)

    pipe = models[0].pipe_t2i
if torch.cuda.is_available():
    pipe = pipe.to("cuda")
device = "GPU 🔥" if torch.cuda.is_available() else "CPU 🥶"
def custom_model_changed(path):
    global current_model
    models[0].path = path
    current_model = models[0]
def inference(model_name, prompt, guidance, steps, width=512, height=512, seed=0, img=None, strength=0.5, neg_prompt=""):
    global current_model
    for model in models:
        if model.name == model_name:
            current_model = model
            model_path = current_model.path

    # seed the generator on whichever device the pipeline runs on; seed 0 means random
    generator_device = "cuda" if torch.cuda.is_available() else "cpu"
    generator = torch.Generator(generator_device).manual_seed(seed) if seed != 0 else None

    if img is not None:
        return img_to_img(model_path, prompt, neg_prompt, img, strength, guidance, steps, width, height, generator)
    else:
        return txt_to_img(model_path, prompt, neg_prompt, guidance, steps, width, height, generator)
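
# Only one pipeline occupies the GPU at a time: when the model or the mode
# (txt2img vs. img2img) changes, the current pipe is pushed back to CPU and
# the requested one is moved onto the GPU.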
def txt_to_img(model_path, prompt, neg_prompt, guidance, steps, width, height, generator=None):
    global last_mode
    global pipe
    global current_model_path
    if model_path != current_model_path or last_mode != "txt2img":
        current_model_path = model_path

        if is_colab or current_model == custom_model:
            pipe = StableDiffusionPipeline.from_pretrained(current_model_path, torch_dtype=torch.float16, scheduler=scheduler)
        else:
            pipe.to("cpu")
            pipe = current_model.pipe_t2i

        if torch.cuda.is_available():
            pipe = pipe.to("cuda")
        last_mode = "txt2img"

    prompt = current_model.prefix + prompt
    result = pipe(
        prompt,
        negative_prompt=neg_prompt,
        # num_images_per_prompt=n_images,
        num_inference_steps=int(steps),
        guidance_scale=guidance,
        width=width,
        height=height,
        generator=generator)

    return replace_nsfw_images(result)
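
# img2img: the init image is resized to fit inside the requested width/height
# while preserving aspect ratio; the output size then follows the resized image.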
def img_to_img(model_path, prompt, neg_prompt, img, strength, guidance, steps, width, height, generator=None):
    global last_mode
    global pipe
    global current_model_path
    if model_path != current_model_path or last_mode != "img2img":
        current_model_path = model_path

        if is_colab or current_model == custom_model:
            pipe = StableDiffusionImg2ImgPipeline.from_pretrained(current_model_path, torch_dtype=torch.float16, scheduler=scheduler)
        else:
            pipe.to("cpu")
            pipe = current_model.pipe_i2i

        if torch.cuda.is_available():
            pipe = pipe.to("cuda")
        last_mode = "img2img"

    prompt = current_model.prefix + prompt
    ratio = min(height / img.height, width / img.width)
    img = img.resize((int(img.width * ratio), int(img.height * ratio)), Image.LANCZOS)
    result = pipe(
        prompt,
        negative_prompt=neg_prompt,
        # num_images_per_prompt=n_images,
        init_image=img,
        num_inference_steps=int(steps),
        strength=strength,
        guidance_scale=guidance,
        # width/height are omitted here: the img2img pipeline takes its output
        # size from the resized init image and does not accept size arguments
        generator=generator)

    return replace_nsfw_images(result)
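
# The pipeline's built-in safety checker flags NSFW outputs; flagged images
# are replaced with a local placeholder (an nsfw.png next to this script).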
def replace_nsfw_images(results):
    for i in range(len(results.images)):
        if results.nsfw_content_detected[i]:
            results.images[i] = Image.open("nsfw.png")
    return results.images[0]
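
# Inline CSS for the header layout and gallery sizing, passed to gr.Blocks below.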
css = """.playground-diffusion-div div{display:inline-flex;align-items:center;gap:.8rem;font-size:1.75rem}.playground-diffusion-div div h1{font-weight:900;margin-bottom:7px}.playground-diffusion-div p{margin-bottom:10px;font-size:94%}a{text-decoration:underline}.tabs{margin-top:0;margin-bottom:0}#gallery{min-height:20rem}
"""
with gr.Blocks(css=css) as demo:
    gr.HTML(
        f"""
          <div class="playground-diffusion-div">
            <div>
              <h1>Playground Diffusion</h1>
            </div>
            <p>
              Demo for multiple fine-tuned Stable Diffusion models, trained on different styles: <br>
              <a href="https://huggingface.co/riccardogiorato/avatar-diffusion">Avatar</a>,<br/>
              <a href="https://huggingface.co/riccardogiorato/beeple-diffusion">Beeple</a>,<br/>
              <a href="https://huggingface.co/s3nh/beksinski-style-stable-diffusion">Beksinski</a>,<br/>
              Diffusers 🧨 SD models hosted on HuggingFace 🤗.
            </p>
            <p>
              Running on <b>{device}</b>{(" in a <b>Google Colab</b>." if is_colab else "")}
            </p>
          </div>
        """
    )
    with gr.Row():
        with gr.Column(scale=55):
            with gr.Group():
                model_name = gr.Dropdown(label="Model", choices=[m.name for m in models], value=current_model.name)
                # assumed layout: textbox for a custom Hub model path, revealed in
                # Colab when "Custom model" is selected (see the wiring below)
                with gr.Box(visible=False) as custom_model_group:
                    custom_model_path = gr.Textbox(label="Custom model path", placeholder="e.g. riccardogiorato/beeple-diffusion", interactive=True)
                with gr.Row():
                    prompt = gr.Textbox(label="Prompt", show_label=False, max_lines=2, placeholder="Enter prompt. Style applied automatically").style(container=False)
                    generate = gr.Button(value="Generate").style(rounded=(False, True, True, False))
                image_out = gr.Image(height=512)
            # gallery = gr.Gallery(
            #     label="Generated images", show_label=False, elem_id="gallery"
            # ).style(grid=[1], height="auto")

        with gr.Column(scale=45):
            with gr.Tab("Options"):
                with gr.Group():
                    neg_prompt = gr.Textbox(label="Negative prompt", placeholder="What to exclude from the image")
                    # n_images = gr.Slider(label="Images", value=1, minimum=1, maximum=4, step=1)
                    with gr.Row():
                        guidance = gr.Slider(label="Guidance scale", value=7.5, maximum=15)
                        steps = gr.Slider(label="Steps", value=25, minimum=2, maximum=75, step=1)
                    with gr.Row():
                        width = gr.Slider(label="Width", value=512, minimum=64, maximum=1024, step=8)
                        height = gr.Slider(label="Height", value=512, minimum=64, maximum=1024, step=8)
                    seed = gr.Slider(0, 2147483647, label="Seed (0 = random)", value=0, step=1)

            with gr.Tab("Image to image"):
                with gr.Group():
                    image = gr.Image(label="Image", height=256, tool="editor", type="pil")
                    strength = gr.Slider(label="Transformation strength", minimum=0, maximum=1, step=0.01, value=0.5)
    if is_colab:
        model_name.change(lambda x: gr.update(visible=x == models[0].name), inputs=model_name, outputs=custom_model_group)
        # assumed wiring: keep the Custom model entry's path in sync with the textbox
        custom_model_path.change(custom_model_changed, inputs=custom_model_path, outputs=None)
    # n_images.change(lambda n: gr.Gallery().style(grid=[2 if n > 1 else 1], height="auto"), inputs=n_images, outputs=gallery)

    inputs = [model_name, prompt, guidance, steps, width, height, seed, image, strength, neg_prompt]
    prompt.submit(inference, inputs=inputs, outputs=image_out)
    generate.click(inference, inputs=inputs, outputs=image_out)
    # each example row supplies one value per input component (seed 0 = random)
    ex = gr.Examples([
        [models[0].name, "Neon techno-magic robot with spear pierces an ancient beast, hyperrealism, no blur, 4k resolution, ultra detailed", 7.5, 50, 0],
        [models[0].name, "halfturn portrait of a big crystal face of a beautiful abstract ancient Egyptian elderly shaman woman, made of iridescent golden crystals, half - turn, bottom view, ominous, intricate, studio, art by anthony macbain and greg rutkowski and alphonse mucha, concept art, 4k, sharp focus", 7.5, 25, 0],
    ], [model_name, prompt, guidance, steps, seed], image_out, inference, cache_examples=False)
gr.HTML("""
<p>Models by <a href="https://huggingface.co/riccardogiorato">@riccardogiorato</a><br></p>
""")
if not is_colab:
    demo.queue(concurrency_count=1)

demo.launch(debug=is_colab, share=is_colab)