import os
import gradio as gr
import json
import logging
import torch
from PIL import Image
import spaces
from diffusers import StableDiffusionXLPipeline, StableDiffusionXLImg2ImgPipeline
from diffusers.utils import load_image
from huggingface_hub import hf_hub_download, HfFileSystem, ModelCard
import copy
import random
import time
import re
from typing import Optional
# Load LoRAs from JSON file
with open('loras.json', 'r') as f:
loras = json.load(f)
# Initialize the base model for SDXL
dtype = torch.float16 if torch.cuda.is_available() else torch.float32
device = "cuda" if torch.cuda.is_available() else "cpu"
base_model = "stabilityai/stable-diffusion-xl-base-1.0"
# Load SDXL pipelines
pipe = StableDiffusionXLPipeline.from_pretrained(
base_model,
torch_dtype=dtype,
use_safetensors=True
).to(device)
pipe_i2i = StableDiffusionXLImg2ImgPipeline.from_pretrained(
base_model,
torch_dtype=dtype,
use_safetensors=True
).to(device)
MAX_SEED = 2**32 - 1
# Custom SDXL generation function for live preview
@torch.inference_mode()
def generate_sdxl_images(
pipe,
prompt: str,
height: int = 1024,
width: int = 1024,
num_inference_steps: int = 50,
guidance_scale: float = 7.5,
generator: Optional[torch.Generator] = None,
output_type: str = "pil",
):
# Encode prompt
prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds = pipe.encode_prompt(
prompt=prompt,
num_images_per_prompt=1,
do_classifier_free_guidance=True,
)
# Prepare latents
latents = pipe.prepare_latents(
batch_size=1,
num_channels_latents=pipe.unet.config.in_channels,
height=height,
width=width,
dtype=prompt_embeds.dtype,
device=pipe.device,
generator=generator,
)
# Prepare timesteps
pipe.scheduler.set_timesteps(num_inference_steps, device=pipe.device)
timesteps = pipe.scheduler.timesteps
    # Prepare SDXL micro-conditioning (original size, crop coords, target size) required by the UNet
    add_time_ids = torch.tensor(
        [[height, width, 0, 0, height, width]],
        dtype=prompt_embeds.dtype,
        device=pipe.device,
    )
    # Prepare classifier-free guidance
    do_classifier_free_guidance = guidance_scale > 1.0
    if do_classifier_free_guidance:
        prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
        pooled_prompt_embeds = torch.cat([negative_pooled_prompt_embeds, pooled_prompt_embeds])
        add_time_ids = torch.cat([add_time_ids, add_time_ids])
    # Denoising loop
    for i, t in enumerate(timesteps):
        # Expand and scale latents for guidance
        latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
        latent_model_input = pipe.scheduler.scale_model_input(latent_model_input, t)
        # Predict noise
        noise_pred = pipe.unet(
            latent_model_input,
            t,
            encoder_hidden_states=prompt_embeds,
            added_cond_kwargs={"text_embeds": pooled_prompt_embeds, "time_ids": add_time_ids},
        ).sample
        # Perform guidance
        if do_classifier_free_guidance:
            noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
            noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
        # Step the scheduler
        latents = pipe.scheduler.step(noise_pred, t, latents).prev_sample
        # Decode latents to a preview image every step
        image = pipe.vae.decode(latents / pipe.vae.config.scaling_factor, return_dict=False)[0]
        yield pipe.image_processor.postprocess(image, output_type=output_type)[0]
# Final image
image = pipe.vae.decode(latents / pipe.vae.config.scaling_factor, return_dict=False)[0]
yield pipe.image_processor.postprocess(image, output_type=output_type)[0]
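# Context manager that times a block of work and prints the elapsed duration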
class calculateDuration:
def __init__(self, activity_name=""):
self.activity_name = activity_name
def __enter__(self):
self.start_time = time.time()
return self
def __exit__(self, exc_type, exc_value, traceback):
self.end_time = time.time()
self.elapsed_time = self.end_time - self.start_time
if self.activity_name:
print(f"Elapsed time for {self.activity_name}: {self.elapsed_time:.6f} seconds")
else:
print(f"Elapsed time: {self.elapsed_time:.6f} seconds")
def update_selection(evt: gr.SelectData, width, height):
selected_lora = loras[evt.index]
new_placeholder = f"Type a prompt for {selected_lora['title']}"
lora_repo = selected_lora["repo"]
updated_text = f"### Selected: [{lora_repo}](https://huggingface.co/{lora_repo}) ✨"
if "aspect" in selected_lora:
if selected_lora["aspect"] == "portrait":
width = 768
height = 1024
elif selected_lora["aspect"] == "landscape":
width = 1024
height = 768
else:
width = 1024
height = 1024
return (
gr.update(placeholder=new_placeholder),
updated_text,
evt.index,
width,
height,
)
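# Text-to-image generation on the ZeroGPU worker; yields an intermediate preview image after every denoising step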
@spaces.GPU(duration=70)
def generate_image(prompt_mash, steps, seed, cfg_scale, width, height, progress):
pipe.to("cuda")
generator = torch.Generator(device="cuda").manual_seed(seed)
with calculateDuration("Generating image"):
for img in generate_sdxl_images(
pipe,
prompt=prompt_mash,
num_inference_steps=steps,
guidance_scale=cfg_scale,
width=width,
height=height,
generator=generator,
output_type="pil",
):
yield img
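# Image-to-image generation with the SDXL img2img pipeline; returns a single final image (no per-step preview)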
def generate_image_to_image(prompt_mash, image_input_path, image_strength, steps, cfg_scale, width, height, seed):
generator = torch.Generator(device="cuda").manual_seed(seed)
pipe_i2i.to("cuda")
image_input = load_image(image_input_path)
final_image = pipe_i2i(
prompt=prompt_mash,
image=image_input,
strength=image_strength,
num_inference_steps=steps,
guidance_scale=cfg_scale,
width=width,
height=height,
generator=generator,
output_type="pil",
).images[0]
return final_image
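# Main generation handler: builds the prompt with the LoRA trigger word, swaps the selected LoRA
# onto both pipelines, then runs either img2img or the streaming text-to-image generator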
@spaces.GPU(duration=70)
def run_lora(prompt, image_input, image_strength, cfg_scale, steps, selected_index, randomize_seed, seed, width, height, lora_scale, progress=gr.Progress(track_tqdm=True)):
if selected_index is None:
raise gr.Error("You must select a LoRA before proceeding.")
selected_lora = loras[selected_index]
lora_path = selected_lora["repo"]
trigger_word = selected_lora["trigger_word"]
if trigger_word:
if "trigger_position" in selected_lora and selected_lora["trigger_position"] == "prepend":
prompt_mash = f"{trigger_word} {prompt}"
else:
prompt_mash = f"{prompt} {trigger_word}"
else:
prompt_mash = prompt
# Unload previous LoRA weights
with calculateDuration("Unloading LoRA"):
pipe.unload_lora_weights()
pipe_i2i.unload_lora_weights()
# Load LoRA weights and set adapter scale
with calculateDuration(f"Loading LoRA weights for {selected_lora['title']}"):
weight_name = selected_lora.get("weights", None)
adapter_name = "lora"
pipe.load_lora_weights(lora_path, weight_name=weight_name, adapter_name=adapter_name)
pipe.set_adapters([adapter_name], [lora_scale])
pipe_i2i.load_lora_weights(lora_path, weight_name=weight_name, adapter_name=adapter_name)
pipe_i2i.set_adapters([adapter_name], [lora_scale])
# Set random seed
with calculateDuration("Randomizing seed"):
if randomize_seed:
seed = random.randint(0, MAX_SEED)
if image_input is not None:
final_image = generate_image_to_image(prompt_mash, image_input, image_strength, steps, cfg_scale, width, height, seed)
yield final_image, seed, gr.update(visible=False)
else:
image_generator = generate_image(prompt_mash, steps, seed, cfg_scale, width, height, progress)
final_image = None
step_counter = 0
for image in image_generator:
step_counter += 1
final_image = image
progress_bar = f'<div class="progress-container"><div class="progress-bar" style="--current: {step_counter}; --total: {steps};"></div></div>'
yield image, seed, gr.update(value=progress_bar, visible=True)
yield final_image, seed, gr.update(value=progress_bar, visible=False)
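# Resolve a Hugging Face repo id to (title, repo, weight filename, trigger word, preview image URL),
# preferring the highest-step *.safetensors checkpoint when several are present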
def get_huggingface_safetensors(link):
split_link = link.split("/")
if len(split_link) != 2:
raise Exception("Invalid Hugging Face repository link format.")
# Load model card
model_card = ModelCard.load(link)
base_model = model_card.data.get("base_model")
print(base_model)
# Validate model type for SDXL
if base_model != "stabilityai/stable-diffusion-xl-base-1.0":
raise Exception("Not an SDXL LoRA!")
# Extract image and trigger word
image_path = model_card.data.get("widget", [{}])[0].get("output", {}).get("url", None)
trigger_word = model_card.data.get("instance_prompt", "")
image_url = f"https://huggingface.co/{link}/resolve/main/{image_path}" if image_path else None
# Initialize Hugging Face file system
fs = HfFileSystem()
try:
list_of_files = fs.ls(link, detail=False)
safetensors_name = None
highest_trained_file = None
highest_steps = -1
last_safetensors_file = None
step_pattern = re.compile(r"_0{3,}\d+") # Detects step count `_000...`
for file in list_of_files:
filename = file.split("/")[-1]
if filename.endswith(".safetensors"):
last_safetensors_file = filename
match = step_pattern.search(filename)
if not match:
safetensors_name = filename
break
else:
steps = int(match.group().lstrip("_"))
if steps > highest_steps:
highest_trained_file = filename
highest_steps = steps
if not image_url and filename.lower().endswith((".jpg", ".jpeg", ".png", ".webp")):
image_url = f"https://huggingface.co/{link}/resolve/main/{filename}"
if not safetensors_name:
safetensors_name = highest_trained_file if highest_trained_file else last_safetensors_file
if not safetensors_name:
raise Exception("No valid *.safetensors file found in the repository.")
except Exception as e:
print(e)
raise Exception("You didn't include a valid Hugging Face repository with a *.safetensors LoRA")
return split_link[1], link, safetensors_name, trigger_word, image_url
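# Accept either a full huggingface.co URL or a bare "username/repo" id and delegate to get_huggingface_safetensors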
def check_custom_model(link):
    if link.startswith("https://"):
        if link.startswith("https://huggingface.co") or link.startswith("https://www.huggingface.co"):
            link_split = link.split("huggingface.co/")
            return get_huggingface_safetensors(link_split[1])
        raise Exception("Only Hugging Face repository links are supported.")
    else:
        return get_huggingface_safetensors(link)
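# Validate a user-supplied LoRA, render an info card for it, and append it to the gallery list if not already present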
def add_custom_lora(custom_lora):
global loras
if custom_lora:
try:
title, repo, path, trigger_word, image = check_custom_model(custom_lora)
print(f"Loaded custom LoRA: {repo}")
card = f'''
<div class="custom_lora_card">
<span>Loaded custom LoRA:</span>
<div class="card_internal">
<img src="{image}" />
<div>
<h3>{title}</h3>
<small>{"Using: <code><b>"+trigger_word+"</code></b> as the trigger word" if trigger_word else "No trigger word found. If there's a trigger word, include it in your prompt"}<br></small>
</div>
</div>
</div>
'''
            existing_item_index = next((index for (index, item) in enumerate(loras) if item['repo'] == repo), None)
            if existing_item_index is None:
new_item = {
"image": image,
"title": title,
"repo": repo,
"weights": path,
"trigger_word": trigger_word
}
print(new_item)
existing_item_index = len(loras)
loras.append(new_item)
return gr.update(visible=True, value=card), gr.update(visible=True), gr.Gallery(selected_index=None), f"Custom: {path}", existing_item_index, trigger_word
except Exception as e:
gr.Warning(f"Invalid LoRA: either you entered an invalid link, or a non-SDXL LoRA")
return gr.update(visible=True, value=f"Invalid LoRA: either you entered an invalid link, a non-SDXL LoRA"), gr.update(visible=True), gr.update(), "", None, ""
else:
return gr.update(visible=False), gr.update(visible=False), gr.update(), "", None, ""
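# Reset the custom-LoRA UI elements to their default (hidden/empty) state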
def remove_custom_lora():
return gr.update(visible=False), gr.update(visible=False), gr.update(), "", None, ""
run_lora.zerogpu = True
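# Custom CSS for the layout and the step-driven progress bar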
css = '''
#gen_btn{height: 100%}
#gen_column{align-self: stretch}
#title{text-align: center}
#title h1{font-size: 3em; display:inline-flex; align-items:center}
#title img{width: 100px; margin-right: 0.5em}
#gallery .grid-wrap{height: 10vh}
#lora_list{background: var(--block-background-fill);padding: 0 1em .3em; font-size: 90%}
.card_internal{display: flex;height: 100px;margin-top: .5em}
.card_internal img{margin-right: 1em}
.styler{--form-gap-width: 0px !important}
#progress{height:30px}
#progress .generating{display:none}
.progress-container {width: 100%;height: 30px;background-color: #f0f0f0;border-radius: 15px;overflow: hidden;margin-bottom: 20px}
.progress-bar {height: 100%;background-color: #4f46e5;width: calc(var(--current) / var(--total) * 100%);transition: width 0.5s ease-in-out}
'''
font = [gr.themes.GoogleFont("Source Sans Pro"), "Arial", "sans-serif"]
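# Build the Gradio UI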
with gr.Blocks(theme=gr.themes.Soft(font=font), css=css, delete_cache=(60, 60)) as app:
title = gr.HTML(
"""<h1>SDXL LoRA DLC</h1>""",
elem_id="title",
)
selected_index = gr.State(None)
with gr.Row():
with gr.Column(scale=3):
prompt = gr.Textbox(label="Prompt", lines=1, placeholder="Type a prompt after selecting a LoRA")
with gr.Column(scale=1, elem_id="gen_column"):
generate_button = gr.Button("Generate", variant="primary", elem_id="gen_btn")
with gr.Row():
with gr.Column():
selected_info = gr.Markdown("")
gallery = gr.Gallery(
[(item["image"], item["title"]) for item in loras],
label="LoRA Gallery",
allow_preview=False,
columns=3,
elem_id="gallery",
show_share_button=False
)
with gr.Group():
custom_lora = gr.Textbox(label="Custom LoRA", info="LoRA Hugging Face path", placeholder="username/sdxl-lora-model")
gr.Markdown("[Check the list of SDXL LoRAs](https://huggingface.co/models?other=base_model:stabilityai/stable-diffusion-xl-base-1.0)", elem_id="lora_list")
custom_lora_info = gr.HTML(visible=False)
custom_lora_button = gr.Button("Remove custom LoRA", visible=False)
with gr.Column():
progress_bar = gr.Markdown(elem_id="progress", visible=False)
result = gr.Image(label="Generated Image")
with gr.Row():
with gr.Accordion("Advanced Settings", open=False):
with gr.Row():
input_image = gr.Image(label="Input image", type="filepath")
image_strength = gr.Slider(label="Denoise Strength", info="Lower means more image influence", minimum=0.1, maximum=1.0, step=0.01, value=0.75)
with gr.Column():
with gr.Row():
cfg_scale = gr.Slider(label="CFG Scale", minimum=1, maximum=20, step=0.5, value=7.5)
steps = gr.Slider(label="Steps", minimum=1, maximum=50, step=1, value=30)
with gr.Row():
width = gr.Slider(label="Width", minimum=256, maximum=1536, step=64, value=1024)
height = gr.Slider(label="Height", minimum=256, maximum=1536, step=64, value=1024)
with gr.Row():
randomize_seed = gr.Checkbox(True, label="Randomize seed")
seed = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=0, randomize=True)
lora_scale = gr.Slider(label="LoRA Scale", minimum=0, maximum=3, step=0.01, value=1.0)
gallery.select(
update_selection,
inputs=[width, height],
outputs=[prompt, selected_info, selected_index, width, height]
)
custom_lora.input(
add_custom_lora,
inputs=[custom_lora],
outputs=[custom_lora_info, custom_lora_button, gallery, selected_info, selected_index, prompt]
)
custom_lora_button.click(
remove_custom_lora,
outputs=[custom_lora_info, custom_lora_button, gallery, selected_info, selected_index, custom_lora]
)
gr.on(
triggers=[generate_button.click, prompt.submit],
fn=run_lora,
inputs=[prompt, input_image, image_strength, cfg_scale, steps, selected_index, randomize_seed, seed, width, height, lora_scale],
outputs=[result, seed, progress_bar]
)
app.queue()
app.launch()