import os
import gradio as gr
import torch
from diffusers import StableDiffusionXLPipeline, EulerDiscreteScheduler
from transformers import CLIPTokenizer
from safetensors.torch import save_file
from huggingface_hub import HfApi, login
from huggingface_hub.utils import HfHubHTTPError, validate_repo_id
from collections import OrderedDict
import re
import gdown
import requests
import subprocess
from urllib.parse import urlparse, unquote
import tempfile
from tqdm import tqdm
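# SDXL converter Space: converts a single-file SDXL checkpoint to Diffusers
# format (or the reverse) and optionally uploads the result to the Hugging
# Face Hub. The Gradio UI at the bottom of this file is the entry point.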
# ---------------------- UTILITY FUNCTIONS ----------------------
def is_valid_url(url):
"""Checks if a string is a valid URL."""
try:
result = urlparse(url)
return all([result.scheme, result.netloc])
    except Exception:
return False
def get_filename(url):
    """Resolves a download's filename, preferring the Content-Disposition header."""
    with requests.get(url, stream=True) as response:
        response.raise_for_status()
        content_disposition = response.headers.get("content-disposition", "")
        matches = re.findall('filename="?([^";]+)"?', content_disposition)
        if matches:
            return matches[0]
    # Fall back to the last path component of the URL.
    url_path = urlparse(url).path
    return unquote(os.path.basename(url_path))
def get_supported_extensions():
return tuple([".ckpt", ".safetensors", ".pt", ".pth"])
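# The two helpers below are called by convert_model() but were never defined
# in this file; these are minimal reconstructions inferred from how they are
# used there (the exact behavior of the originals is an assumption).
def get_save_dtype(precision):
    """Maps the UI precision choice to a torch dtype (None means keep as-is)."""
    return {"fp16": torch.float16, "bf16": torch.bfloat16, "float": torch.float32}.get(precision)
def increment_filename(filepath):
    """Appends _1, _2, ... to the stem until the path no longer exists."""
    base, ext = os.path.splitext(filepath)
    counter = 1
    while os.path.exists(filepath):
        filepath = f"{base}_{counter}{ext}"
        counter += 1
    return filepath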
def download_model(url, dst, output_widget):
    """Downloads a model to `dst`, returning the local file path (or None on failure)."""
    filename = get_filename(url)
    filepath = os.path.join(dst, filename)
    try:
        if "drive.google.com" in url:
            # fuzzy=True lets gdown resolve share links as well as direct file links.
            gdown.download(url, filepath, quiet=False, fuzzy=True)
        else:
            if "huggingface.co" in url and "/blob/" in url:
                url = url.replace("/blob/", "/resolve/")
            # Each aria2c flag must be its own argv element ("-x", "16"), not "-x 16".
            subprocess.run(["aria2c", "-x", "16", url, "-d", dst, "-o", filename], check=True)
        return filepath
    except Exception as e:
        print(f"Download failed: {e}")
        return None
def determine_load_checkpoint(model_to_load):
    """Returns True for a checkpoint file/URL, False for a Diffusers directory, None if undeterminable."""
    if is_valid_url(model_to_load) and model_to_load.endswith(get_supported_extensions()):
        return True
    elif model_to_load.endswith(get_supported_extensions()):
        return True
    elif os.path.isdir(model_to_load) and is_diffusers_model(model_to_load):
        return False
    return None  # the caller must handle this case
def create_model_repo(api, user, orgs_name, model_name, make_private=False):
"""Creates a Hugging Face model repository if it doesn't exist."""
if orgs_name == "":
repo_id = user["name"] + "/" + model_name.strip()
else:
repo_id = orgs_name + "/" + model_name.strip()
try:
validate_repo_id(repo_id)
api.create_repo(repo_id=repo_id, repo_type="model", private=make_private)
print(f"Model repo '{repo_id}' didn't exist, creating repo")
except HfHubHTTPError as e:
print(f"Model repo '{repo_id}' exists, skipping create repo")
print(f"Model repo '{repo_id}' link: https://huggingface.co/{repo_id}\n")
return repo_id
def is_diffusers_model(model_path):
"""Checks if a given path is a valid Diffusers model directory."""
required_folders = {"unet", "text_encoder", "text_encoder_2", "tokenizer", "tokenizer_2", "scheduler", "vae"}
return required_folders.issubset(set(os.listdir(model_path))) and os.path.isfile(os.path.join(model_path, "model_index.json"))
# ---------------------- MODEL UTIL (From library.sdxl_model_util) ----------------------
def load_models_from_sdxl_checkpoint(sdxl_base_id, checkpoint_path, device):
    """Loads SDXL model components from a single checkpoint file.

    Delegates to diffusers' built-in single-file loader, which performs the
    full LDM-to-Diffusers key/layout conversion for both .ckpt and
    .safetensors files. `sdxl_base_id` is kept for signature compatibility
    but is not needed by this loader.
    """
    pipeline = StableDiffusionXLPipeline.from_single_file(checkpoint_path)
    pipeline.to(device)
    logit_scale = None  # Not used here!
    global_step = None  # Not used here!
    return pipeline.text_encoder, pipeline.text_encoder_2, pipeline.vae, pipeline.unet, logit_scale, global_step
def save_stable_diffusion_checkpoint(save_path, text_encoder1, text_encoder2, unet, epoch, global_step, ckpt_info, vae, logit_scale, save_dtype):
    """Saves the model components into a single checkpoint file.

    Keys use SDXL's LDM-style prefixes, but the tensors themselves stay in
    Diffusers layout; a fully WebUI-compatible export would additionally need
    the per-tensor conversion from kohya-ss's sdxl_model_util.
    """
    weights = OrderedDict()
    def add_weights(prefix, state_dict):
        for key, value in tqdm(state_dict.items()):
            tensor = value.to(save_dtype) if save_dtype is not None else value
            weights[prefix + key] = tensor.contiguous()
    print("Merging text encoder 1")
    add_weights("conditioner.embedders.0.transformer.", text_encoder1.state_dict())
    print("Merging text encoder 2")
    add_weights("conditioner.embedders.1.model.", text_encoder2.state_dict())
    print("Merging vae")
    add_weights("first_stage_model.", vae.state_dict())
    print("Merging unet")
    add_weights("model.diffusion_model.", unet.state_dict())
    info = {"epoch": epoch, "global_step": global_step}
    if ckpt_info is not None:
        info.update(ckpt_info)
    if logit_scale is not None:
        info["logit_scale"] = logit_scale.item()
    if save_path.endswith(".safetensors"):
        # torch.save writes a pickle, which is not a valid .safetensors file.
        save_file(weights, save_path, metadata={k: str(v) for k, v in info.items()})
    else:
        torch.save({"state_dict": weights, "info": info}, save_path)
    key_count = len(weights)
    del weights
    return key_count
def save_diffusers_checkpoint(save_path, text_encoder1, text_encoder2, unet, reference_model, vae, trim_if_model_exists, save_dtype):
    """Saves the individual components in Diffusers layout."""
    print("Saving SDXL as Diffusers format to:", save_path)
    print("SDXL Text Encoder 1 to:", os.path.join(save_path, "text_encoder"))
    text_encoder1.save_pretrained(os.path.join(save_path, "text_encoder"))
    print("SDXL Text Encoder 2 to:", os.path.join(save_path, "text_encoder_2"))
    text_encoder2.save_pretrained(os.path.join(save_path, "text_encoder_2"))
    print("SDXL VAE to:", os.path.join(save_path, "vae"))
    vae.save_pretrained(os.path.join(save_path, "vae"))
    print("SDXL UNet to:", os.path.join(save_path, "unet"))
    unet.save_pretrained(os.path.join(save_path, "unet"))
    # Fall back to the official base repo when no reference model is given,
    # and load only the scheduler rather than the whole pipeline.
    scheduler_source = reference_model or "stabilityai/stable-diffusion-xl-base-1.0"
    print(f"Copying scheduler from {scheduler_source}")
    scheduler = EulerDiscreteScheduler.from_pretrained(scheduler_source, subfolder="scheduler")
    scheduler.save_pretrained(os.path.join(save_path, "scheduler"))
    if trim_if_model_exists:
        print("Trim Complete")
# ---------------------- CONVERSION AND UPLOAD FUNCTIONS ----------------------
def load_sdxl_model(args, is_load_checkpoint, load_dtype, output_widget):
    """Loads the SDXL model from a checkpoint or Diffusers model."""
    model_load_message = "checkpoint" if is_load_checkpoint else "Diffusers" + (" as fp16" if args.fp16 else "")
    # gr.Markdown is not a context manager, so progress is reported via stdout.
    print(f"Loading {model_load_message}: {args.model_to_load}")
    if is_load_checkpoint:
        loaded_model_data = load_from_sdxl_checkpoint(args, output_widget)
    else:
        loaded_model_data = load_sdxl_from_diffusers(args, load_dtype)
    return loaded_model_data
def load_from_sdxl_checkpoint(args, output_widget):
    """Loads the SDXL model components from a local or downloaded checkpoint file."""
    device = "cpu"
    model_path = args.model_to_load
    if is_valid_url(model_path):
        download_dst_dir = tempfile.mkdtemp()
        model_path = download_model(args.model_to_load, download_dst_dir, output_widget)
        if model_path is None:
            print("Loading from checkpoint failed: the download could not be completed.")
            return None, None, None, None
    try:
        text_encoder1, text_encoder2, vae, unet, _, _ = load_models_from_sdxl_checkpoint(
            "stabilityai/stable-diffusion-xl-base-1.0", model_path, device
        )
        return text_encoder1, text_encoder2, vae, unet
    except Exception as e:
        print(f"Could not load SDXL from checkpoint due to:\n{e}")
        return None, None, None, None
def load_sdxl_from_diffusers(args, load_dtype):
"""Loads an SDXL model from a Diffusers model directory."""
pipeline = StableDiffusionXLPipeline.from_pretrained(
args.model_to_load, torch_dtype=load_dtype, tokenizer=None, tokenizer_2=None, scheduler=None
)
text_encoder1 = pipeline.text_encoder
text_encoder2 = pipeline.text_encoder_2
vae = pipeline.vae
unet = pipeline.unet
return text_encoder1, text_encoder2, vae, unet
def convert_and_save_sdxl_model(args, is_save_checkpoint, loaded_model_data, save_dtype, output_widget):
"""Converts and saves the SDXL model as either a checkpoint or a Diffusers model."""
text_encoder1, text_encoder2, vae, unet = loaded_model_data
model_save_message = "checkpoint" + ("" if save_dtype is None else f" in {save_dtype}") if is_save_checkpoint else "Diffusers"
    print(f"Converting and saving as {model_save_message}: {args.model_to_save}")
if is_save_checkpoint:
save_sdxl_as_checkpoint(args, text_encoder1, text_encoder2, vae, unet, save_dtype, output_widget)
else:
save_sdxl_as_diffusers(args, text_encoder1, text_encoder2, vae, unet, save_dtype, output_widget)
def save_sdxl_as_checkpoint(args, text_encoder1, text_encoder2, vae, unet, save_dtype, output_widget):
    """Saves the SDXL model components as a single checkpoint file."""
    logit_scale = None
    ckpt_info = None
    key_count = save_stable_diffusion_checkpoint(
        args.model_to_save, text_encoder1, text_encoder2, unet, args.epoch, args.global_step, ckpt_info, vae, logit_scale, save_dtype
    )
    print(f"Model saved. Total converted state_dict keys: {key_count}")
def save_sdxl_as_diffusers(args, text_encoder1, text_encoder2, vae, unet, save_dtype, output_widget):
    """Saves the SDXL model as a Diffusers pipeline directory."""
    # Fall back to the official base repo for the scheduler/tokenizer configs.
    reference_model = args.reference_model if args.reference_model else "stabilityai/stable-diffusion-xl-base-1.0"
    print(f"Copying scheduler/tokenizer config from: {reference_model}")
    # Assumes the reference repo uses an Euler-style scheduler config, as the
    # official SDXL base model does.
    scheduler = EulerDiscreteScheduler.from_pretrained(reference_model, subfolder="scheduler")
    tokenizer = CLIPTokenizer.from_pretrained(reference_model, subfolder="tokenizer")
    tokenizer_2 = CLIPTokenizer.from_pretrained(reference_model, subfolder="tokenizer_2")
    pipeline = StableDiffusionXLPipeline(
        vae=vae,
        text_encoder=text_encoder1,
        text_encoder_2=text_encoder2,
        unet=unet,
        scheduler=scheduler,
        tokenizer=tokenizer,
        tokenizer_2=tokenizer_2,
    )
    if save_dtype is not None:
        for module in (pipeline.unet, pipeline.vae, pipeline.text_encoder, pipeline.text_encoder_2):
            module.to(save_dtype)
    pipeline.save_pretrained(args.model_to_save)
    print(f"Model saved as {save_dtype}.")
def convert_model(model_to_load, save_precision_as, epoch, global_step, reference_model, fp16, output_widget, output_path):
    """Main conversion function. Saves the converted model under `output_path`."""
    class Args:  # Defining Args locally within convert_model
        def __init__(self, model_to_load, save_precision_as, epoch, global_step, reference_model, output_path, fp16):
            self.model_to_load = model_to_load
            self.save_precision_as = save_precision_as
            self.epoch = epoch
            self.global_step = global_step
            self.reference_model = reference_model
            self.output_path = output_path
            self.fp16 = fp16
    args = Args(model_to_load, save_precision_as, epoch, global_step, reference_model, output_path, fp16)
    try:
        load_dtype = torch.float16 if fp16 else None
        save_dtype = get_save_dtype(save_precision_as)
        is_load_checkpoint = determine_load_checkpoint(model_to_load)
        if is_load_checkpoint is None:
            return f"Conversion failed: could not determine how to load '{model_to_load}'."
        is_save_checkpoint = not is_load_checkpoint  # save in the opposite format
        # Save into output_path so the caller can upload the result before the
        # temporary directory is cleaned up.
        base_name = os.path.splitext(os.path.basename(model_to_load.rstrip("/")))[0] or "converted_model"
        if is_save_checkpoint:
            args.model_to_save = increment_filename(os.path.join(output_path, base_name + ".safetensors"))
        else:
            args.model_to_save = os.path.join(output_path, base_name)
        loaded_model_data = load_sdxl_model(args, is_load_checkpoint, load_dtype, output_widget)
        convert_and_save_sdxl_model(args, is_save_checkpoint, loaded_model_data, save_dtype, output_widget)
        return f"Conversion complete. Model saved to {args.model_to_save}"
    except Exception as e:
        return f"Conversion failed: {e}"
def upload_to_huggingface(model_path, hf_token, orgs_name, model_name, make_private, output_widget):
"""Uploads a model to the Hugging Face Hub."""
try:
login(hf_token, add_to_git_credential=True)
api = HfApi()
user = api.whoami(hf_token)
model_repo = create_model_repo(api, user, orgs_name, model_name, make_private)
# Determine upload parameters (adjust as needed)
path_in_repo = ""
trained_model = os.path.basename(model_path)
path_in_repo_local = path_in_repo if path_in_repo and not is_diffusers_model(model_path) else ""
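        # path_in_repo is hardcoded to "" above, so uploads always land at the
        # repository root; adjust it there if a subfolder layout is wanted.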
notification = f"Uploading {trained_model} from {model_path} to https://huggingface.co/{model_repo}"
        print(notification)
if os.path.isdir(model_path):
if is_diffusers_model(model_path):
commit_message = f"Upload diffusers format: {trained_model}"
print("Detected diffusers model. Adjusting upload parameters.")
else:
commit_message = f"Upload checkpoint: {trained_model}"
print("Detected regular model. Adjusting upload parameters.")
api.upload_folder(
folder_path=model_path,
path_in_repo=path_in_repo_local,
repo_id=model_repo,
commit_message=commit_message,
ignore_patterns=".ipynb_checkpoints",
)
else:
commit_message = f"Upload file: {trained_model}"
api.upload_file(
path_or_fileobj=model_path,
path_in_repo=path_in_repo_local,
repo_id=model_repo,
commit_message=commit_message,
)
        return f"Model upload complete! Check it out at https://huggingface.co/{model_repo}/tree/main"
    except Exception as e:
        return f"Upload failed: {e}"
# ---------------------- GRADIO INTERFACE ----------------------
def main(model_to_load, save_precision_as, epoch, global_step, reference_model, fp16, hf_token, orgs_name, model_name, make_private):
    """Main function orchestrating the entire process."""
    # The temp dir only lives for the duration of this call, so the conversion
    # writes into it and the upload happens before it is cleaned up.
    with tempfile.TemporaryDirectory() as output_path:
        conversion_output = convert_model(model_to_load, save_precision_as, epoch, global_step, reference_model, fp16, None, output_path)
        if conversion_output.startswith("Conversion failed"):
            return conversion_output
        upload_output = upload_to_huggingface(output_path, hf_token, orgs_name, model_name, make_private, None)
    # Return a combined output
    return f"{conversion_output}\n\n{upload_output}"
with gr.Blocks() as demo:
# Add initial warnings (only once)
gr.Markdown(f"""
## **⚠️ IMPORTANT WARNINGS ⚠️**
This App is Coded by an LLM partially, and for more information please go here: [Ktiseos Nyx](https://github.com/Ktiseos-Nyx/Sdxl-to-diffusers). The colab edition of this may indeed break AUP. This space is running on CPU and in theory SHOULD work, but may be slow. Earth and Dusk/ Ktiseos Nyx does not have the enterprise budget for ZERO GPU or any gpu sadly! Thank you to the community, John6666 especially for coming to aid when gemini would NOT fix the requirements. Support Ktiseos Nyx & Myself on Ko-fi: [![Ko-fi](https://img.shields.io/badge/Support%20me%20on%20Ko--fi-F16061?logo=ko-fi&logoColor=white&style=flat)](https://ko-fi.com/Z8Z8L4EO)
**Understanding the 'Model to Load' Input:**
This field can accept any of the following:
* A Hugging Face model identifier (e.g., `stabilityai/stable-diffusion-xl-base-1.0`).
* A direct URL to a .ckpt or .safetensors model file.
* **Important:** Huggingface direct links need to end as /resolve/main/ and the name of the model after.
""")
model_to_load = gr.Textbox(label="Model to Load (Checkpoint or Diffusers)", placeholder="Path to model")
with gr.Row():
save_precision_as = gr.Dropdown(
choices=["fp16", "bf16", "float"], value="fp16", label="Save Precision As"
)
fp16 = gr.Checkbox(label="Load as fp16 (Diffusers only)")
with gr.Row():
        epoch = gr.Number(value=0, precision=0, label="Epoch to Write (Checkpoint)")
        global_step = gr.Number(value=0, precision=0, label="Global Step to Write (Checkpoint)")
reference_model = gr.Textbox(label="Reference Diffusers Model",
placeholder="e.g., stabilityai/stable-diffusion-xl-base-1.0")
gr.Markdown("## Hugging Face Hub Configuration")
hf_token = gr.Textbox(type="password", label="Hugging Face Token", placeholder="Your Hugging Face write token") #THIS IS NEEDED
with gr.Row():
orgs_name = gr.Textbox(label="Organization Name (Optional)", placeholder="Your organization name")
model_name = gr.Textbox(label="Model Name", placeholder="The name of your model on Hugging Face")
make_private = gr.Checkbox(label="Make Repository Private", value=False)
convert_button = gr.Button("Convert and Upload")
output = gr.Markdown()
convert_button.click(fn=main,
inputs=[model_to_load, save_precision_as, epoch, global_step, reference_model,
fp16, hf_token, orgs_name, model_name, make_private],
outputs=output)
demo.launch()