import torch
import gradio as gr
from diffusers import StableDiffusionXLPipeline, AutoencoderKL

# Base SDXL model id, kept for reference; the pipeline below loads a
# fine-tuned checkpoint from a single file instead.
model_base = "stabilityai/stable-diffusion-xl-base-1.0"

# The fp16-fixed SDXL VAE avoids NaN/black-image artifacts when decoding in half precision.
vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16)

# Load the fine-tuned checkpoint directly from a single .safetensors file on the Hub.
# (A `variant` kwarg is not needed here: variants only apply when loading from a
# multi-file repo with from_pretrained, not from a single file.)
pipe = StableDiffusionXLPipeline.from_single_file(
    "https://huggingface.co/Krebzonide/Colossus_Project_XL/blob/main/colossusProjectXLSFW_v202BakedVAE.safetensors",
    torch_dtype=torch.float16,
    vae=vae,
    use_safetensors=True,
    scheduler_type="ddim",
)
pipe = pipe.to("cuda")
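
# A minimal sketch of wiring the "LoRA power" slider below to an actual LoRA,
# assuming a LoRA trained for this checkpoint is available (the path here is
# hypothetical). load_lora_weights and the cross_attention_kwargs scale are
# standard diffusers APIs:
#
#   pipe.load_lora_weights("path/to/lora.safetensors")  # hypothetical file
#   images = pipe(prompt, cross_attention_kwargs={"scale": lora_scale}).images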

# CSS for the green "Generate" button (applied via elem_classes below).
css = """
.btn-green {
    background-image: linear-gradient(to bottom right, #6dd178, #00a613) !important;
    border-color: #22c55e !important;
    color: #166534 !important;
}
.btn-green:hover {
    background-image: linear-gradient(to bottom right, #6dd178, #6dd178) !important;
}
"""

def generate(prompt, neg_prompt, samp_steps, guide_scale, lora_scale, progress=gr.Progress(track_tqdm=True)):
    # lora_scale is accepted from the UI but has no effect until a LoRA is
    # loaded (see the sketch above).
    images = pipe(
        prompt,
        negative_prompt=neg_prompt,
        num_inference_steps=int(samp_steps),
        guidance_scale=guide_scale,
        num_images_per_prompt=1,
    ).images
    # Pair each image with a caption for the Gallery component.
    return [(img, f"Image {i+1}") for i, img in enumerate(images)]
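
# A minimal sketch of seeded, reproducible generation, assuming a fixed seed
# is wanted; torch.Generator and the pipeline's `generator` argument are
# standard torch/diffusers APIs:
#
#   gen = torch.Generator(device="cuda").manual_seed(42)
#   images = pipe(prompt, generator=gen, num_inference_steps=25).images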

with gr.Blocks(css=css) as demo:
    with gr.Column():
        prompt = gr.Textbox(label="Prompt")
        negative_prompt = gr.Textbox(label="Negative Prompt", value="lowres, bad anatomy, bad hands, cropped, worst quality, disfigured, deformed, extra limbs, asian, filter, render")
        submit_btn = gr.Button("Generate", elem_classes="btn-green")
        gallery = gr.Gallery(label="Generated images", height=1100)
        with gr.Row():
            samp_steps = gr.Slider(1, 100, value=25, step=1, label="Sampling steps")
            guide_scale = gr.Slider(1, 10, value=6, step=0.5, label="Guidance scale")
            lora_scale = gr.Slider(0, 1, value=0.5, step=0.01, label="LoRA power")

    submit_btn.click(generate, [prompt, negative_prompt, samp_steps, guide_scale, lora_scale], [gallery], queue=True)

# Process one request at a time to avoid GPU memory contention.
demo.queue(concurrency_count=1)
demo.launch(debug=True)