from diffusers import StableDiffusionXLPipeline, AutoencoderKL
import torch
import random
import os

import gradio as gr
import gc

# Index of the checkpoint to load, taken from the "Model" environment variable.
# getenv returns a string, so cast to int before using it to index model_url_list
# (defaults to 0, the base SDXL checkpoint, when the variable is unset).
model_id = int(os.getenv("Model", "0"))

model_url_list = ["stabilityai/stable-diffusion-xl-base-1.0/blob/main/sd_xl_base_1.0.safetensors",
                  "Krebzonide/Colossus_Project_XL/blob/main/colossusProjectXLSFW_v202BakedVAE.safetensors",
                  "Krebzonide/Sevenof9_v3_sdxl/blob/main/nsfwSevenof9V3_nsfwSevenof9V3.safetensors"]

css = """
.btn-green {
    background-image: linear-gradient(to bottom right, #6dd178, #00a613) !important;
    border-color: #22c55e !important;
    color: #166534 !important;
}
.btn-green:hover {
    background-image: linear-gradient(to bottom right, #6dd178, #6dd178) !important;
}
"""

def generate(prompt, neg_prompt, samp_steps, guide_scale, batch_size, seed, height, width, progress=gr.Progress(track_tqdm=True)):
    # Pick a random seed when the seed slider is left at -1.
    if seed < 0:
        seed = random.randint(1, 999999)
    images = pipe(
        prompt,
        negative_prompt=neg_prompt,
        num_inference_steps=int(samp_steps),
        guidance_scale=guide_scale,
        num_images_per_prompt=int(batch_size),
        height=int(height),  # cast slider values to int; the pipeline expects integer dimensions
        width=int(width),
        generator=torch.manual_seed(seed),
    ).images
    # Return (image, caption) pairs for the gallery.
    return [(img, f"Image {i+1}") for i, img in enumerate(images)]

def set_base_model(base_model_id):
    # fp16-safe VAE; the stock SDXL VAE can produce artifacts in half precision.
    vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16)
    global model_url_list
    model_url = "https://huggingface.co/" + model_url_list[base_model_id]
    pipe = StableDiffusionXLPipeline.from_single_file(
        model_url,
        torch_dtype=torch.float16,
        variant="fp16",
        vae=vae,
        use_safetensors=True,
        use_auth_token=os.getenv("HF_TOKEN"),  # read the Hub token from the environment rather than hardcoding it
    )
    pipe.to("cuda")
    return pipe

with gr.Blocks(css=css) as demo:
    with gr.Column():
        prompt = gr.Textbox(label="Prompt")
        negative_prompt = gr.Textbox(label="Negative Prompt")
        submit_btn = gr.Button("Generate", elem_classes="btn-green")
        with gr.Row():
            samp_steps = gr.Slider(1, 50, value=20, step=1, label="Sampling steps")
            guide_scale = gr.Slider(1, 6, value=3, step=0.5, label="Guidance scale")
            batch_size = gr.Slider(1, 6, value=1, step=1, label="Batch size")
        with gr.Row():
            seed = gr.Slider(label="Seed", value=-1, minimum=-1, maximum=999999, step=1)
            height = gr.Slider(label="Height", value=1024, minimum=512, maximum=2048, step=16)
            width = gr.Slider(label="Width", value=1024, minimum=512, maximum=2048, step=16)
        gallery = gr.Gallery(label="Generated images", height=800)
    submit_btn.click(generate, [prompt, negative_prompt, samp_steps, guide_scale, batch_size, seed, height, width], [gallery], queue=True)

# Load the selected checkpoint once at startup, then launch the app.
pipe = set_base_model(model_id)
demo.launch(debug=True)