from diffusers import StableDiffusionXLPipeline, AutoencoderKL
import torch
import random
import os
import gradio as gr

# Configuration comes from environment variables (set as Space secrets):
#   HF_TOKEN      - Hugging Face access token used when downloading the checkpoint
#   Model         - index into model_url_list selecting which checkpoint to load
#   Safe          - 1 enables the prompt filter in generate(), 0 disables it
#   NaughtyWords  - whitespace-separated list of blocked prompt words
#   Override      - secret prompt prefix that bypasses the filter
hf_token = os.getenv("HF_TOKEN")
model_id = int(os.getenv("Model"))
nsfw_filter_enabled = int(os.getenv("Safe"))
naughty_words = os.getenv("NaughtyWords").split()
override = os.getenv("Override")

#stable-diffusion-xl-base-1.0  0 - base model
#Colossus_Project_XL           1 - better people
#AlbedoBaseXL_v11              2 - realistic
#JuggernautXL_v7               3 - better faces
#RealVisXL_V2.0                4 - better photorealism
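#AcornIsSpinning_acornXLV1     5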

model_url_list = ["stabilityai/stable-diffusion-xl-base-1.0/blob/main/sd_xl_base_1.0.safetensors",
                 "Krebzonide/Colossus_Project_XL/blob/main/colossusProjectXLSFW_v202BakedVAE.safetensors",
                 "Krebzonide/AlbedoBaseXL_v11/blob/main/albedobaseXL_v11.safetensors",
                 "Krebzonide/JuggernautXL_version5/blob/main/juggernautXL_v7Rundiffusion.safetensors",
                 "SG161222/RealVisXL_V2.0/blob/main/RealVisXL_V2.0.safetensors",
                 "Krebzonide/AcornIsSpinning_acornXLV1/blob/main/acornIsSpinning_acornxlV1.safetensors"]

css = """
.btn-green {
  background-image: linear-gradient(to bottom right, #6dd178, #00a613) !important;
  border-color: #22c55e !important;
  color: #166534 !important;
}
.btn-green:hover {
  background-image: linear-gradient(to bottom right, #6dd178, #6dd178) !important;
}
"""

def generate(prompt, neg_prompt, samp_steps, cfg_scale, batch_size, seed, height, width, progress=gr.Progress(track_tqdm=True)):
    # Log each request so prompts are visible in the Space logs.
    print("---------------------------------------------------------")
    print(prompt)
    print(neg_prompt)
    prompt = prompt.lower()
    if nsfw_filter_enabled:
        if prompt.startswith(override):
            # The secret override prefix bypasses the filter; strip it before generating.
            prompt = prompt[len(override):]
        else:
            # Steer away from unsafe content and reject prompts containing blocked words.
            neg_prompt = neg_prompt + ", child, nsfw, nude, underwear"
            for word in naughty_words:
                if word in prompt:
                    return None, 58008  # no images; sentinel value shown as the "Last Seed"
    if seed < 0:
        seed = random.randint(1, 999999)  # negative seed means "pick one at random"
    images = pipe(
        prompt,
        negative_prompt=neg_prompt,
        num_inference_steps=samp_steps,
        guidance_scale=cfg_scale,
        num_images_per_prompt=batch_size,
        height=height,
        width=width,
        generator=torch.manual_seed(seed),
    ).images
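    # Caption each image, size the gallery to fit, and return the seed that was
    # actually used so it can be shown in the "Last Seed" box.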
    return gr.update(value=[(img, f"Image {i+1}") for i, img in enumerate(images)], height=height+90), seed
        
def set_base_model(base_model_id):
    # The fp16-fix VAE avoids the numerical issues the stock SDXL VAE has in float16.
    vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16)
    model_url = "https://huggingface.co/" + model_url_list[base_model_id]
    pipe = StableDiffusionXLPipeline.from_single_file(
        model_url,
        torch_dtype=torch.float16,
        variant="fp16",
        vae=vae,
        use_safetensors=True,
        use_auth_token=hf_token,  # older diffusers argument name; recent releases accept token= instead
    )
    pipe.to("cuda")
    pipe.enable_xformers_memory_efficient_attention()
    return pipe

def update_pixel_ratio(num1, num2):
    # Snap the changed dimension down to a multiple of 8 (required by SDXL) and return
    # the resolution in megapixels (1048576 = 1024 * 1024) plus the snapped value.
    return [round((num1 - (num1 % 8)) * num2 / 1048576, 3), num1 - (num1 % 8)]

# Example [prompt, negative prompt] pairs offered below the inputs.
examples = [
    ['A group of 4 students from University of Wisconsin Stout sitting at a table talking, men and women, detailed faces, focused',
    'glitch, deformed, cross-eyed'],
    ['A serious capybara at work, wearing a suit',
    'low quality'],
    ['a graffiti of a robot serving meals to people',
    'low quality'],
    ['photo of a small cozy modern house in red woods on a mountain, solar panels, garage, driveway, great view, sunshine',
    'red house'],
    ['cinematic photo of a woman sitting at a cafe, 35mm photograph, film, bokeh, professional, 4k, detailed face',
    'drawing, painting, crayon, sketch, graphite, impressionist, noisy, blurry, soft, deformed, ugly'],
    ['analog film photo of old woman on the streets of london, faded film, desaturated, 35mm photo, grainy, vignette, vintage, Kodachrome, Lomography, stained, highly detailed, found footage',
    'painting, drawing, illustration, glitch, deformed, mutated, cross-eyed, ugly, disfigured']
]

with gr.Blocks(css=css) as demo:
    with gr.Column():
        prompt = gr.Textbox(label="Prompt")
        negative_prompt = gr.Textbox(label="Negative Prompt")
        submit_btn = gr.Button("Generate", elem_classes="btn-green")
        with gr.Row():
            samp_steps = gr.Slider(1, 30, value=20, step=1, label="Sampling steps")
            cfg_scale = gr.Slider(1, 10, value=4, step=0.5, label="Guidance scale")
            batch_size = gr.Slider(1, 2, value=1, step=1, label="Batch size", interactive=True)
        with gr.Row():
            height = gr.Slider(label="Height", value=1024, minimum=8, maximum=1536, step=8)
            width = gr.Slider(label="Width", value=1024, minimum=8, maximum=1024, step=8)
        with gr.Row():
            pixels = gr.Number(label="Pixel Ratio", value=1, interactive=False)
            seed = gr.Number(label="Seed", value=-1, minimum=-1, precision=0)
        gallery = gr.Gallery(show_label=False, preview=True, container=False, height=1100)
        with gr.Row():
            lastSeed = gr.Number(label="Last Seed", value=-1, interactive=False)
        ex = gr.Examples(examples=examples, inputs=[prompt, negative_prompt])
    submit_btn.click(generate, [prompt, negative_prompt, samp_steps, cfg_scale, batch_size, seed, height, width], [gallery, lastSeed], queue=True)
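    # Snap the sliders to multiples of 8 and refresh the megapixel readout on release.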
    height.release(update_pixel_ratio, [height, width], [pixels, height], queue=False)
    width.release(update_pixel_ratio, [width, height], [pixels, width], queue=False)

# Load the checkpoint selected by the Model environment variable once at startup.
pipe = set_base_model(model_id)
demo.launch(debug=True)