File size: 2,851 Bytes
f4ff201
 
3fec1fb
f4ff201
975497a
f022e2c
f4ff201
efcd5d5
5fe1df2
 
f4ff201
 
3fec1fb
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
548031b
 
 
 
 
 
 
 
 
3fec1fb
 
 
 
0a14984
3fec1fb
5305898
3fec1fb
 
548031b
 
 
 
3fec1fb
548031b
b00a80c
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
import torch
from diffusers import StableDiffusionPipeline
import gradio as gr

# Base checkpoint and the LoRA weights that get applied on top of it.
model_base = "Krebzonide/LazyMixPlus"
lora_model_path = "Krebzonide/94g1-jemz-41r2-0"

# Load the Stable Diffusion pipeline in fp16 (halves VRAM vs fp32) from
# safetensors weights, then attach the LoRA attention processors to the UNet.
pipe = StableDiffusionPipeline.from_pretrained(model_base, torch_dtype=torch.float16, use_safetensors=True)
pipe.unet.load_attn_procs(lora_model_path)  # known-working public LoRA load
#pipe.unet.load_attn_procs(lora_model_path, use_auth_token=True)  # alternative: authenticated load for a private repo
pipe.to("cuda")  # move the whole pipeline to the GPU; inference below assumes CUDA is available

# Custom CSS injected into the Gradio app: green/red gradient button
# variants (.btn-green / .btn-red) with separate palettes for light mode
# and dark mode (.dark ...). Not currently referenced by a component's
# elem_classes, but available for styling.
css = """
.btn-green {
  background-image: linear-gradient(to bottom right, #86efac, #22c55e) !important;
  border-color: #22c55e !important;
  color: #166534 !important;
}
.btn-green:hover {
  background-image: linear-gradient(to bottom right, #86efac, #86efac) !important;
}
.btn-red {
  background: linear-gradient(to bottom right, #fda4af, #fb7185) !important;
  border-color: #fb7185 !important;
  color: #9f1239 !important;
}
.btn-red:hover {background: linear-gradient(to bottom right, #fda4af, #fda4af) !important;}
/*****/
.dark .btn-green {
  background-image: linear-gradient(to bottom right, #047857, #065f46) !important;
  border-color: #047857 !important;
  color: #ffffff !important;
}
.dark .btn-green:hover {
  background-image: linear-gradient(to bottom right, #047857, #047857) !important;
}
.dark .btn-red {
  background: linear-gradient(to bottom right, #be123c, #9f1239) !important;
  border-color: #be123c !important;
  color: #ffffff !important;
}
.dark .btn-red:hover {background: linear-gradient(to bottom right, #be123c, #be123c) !important;}
"""

def generate(prompt, neg_prompt, samp_steps, guide_scale, lora_scale):
    """Run the LoRA-augmented Stable Diffusion pipeline for one prompt.

    Args:
        prompt: Positive text prompt.
        neg_prompt: Negative text prompt.
        samp_steps: Number of denoising steps. Gradio sliders may deliver
            this as a float, so it is cast to int before use.
        guide_scale: Classifier-free guidance scale.
        lora_scale: LoRA strength in [0, 1], forwarded to the attention
            processors via cross_attention_kwargs.

    Returns:
        A list of (PIL.Image, caption) tuples, one per generated image,
        in the shape gr.Gallery expects.
    """
    images = pipe(
        prompt,
        negative_prompt=neg_prompt,
        # diffusers requires an integer step count; slider values can be floats.
        num_inference_steps=int(samp_steps),
        guidance_scale=guide_scale,
        cross_attention_kwargs={"scale": lora_scale},
        num_images_per_prompt=4,
    ).images
    return [(img, f"Image {i+1}") for i, img in enumerate(images)]

# Build the Gradio UI: prompt inputs, a generate button, the output
# gallery, and sampling-control sliders wired to generate().
with gr.Blocks(css=css) as demo:
    with gr.Column():
        prompt = gr.Textbox(label="Prompt")
        negative_prompt = gr.Textbox(label="Negative Prompt", value="lowres, bad anatomy, bad hands, cropped, worst quality, disfigured, deformed, extra limbs, asian, filter, render")
        # min_width expects an integer pixel count, not a CSS string like "96px".
        submit_btn = gr.Button("Generate", variant="primary", min_width=96)
        gallery = gr.Gallery(label="Generated images")
        with gr.Row():
            samp_steps = gr.Slider(1, 100, value=30, step=1, label="Sampling steps")
            guide_scale = gr.Slider(1, 10, value=6, step=0.5, label="Guidance scale")
            lora_scale = gr.Slider(0, 1, value=0.5, step=0.01, label="LoRA power")

    submit_btn.click(generate, [prompt, negative_prompt, samp_steps, guide_scale, lora_scale], [gallery], queue=True)

demo.queue(1)  # serialize requests: one GPU generation at a time
demo.launch(debug=True)