File size: 3,178 Bytes
b6f3c99
 
 
 
 
 
 
 
 
 
 
037ee32
 
 
 
 
 
b6f3c99
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
ad00801
b6f3c99
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
import torch
import gradio as gr
from PIL import Image
import spaces
import tqdm
from diffusers import StableCascadeDecoderPipeline, StableCascadePriorPipeline

# Prefer GPU when available; Stable Cascade still runs on CPU, just very slowly.
device = "cuda" if torch.cuda.is_available() else "cpu"
# Number of images produced per prompt by the prior stage (fixed at 1 for this demo).
num_images_per_prompt = 1
print(f"Running on: {device}")

# Stage C (prior): try the Hugging Face Hub first, fall back to a local checkout.
# NOTE(review): only OSError is caught here — other load failures (e.g. network
# exceptions wrapped differently by huggingface_hub) will propagate; confirm intended.
try:
    prior = StableCascadePriorPipeline.from_pretrained("Ketengan-Diffusion/SomniumSC-v1.1", torch_dtype=torch.bfloat16).to(device)
except OSError:
    print("Failed to load model from Hugging Face Model Hub. Loading from local path instead.")
    prior = StableCascadePriorPipeline.from_pretrained("./SomniumSC-v1.1", torch_dtype=torch.bfloat16).to(device)

# Stage B (decoder): loaded in float16; the bfloat16 prior embeddings are cast
# with .half() before decoding (see gen() below).
decoder = StableCascadeDecoderPipeline.from_pretrained("stabilityai/stable-cascade", torch_dtype=torch.float16).to(device)

# Default negative prompt pre-filled in the UI.
# NOTE(review): "deafult" is a typo, but the name is referenced when building the
# interface below, so it is kept unchanged here.
deafult_negative = "lowres"

# Quality-booster tags appended to the prompt when the "Tingkatkan Hasil"
# (enhance result) checkbox is ticked.
prompt_add = "(dark shot:1.17), epic coloring, faded, ((neutral colors)), art, (hdr:1.5), (muted colors:1.2), hyperdetailed, (artstation:1.5), cinematic, warm lights, dramatic light, (intricate details:1.1), complex background, (rutkowski:0.8), (teal and orange:0.4)"

# Gradio CSS overrides: hide the default footer and style the generate/save
# buttons and settings header referenced by elem_id in the layout.
css = """
footer {
    visibility: hidden
}
#generate_button {
    color: white;
    border-color: #007bff;
    background: #2563eb;
}
#save_button {
    color: white;
    border-color: #028b40;
    background: #01b97c;
    width: 200px;
}
#settings_header {
    background: rgb(245, 105, 105);
}
"""

@spaces.GPU
def gen(prompt, negative, width, height, use_add, progress=gr.Progress(),
        prior_guidance_scale=12.0, prior_steps=50,
        decoder_guidance_scale=1.0, decoder_steps=10):
    """Generate image(s) for *prompt* with the two-stage Stable Cascade pipeline.

    Args:
        prompt: Positive text prompt from the UI.
        negative: Negative prompt.
        width: Output width in pixels.
        height: Output height in pixels.
        use_add: When True, append the module-level ``prompt_add`` booster tags.
        progress: Gradio progress tracker (its presence enables the UI progress
            bar; it is not advanced manually here).
        prior_guidance_scale: CFG scale for the stage-C prior (default 12.0,
            same as the previously hard-coded value).
        prior_steps: Inference steps for the prior (default 50).
        decoder_guidance_scale: CFG scale for the stage-B decoder (default 1.0).
        decoder_steps: Inference steps for the decoder (default 10).

    Returns:
        The list of decoded PIL images (``.images`` of the decoder output).
    """
    # Optionally append the quality-booster tags to the user prompt.
    text = f"{prompt}, {prompt_add}" if use_add else f"{prompt}"
    # Stage C: text -> image embeddings.
    prior_output = prior(
        prompt=text,
        height=height,
        width=width,
        negative_prompt=negative,
        guidance_scale=prior_guidance_scale,
        num_images_per_prompt=num_images_per_prompt,
        num_inference_steps=prior_steps,
    )
    # Stage B: embeddings -> PIL images.  The decoder runs in float16, so the
    # bfloat16 prior embeddings are cast with .half() first.
    decoder_output = decoder(
        image_embeddings=prior_output.image_embeddings.half(),
        prompt=text,
        negative_prompt=negative,
        guidance_scale=decoder_guidance_scale,
        output_type="pil",
        num_inference_steps=decoder_steps,
    ).images
    return decoder_output

# UI layout: statement order inside the Blocks context defines the on-screen
# layout, so the structure below is order-sensitive.
with gr.Blocks(css=css) as demo:
    gr.Markdown("# SomniumSC-v1.1 ```DEMO```")
    with gr.Row():
        # Prompt box ("Masukkan prompt Anda" = "Enter your prompt") and the
        # "Hasilkan" ("Generate") button.
        prompt = gr.Textbox(show_label=False, placeholder="Masukkan prompt Anda", max_lines=3, lines=1, interactive=True, scale=20)
        button = gr.Button(value="Hasilkan", scale=1)
    # "Pengaturan Lanjutan" = "Advanced Settings", collapsed by default.
    with gr.Accordion("Pengaturan Lanjutan", open=False):
        with gr.Row():
            negative = gr.Textbox(show_label=False, value=deafult_negative, placeholder="Enter a negative", max_lines=4, lines=3, interactive=True)
        with gr.Row():
            width = gr.Slider(label="Width", minimum=1024, maximum=2048, step=8, value=1536, interactive=True)
            height = gr.Slider(label="Height", minimum=1024, maximum=2048, step=8, value=1536, interactive=True)
        with gr.Row():
            # "Tingkatkan Hasil" = "Enhance result": toggles the prompt_add tags in gen().
            use_add = gr.Checkbox(label="Tingkatkan Hasil", value=False, interactive=True)
    with gr.Row():
        gallery = gr.Gallery(show_label=False, rows=1, columns=1, allow_preview=True, preview=True)

    # Wire the button to gen(); input order must match gen()'s positional parameters.
    button.click(gen, inputs=[prompt, negative, width, height, use_add], outputs=gallery)

demo.launch(show_api=False)