Ketengan-Diffusion committed on
Commit b6f3c99 · verified
1 Parent(s): 4792284

Create app.py

Files changed (1)
  1. app.py +83 -0
app.py ADDED
@@ -0,0 +1,83 @@
+ import torch
+ import gradio as gr
+ from PIL import Image
+ import spaces
+ import tqdm
+ from diffusers import StableCascadeDecoderPipeline, StableCascadePriorPipeline
+
+ device = "cuda" if torch.cuda.is_available() else "cpu"
+ num_images_per_prompt = 1
+ print(f"Running on: {device}")
+
+ # Load the SomniumSC prior (Stage C) and the stock Stable Cascade decoder (Stage B)
+ prior = StableCascadePriorPipeline.from_pretrained("Ketengan-Diffusion/SomniumSC-v1.1", torch_dtype=torch.bfloat16).to(device)
+ decoder = StableCascadeDecoderPipeline.from_pretrained("stabilityai/stable-cascade", torch_dtype=torch.float16).to(device)
+
+ default_negative = "lowres"
+
+ prompt_add = "(dark shot:1.17), epic coloring, faded, ((neutral colors)), art, (hdr:1.5), (muted colors:1.2), hyperdetailed, (artstation:1.5), cinematic, warm lights, dramatic light, (intricate details:1.1), complex background, (rutkowski:0.8), (teal and orange:0.4)"
+
+ css = """
+ footer {
+     visibility: hidden
+ }
+ #generate_button {
+     color: white;
+     border-color: #007bff;
+     background: #2563eb;
+ }
+ #save_button {
+     color: white;
+     border-color: #028b40;
+     background: #01b97c;
+     width: 200px;
+ }
+ #settings_header {
+     background: rgb(245, 105, 105);
+ }
+ """
+
+ @spaces.GPU
+ def gen(prompt, negative, width, height, use_add, progress=gr.Progress()):
+     if use_add:
+         text = f"{prompt}, {prompt_add}"
+     else:
+         text = f"{prompt}"
+     prior_output = prior(
+         prompt=text,
+         height=height,
+         width=width,
+         negative_prompt=negative,
+         guidance_scale=12.0,
+         num_images_per_prompt=num_images_per_prompt,
+         num_inference_steps=50
+     )
+     decoder_output = decoder(
+         image_embeddings=prior_output.image_embeddings.half(),
+         prompt=text,
+         negative_prompt=negative,
+         guidance_scale=1.0,
+         output_type="pil",
+         num_inference_steps=10
+     ).images
+     return decoder_output
+
+ with gr.Blocks(css=css) as demo:
+     gr.Markdown("# SomniumSC-v1.1 ```DEMO```")
+     with gr.Row():
+         prompt = gr.Textbox(show_label=False, placeholder="Enter your prompt", max_lines=3, lines=1, interactive=True, scale=20)
+         button = gr.Button(value="Generate", scale=1)
+     with gr.Accordion("Advanced settings", open=False):
+         with gr.Row():
+             negative = gr.Textbox(show_label=False, value=default_negative, placeholder="Enter a negative prompt", max_lines=4, lines=3, interactive=True)
+         with gr.Row():
+             width = gr.Slider(label="Width", minimum=1024, maximum=2048, step=8, value=1536, interactive=True)
+             height = gr.Slider(label="Height", minimum=1024, maximum=2048, step=8, value=1536, interactive=True)
+         with gr.Row():
+             use_add = gr.Checkbox(label="Enhance result", value=True, interactive=True)
+     with gr.Row():
+         gallery = gr.Gallery(show_label=False, rows=1, columns=1, allow_preview=True, preview=True)
+
+     button.click(gen, inputs=[prompt, negative, width, height, use_add], outputs=gallery)
+
+ demo.launch(show_api=False)
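
For reference, below is a minimal standalone sketch of the same two-stage flow (Stage C prior producing image embeddings, Stage B decoder turning them into pixels) without the Gradio UI. It reuses the model IDs, dtypes, and sampler settings from app.py above; the prompt and output filename are illustrative, and a CUDA-capable GPU is assumed.

import torch
from diffusers import StableCascadeDecoderPipeline, StableCascadePriorPipeline

device = "cuda" if torch.cuda.is_available() else "cpu"

# Same checkpoints as app.py: SomniumSC prior (Stage C) + stock Stable Cascade decoder (Stage B)
prior = StableCascadePriorPipeline.from_pretrained(
    "Ketengan-Diffusion/SomniumSC-v1.1", torch_dtype=torch.bfloat16
).to(device)
decoder = StableCascadeDecoderPipeline.from_pretrained(
    "stabilityai/stable-cascade", torch_dtype=torch.float16
).to(device)

prompt = "a cinematic portrait of a red fox in the snow"  # illustrative prompt
negative = "lowres"

# Stage C: text -> image embeddings
prior_output = prior(
    prompt=prompt,
    negative_prompt=negative,
    height=1536,
    width=1536,
    guidance_scale=12.0,
    num_images_per_prompt=1,
    num_inference_steps=50,
)

# Stage B: embeddings -> PIL image (embeddings cast to float16 to match the decoder dtype)
image = decoder(
    image_embeddings=prior_output.image_embeddings.half(),
    prompt=prompt,
    negative_prompt=negative,
    guidance_scale=1.0,
    output_type="pil",
    num_inference_steps=10,
).images[0]

image.save("somniumsc_sample.png")  # illustrative output path

To launch the full demo locally rather than on a Space, install torch, diffusers, gradio, spaces, and Pillow, then run `python app.py`.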