Commit af85adf
1 Parent(s): 3868923
new-ui (#2)
Update UI (8caa43f5888e91e2d77ebec0eab2a48bd1c91bac)
Co-authored-by: Multimodal AI art <[email protected]>
app.py
CHANGED
@@ -39,9 +39,9 @@ class Model:
 
 
 models = [
-    Model("Stable Diffusion v1-4", "CompVis/stable-diffusion-v1-4"),
+    #Model("Stable Diffusion v1-4", "CompVis/stable-diffusion-v1-4"),
     # Model("Stable Diffusion v1-5", "runwayml/stable-diffusion-v1-5"),
-
+    Model("anything-v4.0", "andite/anything-v4.0"),
 ]
 
 MODELS = {m.name: m for m in models}
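The only functional change in this hunk is which checkpoint the Space loads: Stable Diffusion v1-4 is commented out and anything-v4.0 becomes models[0]. The Model class itself sits above the hunk and is not shown, so the sketch below only assumes it is a plain name/repo-id record; the field names are illustrative, not copied from app.py.

from dataclasses import dataclass

@dataclass
class Model:
    name: str  # label shown in the (hidden) model dropdown
    path: str  # Hugging Face Hub repo id, e.g. passed to from_pretrained()

models = [
    Model("anything-v4.0", "andite/anything-v4.0"),
]

# Unchanged context line from the hunk: name -> Model lookup used elsewhere in app.py.
MODELS = {m.name: m for m in models}

print(MODELS["anything-v4.0"].path)  # andite/anything-v4.0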
@@ -59,12 +59,12 @@ def error_str(error, title="Error"):
 
 
 def inference(
-    model_name,
     prompt,
+    neg_prompt,
     guidance,
     steps,
-    seed
-
+    seed,
+    model_name,
 ):
 
     print(psutil.virtual_memory()) # print memory usage
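Reordering the inference signature only works because Gradio passes the values of the inputs components to the handler positionally, and the inputs list later in this diff is reordered to match (prompt, neg_prompt, guidance, steps, seed, model_name). A minimal sketch of that positional contract, with a stub body standing in for the real pipeline call:

def inference(prompt, neg_prompt, guidance, steps, seed, model_name):
    # Stub; the real function (not part of this diff) runs the pipeline
    # and returns (low_res_image, up_res_image, error_output).
    return prompt, neg_prompt, guidance, steps, seed, model_name

# Gradio collects one value per component in `inputs` and calls fn(*values),
# so the list order must line up with the parameter order above.
values = ["a mecha robot in a favela", "low quality", 7.5, 25, 33, "anything-v4.0"]
assert inference(*values)[-1] == "anything-v4.0"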
@@ -141,52 +141,61 @@ with gr.Blocks(css="style.css") as demo:
     gr.HTML(
         f"""
               <div class="finetuned-diffusion-div">
-              <div>
-                <h1>Stable Diffusion Latent Upscaler</h1>
+              <div style="text-align: center">
+                <h1>Anything v4 model + <a href="https://huggingface.co/stabilityai/sd-x2-latent-upscaler">Stable Diffusion Latent Upscaler</a></h1>
+              <p>
+               Demo for the <a href="https://huggingface.co/andite/anything-v4.0">Anything v4</a> model hooked with the ultra-fast <a href="https://huggingface.co/stabilityai/sd-x2-latent-upscaler">Latent Upscaler</a>
+              </p>
               </div>
-
-
-              </p>
-              <p>
-               Running on <b>{device}</b>
-              </p>
-              <p>You can also duplicate this space and upgrade to gpu by going to settings:<br>
+              <!--
+              <p>To skip the queue, you can duplicate this Space<br>
               <a style="display:inline-block" href="https://huggingface.co/spaces/patrickvonplaten/finetuned_diffusion?duplicate=true"><img src="https://img.shields.io/badge/-Duplicate%20Space-blue?labelColor=white&style=flat&logo=data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAAAXNSR0IArs4c6QAAAP5JREFUOE+lk7FqAkEURY+ltunEgFXS2sZGIbXfEPdLlnxJyDdYB62sbbUKpLbVNhyYFzbrrA74YJlh9r079973psed0cvUD4A+4HoCjsA85X0Dfn/RBLBgBDxnQPfAEJgBY+A9gALA4tcbamSzS4xq4FOQAJgCDwV2CPKV8tZAJcAjMMkUe1vX+U+SMhfAJEHasQIWmXNN3abzDwHUrgcRGmYcgKe0bxrblHEB4E/pndMazNpSZGcsZdBlYJcEL9Afo75molJyM2FxmPgmgPqlWNLGfwZGG6UiyEvLzHYDmoPkDDiNm9JR9uboiONcBXrpY1qmgs21x1QwyZcpvxt9NS09PlsPAAAAAElFTkSuQmCC&logoWidth=14" alt="Duplicate Space"></a></p>
+              -->
             </div>
         """
     )
-    with gr.Row():
 
-
-
+    with gr.Column(scale=100):
+        with gr.Group(visible=False):
            model_name = gr.Dropdown(
                label="Model",
                choices=[m.name for m in models],
                value=models[0].name,
+                visible=False
            )
 
-
-
-
-
-
-
-
-
-
+        with gr.Row(elem_id="prompt-container").style(mobile_collapse=False, equal_height=True):
+            with gr.Column():
+                prompt = gr.Textbox(
+                    label="Enter your prompt",
+                    show_label=False,
+                    max_lines=1,
+                    placeholder="Enter your prompt",
+                    elem_id="prompt-text-input",
+                ).style(
+                    border=(True, False, True, True),
+                    rounded=(True, False, False, True),
+                    container=False,
+                )
+                neg_prompt = gr.Textbox(
+                    label="Enter your negative prompt",
+                    show_label=False,
+                    max_lines=1,
+                    placeholder="Enter a negative prompt",
+                    elem_id="negative-prompt-text-input",
+                ).style(
+                    border=(True, False, True, True),
+                    rounded=(True, False, False, True),
+                    container=False,
+                )
+            generate = gr.Button("Generate image").style(
+                margin=False,
+                rounded=(False, True, True, False),
+                full_width=False,
            )
-
-
-        error_output = gr.Markdown()
-
-        with gr.Column(scale=45):
-            with gr.Tab("Options"):
+
+        with gr.Accordion("Advanced Options", open=False):
            with gr.Group():
-                neg_prompt = gr.Textbox(
-                    label="Negative prompt",
-                    placeholder="What to exclude from the image",
-                )
-
                with gr.Row():
                    guidance = gr.Slider(
                        label="Guidance scale", value=7.5, maximum=15
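The .style(...) calls on the prompt row, the textboxes and the button target the Gradio 3.x styling API; Gradio 4.x removed .style() in favour of constructor arguments and CSS. A rough, hedged equivalent of the prompt row for Gradio 4.x, under that assumption (the rounded/border/margin tweaks would move into style.css):

import gradio as gr

with gr.Blocks(css="style.css") as demo:
    with gr.Row(elem_id="prompt-container", equal_height=True):
        with gr.Column():
            prompt = gr.Textbox(show_label=False, max_lines=1,
                                placeholder="Enter your prompt",
                                elem_id="prompt-text-input", container=False)
            neg_prompt = gr.Textbox(show_label=False, max_lines=1,
                                    placeholder="Enter a negative prompt",
                                    elem_id="negative-prompt-text-input", container=False)
        generate = gr.Button("Generate image")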
@@ -202,36 +211,45 @@ with gr.Blocks(css="style.css") as demo:
                    seed = gr.Slider(
                        0, 2147483647, label="Seed (0 = random)", value=0, step=1
                    )
+
 
-
+    with gr.Column(scale=100):
+        with gr.Row():
+            with gr.Column(scale=75):
+                up_res_image = gr.Image(label="Upscaled 1024px Image", shape=(1024, 1024))
+            with gr.Column(scale=25):
+                low_res_image = gr.Image(label="Original 512px Image", shape=(512, 512))
+        error_output = gr.Markdown()
 
    inputs = [
-        model_name,
        prompt,
+        neg_prompt,
        guidance,
        steps,
        seed,
-
+        model_name,
    ]
    outputs = [low_res_image, up_res_image, error_output]
    prompt.submit(inference, inputs=inputs, outputs=outputs)
    generate.click(inference, inputs=inputs, outputs=outputs)
 
-
-
-
-
-
-
-
-
-
-
+    ex = gr.Examples(
+        [
+            ["a mecha robot in a favela", "low quality", 7.5, 25, 33, models[0].name],
+            ["the spirit of a tamagotchi wandering in the city of Paris", "low quality, bad render", 7.5, 50, 85, models[0].name],
+        ],
+        inputs=[prompt, neg_prompt, guidance, steps, seed, model_name],
+        outputs=outputs,
+        fn=inference,
+        cache_examples=True,
+    )
+    ex.dataset.headers = [""]
+
    gr.HTML(
        """
        <div style="border-top: 1px solid #303030;">
            <br>
-            <p>
+            <p>Space by 🤗 Hugging Face, models by Stability AI, andite, linaqruf and others ❤️</p>
            <p>This space uses the <a href="https://github.com/LuChengTHU/dpm-solver">DPM-Solver++</a> sampler by <a href="https://arxiv.org/abs/2206.00927">Cheng Lu, et al.</a>.</p>
            <p>This is a Demo Space For:<br>
            <a href="https://huggingface.co/stabilityai/sd-x2-latent-upscaler">Stability AI's Latent Upscaler</a>
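gr.Examples wires each example row to the same components as the manual inputs list, so the row layout mirrors the new inference signature; with cache_examples=True, Gradio runs fn on every row at build time and replays the stored outputs when a row is clicked, which is why specific seeds (33 and 85) are baked into the examples. A small self-contained sketch of the same pattern with a hypothetical stub function in place of the real pipeline:

import gradio as gr

def fake_inference(prompt, neg_prompt, guidance, steps, seed, model_name):
    # Stand-in for the real pipeline call; returns a single text output.
    return f"{model_name}: {prompt!r} (guidance={guidance}, steps={steps}, seed={seed})"

with gr.Blocks() as demo:
    prompt = gr.Textbox(label="Prompt")
    neg_prompt = gr.Textbox(label="Negative prompt")
    guidance = gr.Slider(0, 15, value=7.5, label="Guidance scale")
    steps = gr.Slider(1, 100, value=25, step=1, label="Steps")
    seed = gr.Slider(0, 2147483647, value=0, step=1, label="Seed (0 = random)")
    model_name = gr.Textbox(value="anything-v4.0", visible=False)
    out = gr.Textbox(label="Result")

    # Rows are matched positionally to `inputs`; cache_examples=True
    # pre-computes the outputs for every row when the app starts.
    gr.Examples(
        [["a mecha robot in a favela", "low quality", 7.5, 25, 33, "anything-v4.0"]],
        inputs=[prompt, neg_prompt, guidance, steps, seed, model_name],
        outputs=[out],
        fn=fake_inference,
        cache_examples=True,
    )

demo.launch()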
style.css
CHANGED
@@ -1,24 +1,36 @@
-.
-
-  align-items:center;
-  gap:.8rem;
-  font-size:1.75rem
+.container {
+  max-width: 960px
 }
-
-
-
+
+.finetuned-diffusion-div div {
+  align-items: center;
+  gap: .8rem;
+  font-size: 1.75rem
 }
-
-
-  font-
+
+.finetuned-diffusion-div div h1 {
+  font-weight: 900;
+  margin-bottom: 7px
 }
-
-
+
+.finetuned-diffusion-div div p {
+  font-size: 50%
 }
-
-
-  margin-bottom:
+
+.finetuned-diffusion-div p {
+  margin-bottom: 10px;
+  font-size: 94%
 }
-
-
+
+a {
+  text-decoration: underline
 }
+
+.tabs {
+  margin-top: 0;
+  margin-bottom: 0
+}
+
+#gallery {
+  min-height: 20rem
+}
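These rules reach the app through the css="style.css" argument visible in the app.py hunk headers above; the elem_id values set on components (for example "prompt-text-input" and "prompt-container") become ordinary id selectors that additional rules in this file could target. A tiny sketch of the same hookup, passing the CSS inline so the snippet runs without the file on disk; the selector and rule shown are illustrative assumptions, not taken from style.css:

import gradio as gr

custom_css = "#prompt-text-input textarea { font-size: 1.1rem }"

# app.py passes the path "style.css" here instead of an inline string.
with gr.Blocks(css=custom_css) as demo:
    prompt = gr.Textbox(elem_id="prompt-text-input", show_label=False,
                        placeholder="Enter your prompt")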