Commit c70d532 (parent 61f93a2): add 2 scale

app.py (CHANGED)
@@ -224,12 +224,24 @@ def split_image(im, rows, cols, should_square, should_quiet=False):
         n += 1
     return [img for img in images]
 
-def upscale_image(img, rows, cols, seed, prompt, negative_prompt, xformers, cpu_offload, attention_slicing, enable_custom_sliders=False, guidance=7, iterations=50):
-
-
-
-
-
+def upscale_image(img, rows, up_factor, cols, seed, prompt, negative_prompt, xformers, cpu_offload, attention_slicing, enable_custom_sliders=False, guidance=7, iterations=50):
+
+    if up_factor==2:
+        model_id = "stabilityai/sd-x2-latent-upscaler"
+        try:
+            pipeline = StableDiffusionLatentUpscalePipeline.from_pretrained(model_id, torch_dtype=torch.float16)
+        except:
+            pipeline = StableDiffusionLatentUpscalePipeline.from_pretrained(model_id, torch_dtype=torch.float16, local_files_only=True)
+
+    if up_factor==4:
+        model_id = "stabilityai/stable-diffusion-x4-upscaler"
+
+        try:
+            pipeline = StableDiffusionUpscalePipeline.from_pretrained(model_id, torch_dtype=torch.float16)
+        except:
+            pipeline = StableDiffusionUpscalePipeline.from_pretrained(model_id, torch_dtype=torch.float16, local_files_only=True)
+
+
     pipeline = pipeline.to("cuda")
     if xformers:
         pipeline.enable_xformers_memory_efficient_attention()
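In short, the new up_factor argument picks which Stability AI upscaler checkpoint upscale_image loads, with a local_files_only retry when fetching from the Hub fails. A minimal standalone sketch of that selection logic, assuming the same diffusers and torch imports as app.py (load_upscale_pipeline is an illustrative helper, not a function in the file):

import torch
from diffusers import StableDiffusionLatentUpscalePipeline, StableDiffusionUpscalePipeline

def load_upscale_pipeline(up_factor: int):
    """Illustrative helper: choose the x2 latent upscaler or the x4 upscaler."""
    if up_factor == 2:
        model_id = "stabilityai/sd-x2-latent-upscaler"
        cls = StableDiffusionLatentUpscalePipeline
    elif up_factor == 4:
        model_id = "stabilityai/stable-diffusion-x4-upscaler"
        cls = StableDiffusionUpscalePipeline
    else:
        raise ValueError("up_factor must be 2 or 4")
    try:
        # Normal path: download (or reuse cached) weights from the Hugging Face Hub.
        return cls.from_pretrained(model_id, torch_dtype=torch.float16)
    except Exception:
        # Offline fallback, mirroring the commit: read only the local cache.
        return cls.from_pretrained(model_id, torch_dtype=torch.float16, local_files_only=True)

Note that in the committed code a value other than 2 or 4 assigns pipeline in neither branch, so the later pipeline.to("cuda") would fail; the new slider added to the UI further down constrains the choice to exactly 2 or 4.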
@@ -306,26 +318,27 @@ def upscale_image(img, rows, cols, seed, prompt, negative_prompt, xformers, cpu_
     return final_img
 
 
-def upscale( image, prompt, negative_prompt, rows, guidance, iterations, xformers_input, cpu_offload_input, attention_slicing_input):
-    print("upscale",
-
-    return upscale_image(image,
+def upscale( image, prompt, negative_prompt, rows, up_factor, guidance, iterations, xformers_input, cpu_offload_input, attention_slicing_input):
+    print("upscale", prompt, negative_prompt, rows, up_factor, guidance, iterations, xformers_input, cpu_offload_input, attention_slicing_input)
+    return upscale_image(img=image,
         rows=rows,cols=rows,
+        up_factor=up_factor,
         seed=-1,
         prompt=prompt,
-
-
-        xformers=xformers_input,
-        cpu_offload=cpu_offload_input,
+        negative_prompt=negative_prompt,
+        enable_custom_sliders=True,
+        xformers=xformers_input,
+        cpu_offload=cpu_offload_input,
         attention_slicing=attention_slicing_input,
+        guidance=guidance,
         iterations=iterations)
 
-modes = {
-
-
-
-
-}
+# modes = {
+#     '1': '1',
+#     'img2img': 'Image to Image',
+#     'inpaint': 'Inpainting',
+#     'upscale4x': 'Upscale 4x',
+# }
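For illustration only, not code from the Space: assuming upscale from app.py is in scope, the updated wrapper could now be called directly with the extra scale argument. The values below just echo the UI defaults shown in the next hunk, and "room.png" is a hypothetical filename.

from PIL import Image

# Hypothetical direct call to upscale(); in the Space these values come from
# the Gradio widgets rather than being hard-coded.
image = Image.open("room.png")   # any input image
result = upscale(
    image,                                                     # image
    "empty room",                                              # prompt
    "jpeg artifacts, lowres, bad quality, watermark, text",    # negative prompt
    2,                                                         # rows: 2x2 tile grid
    2,                                                         # up_factor: 2 or 4
    7,                                                         # guidance scale
    10,                                                        # iterations
    True,                                                      # xformers_input
    True,                                                      # cpu_offload_input
    True,                                                      # attention_slicing_input
)
result.save("room_upscaled.png")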
@@ -371,8 +384,9 @@ with gr.Blocks() as app:
             gr.Textbox(label="prompt",value="empty room"),
             gr.Textbox(label="negative prompt",value="jpeg artifacts, lowres, bad quality, watermark, text"),
             gr.Number(value=2, label="Tile grid dimension amount (number of rows and columns) - X by X "),
+            gr.Slider(2, 4, 2, step=2, label='Upscale 2 or 4'),
             gr.Slider(2, 15, 7, step=1, label='Guidance Scale: How much the AI influences the Upscaling.'),
-            gr.Slider(
+            gr.Slider(2, 100, 10, step=1, label='Number of Iterations'),
             gr.Checkbox(value=True,label="Enable Xformers memory efficient attention"),
             gr.Checkbox(value=True,label="Enable sequential CPU offload"),
             gr.Checkbox(value=True,label="Enable attention slicing")
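The inputs list is positional, so the new "Upscale 2 or 4" slider has to sit exactly where up_factor sits in the upscale() signature, and gr.Slider(2, 4, 2, step=2) can only produce 2 or 4. A trimmed, hypothetical sketch of that wiring, with the checkboxes dropped, an assumed image widget, and a stub in place of the real function (the real app declares these inputs inside its gr.Blocks() layout):

import gradio as gr

def upscale_stub(image, prompt, negative_prompt, rows, up_factor, guidance, iterations):
    # Placeholder so the sketch runs without a GPU; the real fn is upscale().
    print("stub:", rows, up_factor, guidance, iterations)
    return image

demo = gr.Interface(
    fn=upscale_stub,
    inputs=[
        gr.Image(type="pil"),                                   # assumed image input
        gr.Textbox(label="prompt", value="empty room"),
        gr.Textbox(label="negative prompt", value="jpeg artifacts, lowres, bad quality, watermark, text"),
        gr.Number(value=2, label="Tile grid dimension amount (number of rows and columns) - X by X "),
        gr.Slider(2, 4, 2, step=2, label="Upscale 2 or 4"),     # min=2, max=4, step=2 -> only 2 or 4
        gr.Slider(2, 15, 7, step=1, label="Guidance Scale"),
        gr.Slider(2, 100, 10, step=1, label="Number of Iterations"),
    ],
    outputs=gr.Image(),
)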
@@ -380,7 +394,7 @@ with gr.Blocks() as app:
             outputs=gr.Image())
 
 
-
-app.launch(
+app.queue()
+app.launch()
 
 # UP 1
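Enabling the queue before launch is the usual way to keep long GPU jobs like this from hitting request timeouts: submissions wait in Gradio's queue and are processed in turn. The commit calls it with defaults; a hedged variant with an explicit cap on pending requests would look like:

# Hypothetical variant of the last two lines: same behaviour, plus an
# explicit limit on how many requests may wait in the queue.
app.queue(max_size=8)
app.launch()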