Spaces:
Sleeping
Sleeping
Commit
·
866b10e
1
Parent(s):
50b2a88
up
Browse files
- app.py +23 -62
- requirements.txt +3 -2
app.py
CHANGED
@@ -22,8 +22,7 @@ from diffusers import StableDiffusionUpscalePipeline
|
|
22 |
from diffusers import LDMSuperResolutionPipeline
|
23 |
import cv2
|
24 |
import onnxruntime
|
25 |
-
|
26 |
-
# from xformers.ops import MemoryEfficientAttentionFlashAttentionOp
|
27 |
|
28 |
def removeFurniture(input_img1,
|
29 |
input_img2,
|
@@ -306,11 +305,20 @@ def upscale_image(img, rows, cols, seed, prompt, negative_prompt, xformers, cpu_
|
|
306 |
# Save the merged image
|
307 |
return final_img
|
308 |
|
309 |
-
|
310 |
-
def upscale( image, prompt):
|
311 |
-
print("upscale", image, prompt)
|
312 |
# return upscale1(image, prompt)
|
313 |
-
return upscale_image(image,
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
314 |
|
315 |
modes = {
|
316 |
'1': '1',
|
@@ -359,66 +367,19 @@ with gr.Blocks() as app:
|
|
359 |
gr.Button("Upscale").click(
|
360 |
upscale,
|
361 |
inputs=[
|
362 |
-
gr.Image(),
|
363 |
-
gr.Textbox(label="prompt",value="empty room")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
364 |
],
|
365 |
outputs=gr.Image())
|
366 |
|
367 |
|
368 |
-
|
369 |
-
# with gr.Row():
|
370 |
-
|
371 |
-
# with gr.Column(scale=55):
|
372 |
-
# with gr.Group():
|
373 |
-
# with gr.Row():
|
374 |
-
# prompt = gr.Textbox(label="Prompt", show_label=False, max_lines=2,placeholder=f"Enter prompt")
|
375 |
-
# generate = gr.Button(value="Generate")
|
376 |
-
|
377 |
-
# gallery = gr.Gallery(label="Generated images", show_label=False)
|
378 |
-
# state_info = gr.Textbox(label="State", show_label=False, max_lines=2)
|
379 |
-
# error_output = gr.Markdown(visible=False)
|
380 |
-
|
381 |
-
# with gr.Column(scale=45):
|
382 |
-
# inf_mode = gr.Radio(label="Inference Mode", choices=list(modes.values())[:4], value=modes['txt2img']) # TODO remove [:3] limit
|
383 |
-
|
384 |
-
# with gr.Group(visible=False) as i2i_options:
|
385 |
-
# image = gr.Image(label="Image", height=128, type="pil")
|
386 |
-
# inpaint_info = gr.Markdown("Inpainting resizes and pads images to 512x512", visible=False)
|
387 |
-
# upscale_info = gr.Markdown("""Best for small images (128x128 or smaller).<br>
|
388 |
-
# Bigger images will be sliced into 128x128 tiles which will be upscaled individually.<br>
|
389 |
-
# This is done to avoid running out of GPU memory.""", visible=False)
|
390 |
-
# strength = gr.Slider(label="Transformation strength", minimum=0, maximum=1, step=0.01, value=0.5)
|
391 |
-
|
392 |
-
# with gr.Group():
|
393 |
-
# neg_prompt = gr.Textbox(label="Negative prompt", placeholder="What to exclude from the image")
|
394 |
-
|
395 |
-
# n_images = gr.Slider(label="Number of images", value=1, minimum=1, maximum=4, step=1)
|
396 |
-
# with gr.Row():
|
397 |
-
# guidance = gr.Slider(label="Guidance scale", value=7.5, maximum=15)
|
398 |
-
# steps = gr.Slider(label="Steps", value=current_steps, minimum=2, maximum=100, step=1)
|
399 |
-
|
400 |
-
# with gr.Row():
|
401 |
-
# width = gr.Slider(label="Width", value=768, minimum=64, maximum=1024, step=8)
|
402 |
-
# height = gr.Slider(label="Height", value=768, minimum=64, maximum=1024, step=8)
|
403 |
-
|
404 |
-
# seed = gr.Slider(0, 2147483647, label='Seed (0 = random)', value=0, step=1)
|
405 |
-
# with gr.Accordion("Memory optimization"):
|
406 |
-
# attn_slicing = gr.Checkbox(label="Attention slicing (a bit slower, but uses less memory)", value=attn_slicing_enabled)
|
407 |
-
# # mem_eff_attn = gr.Checkbox(label="Memory efficient attention (xformers)", value=mem_eff_attn_enabled)
|
408 |
-
|
409 |
-
# inf_mode.change(on_mode_change, inputs=[inf_mode], outputs=[i2i_options, inpaint_info, upscale_info, strength], queue=False)
|
410 |
-
# steps.change(on_steps_change, inputs=[steps], outputs=[], queue=False)
|
411 |
-
# attn_slicing.change(lambda x: switch_attention_slicing(x), inputs=[attn_slicing], queue=False)
|
412 |
-
# # mem_eff_attn.change(lambda x: switch_mem_eff_attn(x), inputs=[mem_eff_attn], queue=False)
|
413 |
-
|
414 |
-
# inputs = [inf_mode, prompt, n_images, guidance, steps, width, height, seed, image, strength, neg_prompt]
|
415 |
-
# outputs = [gallery, error_output]
|
416 |
-
# prompt.submit(inference, inputs=inputs, outputs=outputs)
|
417 |
-
# generate.click(inference, inputs=inputs, outputs=outputs)
|
418 |
-
|
419 |
-
# app.load(update_state_info, inputs=state_info, outputs=state_info, every=0.5, show_progress=False)
|
420 |
-
|
421 |
-
|
422 |
app.queue()
|
423 |
app.launch(debug=True,share=True, height=768)
|
424 |
|
|
|
22 |
from diffusers import LDMSuperResolutionPipeline
|
23 |
import cv2
|
24 |
import onnxruntime
|
25 |
+
from split_image import split
|
|
|
26 |
|
27 |
def removeFurniture(input_img1,
|
28 |
input_img2,
|
|
|
305 |
# Save the merged image
|
306 |
return final_img
|
307 |
|
308 |
+
|
309 |
+
def upscale(image, prompt, negative_prompt, rows, guidance, iterations, xformers_input, cpu_offload_input, attention_slicing_input):
    """Upscale *image* by splitting it into a square tile grid and delegating
    the per-tile diffusion upscaling to ``upscale_image``.

    Args:
        image: Source image to upscale (as delivered by the Gradio Image input).
        prompt: Text prompt guiding the upscaler.
        negative_prompt: Text describing qualities/artifacts to steer away from.
        rows: Tile-grid dimension; the image is processed as a ``rows x rows``
            grid. Coerced to ``int`` because the Gradio ``Number`` component
            delivers a float.
        guidance: Guidance scale passed through to the upscaler.
        iterations: Iteration count passed through to the upscaler
            (per the UI slider label "Number of Iterations").
        xformers_input: Whether to enable xformers memory-efficient attention.
        cpu_offload_input: Whether to enable sequential CPU offload.
        attention_slicing_input: Whether to enable attention slicing.

    Returns:
        The merged, upscaled image returned by ``upscale_image``.
    """
    print("upscale", image, prompt, negative_prompt, rows, guidance, iterations, xformers_input, cpu_offload_input, attention_slicing_input)
    # gr.Number yields a float; the tile grid needs an integer dimension.
    rows = int(rows)
    return upscale_image(image,
                         rows=rows, cols=rows,  # square tile grid (rows x rows)
                         seed=-1,               # presumably -1 => random seed; confirm against upscale_image
                         prompt=prompt,
                         guidance=guidance,
                         negative_prompt=negative_prompt,
                         xformers=xformers_input,
                         cpu_offload=cpu_offload_input,
                         attention_slicing=attention_slicing_input,
                         iterations=iterations)
|
322 |
|
323 |
modes = {
|
324 |
'1': '1',
|
|
|
367 |
gr.Button("Upscale").click(
|
368 |
upscale,
|
369 |
inputs=[
|
370 |
+
gr.Image(label="Source Image to upscale"),
|
371 |
+
gr.Textbox(label="prompt",value="empty room"),
|
372 |
+
gr.Textbox(label="negative prompt",value="jpeg artifacts, lowres, bad quality, watermark, text"),
|
373 |
+
gr.Number(value=2, label="Tile grid dimension amount (number of rows and columns) - X by X "),
|
374 |
+
gr.Slider(2, 15, 7, step=1, label='Guidance Scale: How much the AI influences the Upscaling.'),
|
375 |
+
gr.Slider(10, 100, 10, step=1, label='Number of Iterations'),
|
376 |
+
gr.Checkbox(value=True,label="Enable Xformers memory efficient attention"),
|
377 |
+
gr.Checkbox(value=True,label="Enable sequential CPU offload"),
|
378 |
+
gr.Checkbox(value=True,label="Enable attention slicing")
|
379 |
],
|
380 |
outputs=gr.Image())
|
381 |
|
382 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
383 |
app.queue()
|
384 |
app.launch(debug=True,share=True, height=768)
|
385 |
|
requirements.txt
CHANGED
@@ -2,11 +2,12 @@ diffusers==0.15.0
|
|
2 |
xformers==0.0.16
|
3 |
transformers==4.28.0
|
4 |
torchvision==0.14.1
|
5 |
-
|
6 |
opencv-python-headless==4.7.0.72
|
7 |
scipy==1.10.0
|
8 |
python-docx
|
9 |
triton
|
10 |
altair<5
|
11 |
gradio
|
12 |
-
onnxruntime==1.12.0
|
|
|
|
2 |
xformers==0.0.16
|
3 |
transformers==4.28.0
|
4 |
torchvision==0.14.1
|
5 |
+
accelerate
|
6 |
opencv-python-headless==4.7.0.72
|
7 |
scipy==1.10.0
|
8 |
python-docx
|
9 |
triton
|
10 |
altair<5
|
11 |
gradio
|
12 |
+
onnxruntime==1.12.0
|
13 |
+
split_image
|