import gradio as gr
import torch
from PIL import Image
import numpy as np
from diffusers import StableDiffusionDepth2ImgPipeline
from pathlib import Path

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# Load the depth-conditioned Stable Diffusion pipeline; use fp16 weights only when a GPU is available.
depth2img = StableDiffusionDepth2ImgPipeline.from_pretrained(
    "stabilityai/stable-diffusion-2-depth",
    torch_dtype=torch.float16 if device.type == 'cuda' else torch.float32,
).to(device)

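# Small CSS tweaks for the hosted layout (badge image, instruction placement, component spacing).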
css = '''
    .instruction{position: absolute; top: 0;right: 0;margin-top: 0px !important}
    .arrow{position: absolute;top: 0;right: -110px;margin-top: -8px !important}
    #component-4, #component-3, #component-10{min-height: 0}
    .duplicate-button img{margin: 0}
'''


def pad_image(input_image):
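    # Edge-pad so both sides become multiples of 64 (expects a 3-channel RGB image),
    # then letterbox onto a black square canvas so a later square resize keeps the aspect ratio.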
    pad_w, pad_h = np.max(((2, 2), np.ceil(
        np.array(input_image.size) / 64).astype(int)), axis=0) * 64 - input_image.size
    im_padded = Image.fromarray(
        np.pad(np.array(input_image), ((0, pad_h), (0, pad_w), (0, 0)), mode='edge'))
    w, h = im_padded.size
    if w == h:
        return im_padded
    elif w > h:
        new_image = Image.new(im_padded.mode, (w, w), (0, 0, 0))
        new_image.paste(im_padded, (0, (w - h) // 2))
        return new_image
    else:
        new_image = Image.new(im_padded.mode, (h, h), (0, 0, 0))
        new_image.paste(im_padded, ((h - w) // 2, 0))
        return new_image


def predict(input_image, prompt, negative_prompt, steps, num_samples, scale, seed, strength, depth_image=None):
    depth = None
    if depth_image is not None:
        # Optional user-supplied depth map: pad/letterbox, resize, and convert to a [1, 1, H, W] float tensor.
        depth_image = pad_image(depth_image.convert("RGB"))
        depth_image = depth_image.resize((512, 512))
        depth = np.array(depth_image.convert("L"))
        depth = depth.astype(np.float32) / 255.0
        depth = depth[None, None]
        depth = torch.from_numpy(depth)
    init_image = input_image.convert("RGB")
    image = pad_image(init_image)  # pad to an integer multiple of 64 and letterbox to a square
    image = image.resize((512, 512))
    # Seed a generator explicitly; the pipeline takes a `generator`, not a `seed` argument.
    generator = torch.Generator(device=device).manual_seed(int(seed))
    result = depth2img(
        image=image,
        prompt=prompt,
        negative_prompt=negative_prompt,
        depth_map=depth,
        generator=generator,
        strength=strength,
        num_inference_steps=int(steps),
        guidance_scale=scale,
        num_images_per_prompt=int(num_samples),
    )
    return result['images']


# Build the Gradio UI (applying the CSS defined above) and enable request queuing.
block = gr.Blocks(css=css).queue()
with block:
    with gr.Row():
        with gr.Column():
            top_description = gr.HTML(f'''
                    <div style="text-align: center; max-width: 650px; margin: 0 auto;">
                    <div>
                        <img class="logo" src="file/mirage.png" alt="Mirage Logo"
                            style="margin: auto; max-width: 7rem;">
                        <br />
                        <h1 style="font-weight: 900; font-size: 2.5rem;">
                        Depth2Img Web UI
                        </h1>
                        <br />
                        <a class="duplicate-button" style="display:inline-block" target="_blank" href="https://huggingface.co/spaces/MirageML/dreambooth?duplicate=true"><img src="https://img.shields.io/badge/-Duplicate%20Space-blue?labelColor=white&style=flat&logo=data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAAAXNSR0IArs4c6QAAAP5JREFUOE+lk7FqAkEURY+ltunEgFXS2sZGIbXfEPdLlnxJyDdYB62sbbUKpLbVNhyYFzbrrA74YJlh9r079973psed0cvUD4A+4HoCjsA85X0Dfn/RBLBgBDxnQPfAEJgBY+A9gALA4tcbamSzS4xq4FOQAJgCDwV2CPKV8tZAJcAjMMkUe1vX+U+SMhfAJEHasQIWmXNN3abzDwHUrgcRGmYcgKe0bxrblHEB4E/pndMazNpSZGcsZdBlYJcEL9Afo75molJyM2FxmPgmgPqlWNLGfwZGG6UiyEvLzHYDmoPkDDiNm9JR9uboiONcBXrpY1qmgs21x1QwyZcpvxt9NS09PlsPAAAAAElFTkSuQmCC&logoWidth=14" alt="Duplicate Space"></a>
                    </div>
                    </div>
                ''')
            # gr.Markdown("## Stable Diffusion 2 Depth2Img")
            # gr.HTML("<p><a href='https://huggingface.co/spaces/radames/stable-diffusion-depth2img?duplicate=true'><img src='https://img.shields.io/badge/-Duplicate%20Space-blue?labelColor=white&style=flat&logo=data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAAAXNSR0IArs4c6QAAAP5JREFUOE+lk7FqAkEURY+ltunEgFXS2sZGIbXfEPdLlnxJyDdYB62sbbUKpLbVNhyYFzbrrA74YJlh9r079973psed0cvUD4A+4HoCjsA85X0Dfn/RBLBgBDxnQPfAEJgBY+A9gALA4tcbamSzS4xq4FOQAJgCDwV2CPKV8tZAJcAjMMkUe1vX+U+SMhfAJEHasQIWmXNN3abzDwHUrgcRGmYcgKe0bxrblHEB4E/pndMazNpSZGcsZdBlYJcEL9Afo75molJyM2FxmPgmgPqlWNLGfwZGG6UiyEvLzHYDmoPkDDiNm9JR9uboiONcBXrpY1qmgs21x1QwyZcpvxt9NS09PlsPAAAAAElFTkSuQmCC&logoWidth=14' alt='Duplicate Space'></a></p>")


    with gr.Row():
        with gr.Column():
            input_image = gr.Image(source='upload', type="pil")
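            # The optional depth-map upload below is disabled; when depth is None the pipeline estimates it.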
            # depth_image = gr.Image(
            #     source='upload', type="pil", label="Depth image Optional", value=None)
            depth_image = None
            prompt = gr.Textbox(label="Prompt")
            negative_prompt = gr.Textbox(label="Negative Pompt")

            run_button = gr.Button(label="Run")
            with gr.Accordion("Advanced options", open=False):
                num_samples = gr.Slider(
                    label="Images", minimum=1, maximum=4, value=1, step=1)
                steps = gr.Slider(label="Steps", minimum=1,
                                  maximum=100, value=50, step=1)
                scale = gr.Slider(
                    label="Guidance Scale", minimum=0.1, maximum=30.0, value=9.0, step=0.1
                )
                strength = gr.Slider(
                    label="Strength", minimum=0.0, maximum=1.0, value=0.9, step=0.01
                )
                seed = gr.Slider(
                    label="Seed",
                    minimum=0,
                    maximum=2147483647,
                    step=1,
                    randomize=True,
                )
        with gr.Column():
            gallery = gr.Gallery(label="Generated images", show_label=False).style(
                grid=[2], height="auto")
    gr.Examples(
        examples=[
            ["./examples/original_iso.png", "hogwarts castle",
             "", 50, 4, 10.0, 123123123, 0.8],
            ["./examples/original_sword.png", "flaming sword",
             "", 50, 4, 9.0, 1734133747, 0.8],

        ],
        inputs=[input_image, prompt, negative_prompt, steps,
                num_samples, scale, seed, strength],
        outputs=[gallery],
        fn=predict,
        cache_examples=True,
    )
    run_button.click(fn=predict, inputs=[input_image, prompt, negative_prompt,
                     steps, num_samples, scale, seed, strength], outputs=[gallery])


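# show_api=False hides the auto-generated API documentation page.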
block.launch(show_api=False)