"""The model used in this Space alters the underlying Stable Diffusion model available at
https://huggingface.co/CompVis/stable-diffusion-v1-4 through the addition of new embedding vectors
in order to capture the likeness of the Determined AI logo.  These alternations are fully captured
in the learned_embeddings_dict.pt pickle file in the root of the repository."""
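
# Assumed (illustrative) structure of learned_embeddings_dict.pt, inferred from how it
# is consumed below rather than from a documented schema:
#
#   {
#       "<det-logo>": {
#           "initializer_tokens": [...],   # tokens used to initialize the new embeddings
#           "learned_embeddings": [...],   # one embedding tensor per dummy placeholder token
#       },
#   }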

import pathlib
import os
from PIL import Image

import gradio as gr
import torch
from diffusers import StableDiffusionPipeline

import utils

# Auth token used to download the gated CompVis weights from the Hugging Face Hub.
use_auth_token = os.environ["HF_AUTH_TOKEN"]
NSFW_IMAGE = Image.open("nsfw.png")
BATCH_SIZE = 2

# Select the device and precision, then instantiate the pipeline: fp16 weights on GPU,
# full-precision weights on CPU.
device, revision, torch_dtype = (
    ("cuda", "fp16", torch.float16)
    if torch.cuda.is_available()
    else ("cpu", "main", torch.float32)
)
pipeline = StableDiffusionPipeline.from_pretrained(
    pretrained_model_name_or_path="CompVis/stable-diffusion-v1-4",
    use_auth_token=use_auth_token,
    revision=revision,
    torch_dtype=torch_dtype,
).to(device)

# Load in the new concepts.
CONCEPT_PATH = pathlib.Path("learned_embeddings_dict.pt")
# map_location ensures the pickle loads even if it was saved from a GPU run.
learned_embeddings_dict = torch.load(CONCEPT_PATH, map_location="cpu")
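# Lightweight check that the pickle matches the schema sketched above; the required
# keys are an assumption based on how the entries are consumed in the loop below.
for _concept, _entry in learned_embeddings_dict.items():
    _missing = {"initializer_tokens", "learned_embeddings"} - set(_entry)
    assert not _missing, f"Concept {_concept!r} is missing keys: {_missing}"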

# For each concept, register dummy placeholder tokens with the tokenizer and copy the
# learned embedding vectors into the text encoder's embedding matrix.
concept_to_dummy_tokens_map = {}
for concept_token, embedding_dict in learned_embeddings_dict.items():
    initializer_tokens = embedding_dict["initializer_tokens"]
    learned_embeddings = embedding_dict["learned_embeddings"]
    (
        initializer_ids,
        dummy_placeholder_ids,
        dummy_placeholder_tokens,
    ) = utils.add_new_tokens_to_tokenizer(
        concept_token=concept_token,
        initializer_tokens=initializer_tokens,
        tokenizer=pipeline.tokenizer,
    )
    pipeline.text_encoder.resize_token_embeddings(len(pipeline.tokenizer))
    token_embeddings = pipeline.text_encoder.get_input_embeddings().weight.data
    for d_id, tensor in zip(dummy_placeholder_ids, learned_embeddings):
        token_embeddings[d_id] = tensor
    concept_to_dummy_tokens_map[concept_token] = dummy_placeholder_tokens
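
# After the loop, each concept token maps to a space-separated string of dummy
# placeholder tokens, e.g. (illustrative; the exact naming comes from
# utils.add_new_tokens_to_tokenizer):
#   {"<det-logo>": "<det-logo>-0 <det-logo>-1"}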


def replace_concept_tokens(text: str) -> str:
    """Replace each concept token in the prompt with its dummy placeholder tokens."""
    for concept_token, dummy_tokens in concept_to_dummy_tokens_map.items():
        text = text.replace(concept_token, dummy_tokens)
    return text


# Gallery state: images generated across calls accumulate here for display.
all_imgs = []


def inference(prompt: str, guidance_scale: float, num_inference_steps: int, seed: int):
    prompt = replace_concept_tokens(prompt)
    generator = torch.Generator(device=device).manual_seed(seed)
    output = pipeline(
        prompt=[prompt] * BATCH_SIZE,
        num_inference_steps=num_inference_steps,
        guidance_scale=guidance_scale,
        generator=generator,
    )
    img_list, nsfw_list = output.images, output.nsfw_content_detected
    filtered_imgs = [
        img if not nsfw else NSFW_IMAGE for img, nsfw in zip(img_list, nsfw_list)
    ]
    all_imgs.extend(filtered_imgs)
    return all_imgs
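
# Illustrative call (hypothetical values within the slider ranges defined below):
#   inference("a Van Gogh painting of a <det-logo>", guidance_scale=7.5,
#             num_inference_steps=45, seed=1024)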


css = """
        .gradio-container {
            font-family: 'IBM Plex Sans', sans-serif;
        }
        .gr-button {
            color: white;
            border-color: black;
            background: black;
        }
        input[type='range'] {
            accent-color: black;
        }
        .dark input[type='range'] {
            accent-color: #dfdfdf;
        }
        .container {
            max-width: 730px;
            margin: auto;
            padding-top: 1.5rem;
        }
        #gallery {
            min-height: 22rem;
            margin-bottom: 15px;
            margin-left: auto;
            margin-right: auto;
            border-bottom-right-radius: .5rem !important;
            border-bottom-left-radius: .5rem !important;
        }
        #gallery>div>.h-full {
            min-height: 20rem;
        }
        .details:hover {
            text-decoration: underline;
        }
        .gr-button {
            white-space: nowrap;
        }
        .gr-button:focus {
            border-color: rgb(147 197 253 / var(--tw-border-opacity));
            outline: none;
            box-shadow: var(--tw-ring-offset-shadow), var(--tw-ring-shadow), var(--tw-shadow, 0 0 #0000);
            --tw-border-opacity: 1;
            --tw-ring-offset-shadow: var(--tw-ring-inset) 0 0 0 var(--tw-ring-offset-width) var(--tw-ring-offset-color);
            --tw-ring-shadow: var(--tw-ring-inset) 0 0 0 calc(3px + var(--tw-ring-offset-width)) var(--tw-ring-color);
            --tw-ring-color: rgb(191 219 254 / var(--tw-ring-opacity));
            --tw-ring-opacity: .5;
        }
        #advanced-btn {
            font-size: .7rem !important;
            line-height: 19px;
            margin-top: 12px;
            margin-bottom: 12px;
            padding: 2px 8px;
            border-radius: 14px !important;
        }
        #advanced-options {
            display: none;
            margin-bottom: 20px;
        }
        .footer {
            margin-bottom: 45px;
            margin-top: 35px;
            text-align: center;
            border-bottom: 1px solid #e5e5e5;
        }
        .footer>p {
            font-size: .8rem;
            display: inline-block;
            padding: 0 10px;
            transform: translateY(10px);
            background: white;
        }
        .dark .footer {
            border-color: #303030;
        }
        .dark .footer>p {
            background: #0b0f19;
        }
        .acknowledgments h4{
            margin: 1.25em 0 .25em 0;
            font-weight: bold;
            font-size: 115%;
        }
        #container-advanced-btns{
            display: flex;
            flex-wrap: wrap;
            justify-content: space-between;
            align-items: center;
        }
        .animate-spin {
            animation: spin 1s linear infinite;
        }
        @keyframes spin {
            from {
                transform: rotate(0deg);
            }
            to {
                transform: rotate(360deg);
            }
        }
        #share-btn-container {
            display: flex; padding-left: 0.5rem !important; padding-right: 0.5rem !important; background-color: #000000; justify-content: center; align-items: center; border-radius: 9999px !important; width: 13rem;
        }
        #share-btn {
            all: initial; color: #ffffff;font-weight: 600; cursor:pointer; font-family: 'IBM Plex Sans', sans-serif; margin-left: 0.5rem !important; padding-top: 0.25rem !important; padding-bottom: 0.25rem !important;
        }
        #share-btn * {
            all: unset;
        }
        .gr-form{
            flex: 1 1 50%; border-top-right-radius: 0; border-bottom-right-radius: 0;
        }
        #prompt-container{
            gap: 0;
        }
"""

block = gr.Blocks(css=css)

# Each example populates only the prompt input.
examples = [
    [
        "a Van Gogh painting of a <det-logo> with thick strokes, masterful composition",
    ],
    [
        "Futuristic <det-logo> in a desert, painting, octane render, 4 k, anime sky, warm colors",
    ],
    [
        "cell shaded cartoon of a <det-logo>, subtle colors, post grunge, concept art by josan gonzales and wlop, by james jean, victo ngai, david rubin, mike mignola, deviantart, art by artgem",
    ],
    [
        "a <det-logo> cloudy sky background lush landscape illustration concept art anime key visual trending pixiv fanbox by wlop and greg rutkowski and makoto shinkai and studio ghibli",
    ],
    [
        "Immense <det-logo> advanced technology scifi architectural structure desert planet alien wardrobe tim hildebrandt, wayne barlowe, bruce pennington, donato giancola, larry elmore, oil on canvas, masterpiece, trending on artstation, featured on pixiv, cinematic composition, dramatic, beautiful lighting",
    ],
]


with block:
    gr.HTML(
        """
            <div style="text-align: center; max-width: 650px; margin: 0 auto;">
              <div
                style="
                  display: inline-flex;
                  align-items: center;
                  gap: 0.8rem;
                  font-size: 1.75rem;
                "
              >
                <h1 style="font-weight: 900; margin-bottom: 7px;">
                  Determined AI Textual Inversion Demo
                </h1>
              </div>
            </div>
        """
    )
    with gr.Group():
        with gr.Box():
            with gr.Row(elem_id="prompt-container").style(equal_height=True):
                prompt = gr.Textbox(
                    label="Enter a prompt including '<det-logo>'",
                    show_label=False,
                    max_lines=1,
                    placeholder="Enter a prompt including '<det-logo>'",
                    elem_id="prompt-text-input",
                ).style(
                    container=False,
                )
                btn = gr.Button("Generate image").style(
                    full_width=False,
                )

        gallery = gr.Gallery(
            label="Generated images", show_label=False, elem_id="gallery"
        ).style(grid=[BATCH_SIZE], height="auto")

        with gr.Group(elem_id="container-advanced-btns"):
            advanced_button = gr.Button("Advanced options", elem_id="advanced-btn")

        with gr.Row(elem_id="advanced-options"):
            num_inference_steps = gr.Slider(
                label="Steps", minimum=20, maximum=60, value=40, step=1
            )
            guidance_scale = gr.Slider(
                label="Guidance Scale", minimum=1.0, maximum=30.0, value=4.0, step=0.1
            )
            seed = gr.Slider(
                label="Seed",
                minimum=0,
                maximum=2147483647,
                step=1,
                randomize=True,
            )

        ex = gr.Examples(
            examples=examples,
            fn=inference,
            inputs=[prompt, guidance_scale, num_inference_steps, seed],
            outputs=[gallery],
            cache_examples=False,
        )
        # Hide the header row of the examples table.
        ex.dataset.headers = [""]

        prompt.submit(
            inference,
            inputs=[prompt, guidance_scale, num_inference_steps, seed],
            outputs=[gallery],
        )
        btn.click(
            inference,
            inputs=[prompt, guidance_scale, num_inference_steps, seed],
            outputs=[gallery],
        )
        advanced_button.click(
            None,
            [],
            prompt,
            _js="""
            () => {
                var appDom = document.querySelector("body > gradio-app");
                var options = appDom.querySelector("#advanced-options")
                if (options == null) {options = appDom.shadowRoot.querySelector("#advanced-options")}
                options.style.display = ["none", ""].includes(options.style.display) ? "flex" : "none";
            }""",
        )
        gr.HTML(
            """
                <div class="footer">
                    <p>Underlying model by <a href="https://huggingface.co/CompVis" style="text-decoration: underline;" target="_blank">CompVis</a> and <a href="https://huggingface.co/stabilityai" style="text-decoration: underline;" target="_blank">Stability AI</a> - Gradio code based on the <a href="https://huggingface.co/spaces/stabilityai/stable-diffusion" style="text-decoration: underline;" target="_blank">Stability AI Demo</a>
                    </p>
                </div>
                <div class="acknowledgments">
                    <p><h4>LICENSE</h4>
The model is licensed under the <a href="https://huggingface.co/spaces/CompVis/stable-diffusion-license" style="text-decoration: underline;" target="_blank">CreativeML Open RAIL-M</a> license. The authors claim no rights on the outputs you generate; you are free to use them, and you are accountable for their use, which must not go against the provisions set in this license. The license forbids you from sharing any content that violates any laws, harms a person, disseminates personal information with intent to harm, spreads misinformation, or targets vulnerable groups. For the full list of restrictions please <a href="https://huggingface.co/spaces/CompVis/stable-diffusion-license" style="text-decoration: underline;" target="_blank">read the license</a>.</p>
<p><h4>Model Changes</h4>
The model used in this Space alters the underlying <a href="https://huggingface.co/CompVis/stable-diffusion-v1-4" style="text-decoration: underline;" target="_blank">stable-diffusion-v1-4</a> model through the addition of new embedding vectors in order to capture the likeness of the <a href="https://www.determined.ai" style="text-decoration: underline;" target="_blank">Determined AI</a> logo.</p>
                    <p><h4>Biases and content acknowledgment</h4>
Despite how impressive turning text into images is, be aware that this model may output content that reinforces or exacerbates societal biases, as well as realistic faces, pornography, and violence. The model was trained on the <a href="https://laion.ai/blog/laion-5b/" style="text-decoration: underline;" target="_blank">LAION-5B dataset</a>, which scraped non-curated image-text pairs from the internet (the exception being the removal of illegal content) and is meant for research purposes. You can read more in the <a href="https://huggingface.co/CompVis/stable-diffusion-v1-4" style="text-decoration: underline;" target="_blank">model card</a>.</p>
               </div>
           """
        )

block.queue(max_size=10).launch(share=True)