SamirXR ai-forever committed on
Commit
bbbb497
·
0 Parent(s):

Duplicate from ai-forever/Kandinsky2.1

Browse files

Co-authored-by: ai-forever <[email protected]>

Files changed (7) hide show
  1. .gitattributes +34 -0
  2. NatallE.png +0 -0
  3. README.md +13 -0
  4. app.py +253 -0
  5. kandi2.png +0 -0
  6. packages.txt +3 -0
  7. requirements.txt +4 -0
.gitattributes ADDED
@@ -0,0 +1,34 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ *.7z filter=lfs diff=lfs merge=lfs -text
2
+ *.arrow filter=lfs diff=lfs merge=lfs -text
3
+ *.bin filter=lfs diff=lfs merge=lfs -text
4
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
5
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
6
+ *.ftz filter=lfs diff=lfs merge=lfs -text
7
+ *.gz filter=lfs diff=lfs merge=lfs -text
8
+ *.h5 filter=lfs diff=lfs merge=lfs -text
9
+ *.joblib filter=lfs diff=lfs merge=lfs -text
10
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
12
+ *.model filter=lfs diff=lfs merge=lfs -text
13
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
14
+ *.npy filter=lfs diff=lfs merge=lfs -text
15
+ *.npz filter=lfs diff=lfs merge=lfs -text
16
+ *.onnx filter=lfs diff=lfs merge=lfs -text
17
+ *.ot filter=lfs diff=lfs merge=lfs -text
18
+ *.parquet filter=lfs diff=lfs merge=lfs -text
19
+ *.pb filter=lfs diff=lfs merge=lfs -text
20
+ *.pickle filter=lfs diff=lfs merge=lfs -text
21
+ *.pkl filter=lfs diff=lfs merge=lfs -text
22
+ *.pt filter=lfs diff=lfs merge=lfs -text
23
+ *.pth filter=lfs diff=lfs merge=lfs -text
24
+ *.rar filter=lfs diff=lfs merge=lfs -text
25
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
26
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
27
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
28
+ *.tflite filter=lfs diff=lfs merge=lfs -text
29
+ *.tgz filter=lfs diff=lfs merge=lfs -text
30
+ *.wasm filter=lfs diff=lfs merge=lfs -text
31
+ *.xz filter=lfs diff=lfs merge=lfs -text
32
+ *.zip filter=lfs diff=lfs merge=lfs -text
33
+ *.zst filter=lfs diff=lfs merge=lfs -text
34
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
NatallE.png ADDED
README.md ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ title: Kandinsky2.1
3
+ emoji: 📉
4
+ colorFrom: indigo
5
+ colorTo: green
6
+ sdk: gradio
7
+ sdk_version: 3.11.0
8
+ app_file: app.py
9
+ pinned: false
10
+ duplicated_from: ai-forever/Kandinsky2.1
11
+ ---
12
+
13
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py ADDED
@@ -0,0 +1,253 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import os

import gradio as gr
import torch
from torch import autocast
from kandinsky2 import get_kandinsky2

# Pick GPU when present. NOTE(review): `model` below is hard-coded to 'cuda'
# and ignores this fallback — confirm whether CPU inference is actually
# supported before relying on it.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# Load the Kandinsky 2.1 text-to-image pipeline once at startup.
# (The duplicate `from kandinsky2 import get_kandinsky2` that was here has
# been removed; the name is already imported above.)
model = get_kandinsky2('cuda', task_type='text2img', model_version='2.1', use_flash_attention=False)

# Reference generation parameters kept for documentation (previously a
# no-op triple-quoted string):
#   num_steps=50, batch_size=4, guidance_scale=7, h=768, w=768,
#   sampler='ddim_sampler', prior_cf_scale=1, prior_steps='25'
25
def infer(prompt, negative='low quality, bad quality'):
    """Generate images for *prompt*, steering away from *negative*.

    The negative text is applied to both the prior and the decoder stages
    of the Kandinsky 2.1 pipeline. Returns the list of generated images.
    """
    generation_kwargs = dict(
        negative_prior_prompt=negative,
        negative_decoder_prompt=negative,
        num_steps=50,
        batch_size=1,
        guidance_scale=4,
        h=768,
        w=768,
        sampler='ddim_sampler',
        prior_cf_scale=1,
        prior_steps="25",
    )
    return model.generate_text2img(prompt, **generation_kwargs)
37
+
38
# Custom CSS for the Gradio UI (adapted from the Stable Diffusion demo
# space). Fix: the `--tw-ring-shadow` rule used `calc(3px var(...))`,
# which is an invalid CSS calc() expression — the `+` operator was missing.
css = """
.gradio-container {
    font-family: 'IBM Plex Sans', sans-serif;
}
.gr-button {
    color: white;
    border-color: black;
    background: black;
}
input[type='range'] {
    accent-color: black;
}
.dark input[type='range'] {
    accent-color: #dfdfdf;
}
.container {
    max-width: 730px;
    margin: auto;
    padding-top: 1.5rem;
}
#gallery {
    min-height: 22rem;
    margin-bottom: 15px;
    margin-left: auto;
    margin-right: auto;
    border-bottom-right-radius: .5rem !important;
    border-bottom-left-radius: .5rem !important;
}
#gallery>div>.h-full {
    min-height: 20rem;
}
.details:hover {
    text-decoration: underline;
}
.gr-button {
    white-space: nowrap;
}
.gr-button:focus {
    border-color: rgb(147 197 253 / var(--tw-border-opacity));
    outline: none;
    box-shadow: var(--tw-ring-offset-shadow), var(--tw-ring-shadow), var(--tw-shadow, 0 0 #0000);
    --tw-border-opacity: 1;
    --tw-ring-offset-shadow: var(--tw-ring-inset) 0 0 0 var(--tw-ring-offset-width) var(--tw-ring-offset-color);
    --tw-ring-shadow: var(--tw-ring-inset) 0 0 0 calc(3px + var(--tw-ring-offset-width)) var(--tw-ring-color);
    --tw-ring-color: rgb(191 219 254 / var(--tw-ring-opacity));
    --tw-ring-opacity: .5;
}
#advanced-btn {
    font-size: .7rem !important;
    line-height: 19px;
    margin-top: 12px;
    margin-bottom: 12px;
    padding: 2px 8px;
    border-radius: 14px !important;
}
#advanced-options {
    display: none;
    margin-bottom: 20px;
}
.footer {
    margin-bottom: 45px;
    margin-top: 35px;
    text-align: center;
    border-bottom: 1px solid #e5e5e5;
}
.footer>p {
    font-size: .8rem;
    display: inline-block;
    padding: 0 10px;
    transform: translateY(10px);
    background: white;
}
.dark .footer {
    border-color: #303030;
}
.dark .footer>p {
    background: #0b0f19;
}
.acknowledgments h4{
    margin: 1.25em 0 .25em 0;
    font-weight: bold;
    font-size: 115%;
}
#container-advanced-btns{
    display: flex;
    flex-wrap: wrap;
    justify-content: space-between;
    align-items: center;
}
.animate-spin {
    animation: spin 1s linear infinite;
}
@keyframes spin {
    from {
        transform: rotate(0deg);
    }
    to {
        transform: rotate(360deg);
    }
}
#share-btn-container {
    display: flex; padding-left: 0.5rem !important; padding-right: 0.5rem !important; background-color: #000000; justify-content: center; align-items: center; border-radius: 9999px !important; width: 13rem;
}
#share-btn {
    all: initial; color: #ffffff;font-weight: 600; cursor:pointer; font-family: 'IBM Plex Sans', sans-serif; margin-left: 0.5rem !important; padding-top: 0.25rem !important; padding-bottom: 0.25rem !important;
}
#share-btn * {
    all: unset;
}
.gr-form{
    flex: 1 1 50%; border-top-right-radius: 0; border-bottom-right-radius: 0;
}
#prompt-container{
    gap: 0;
}
#generated_id{
    min-height: 700px
}
"""
157
# Root Gradio container, styled with the module-level `css` string.
block = gr.Blocks(css=css)

# Example prompts shown under the input box; each entry is one input row.
examples = [
    ['Thinking man in anime style'],
]

# Hugging Face Space id, used to build the "duplicate this space" link.
SPACE_ID = os.getenv('SPACE_ID')
168
+
169
# Assemble the UI: intro markdown, prompt/negative-prompt inputs, run
# button, output gallery, cached examples, and the authors footer.
# Fixes: user-facing typos "Offical" -> "Official" and
# "practicies" -> "practices" in the markdown.
with block as demo:
    gr.Markdown(f"""


[![Framework: PyTorch](https://img.shields.io/badge/Framework-PyTorch-orange.svg)](https://pytorch.org/) [![Huggingface space](https://img.shields.io/badge/🤗-Huggingface-yello.svg)](https://huggingface.co/sberbank-ai/Kandinsky_2.0)

<p>For faster inference without waiting in queue, you may duplicate the space and upgrade to GPU in settings. <a href="https://huggingface.co/spaces/{SPACE_ID}?duplicate=true"><img style="display: inline; margin-top: 0em; margin-bottom: 0em" src="https://bit.ly/3gLdBN6" alt="Duplicate Space" /></a></p>

[Official BlogPost](https://habr.com/ru/company/sberbank/blog/725282/)
[Official Telegram Bot](https://t.me/kandinsky21_bot)
[Official site](https://fusionbrain.ai/diffusion)

## Model architecture:
Kandinsky 2.1 inherits best practices from Dall-E 2 and Latent diffusion, while introducing some new ideas.

As text and image encoder it uses CLIP model and diffusion image prior (mapping) between latent spaces of CLIP modalities. This approach increases the visual performance of the model and unveils new horizons in blending images and text-guided image manipulation.

For diffusion mapping of latent spaces we use transformer with num_layers=20, num_heads=32 and hidden_size=2048.

Other architecture parts:

- Text encoder (XLM-Roberta-Large-Vit-L-14) - 560M
- Diffusion Image Prior — 1B
- CLIP image encoder (ViT-L/14) - 427M
- Latent Diffusion U-Net - 1.22B
- MoVQ encoder/decoder - 67M

Kandinsky 2.1 was trained on a large-scale image-text dataset LAION HighRes and fine-tuned on our internal datasets.

**Kandinsky 2.1** architecture overview:

![](https://raw.githubusercontent.com/ai-forever/Kandinsky-2/main/content/einstein.png)


"""
    )
    with gr.Group():
        with gr.Box():
            # Prompt row: positive prompt, negative prompt, and run button.
            with gr.Row().style(mobile_collapse=False, equal_height=True):
                text = gr.Textbox(
                    label="Enter your prompt", show_label=True, max_lines=2
                ).style(
                    border=(True, False, True, True),
                    rounded=(True, False, False, True),
                    container=False,
                )
                negative = gr.Textbox(
                    label="Enter your negative prompt", show_label=True, max_lines=2
                ).style(
                    border=(True, False, True, True),
                    rounded=(True, False, False, True),
                    container=False,
                )
                btn = gr.Button("Run").style(
                    margin=False,
                    rounded=(False, True, True, False),
                )

        gallery = gr.Gallery(label="Generated images", show_label=False, elem_id="generated_id").style(
            grid=[2], height="auto"
        )

        # Cached examples run `infer` once at startup and reuse the result.
        ex = gr.Examples(examples=examples, fn=infer, inputs=[text, negative], outputs=gallery, cache_examples=True)
        ex.dataset.headers = [""]

        # Both Enter in the textbox and the Run button trigger generation.
        text.submit(infer, inputs=[text, negative], outputs=gallery)
        btn.click(infer, inputs=[text, negative], outputs=gallery)
    gr.Markdown("""


# Authors

+ Arseniy Shakhmatov: [Github](https://github.com/cene555), [Blog](https://t.me/gradientdip)
+ Anton Razzhigaev: [Github](https://github.com/razzant), [Blog](https://t.me/abstractDL)
+ Aleksandr Nikolich: [Github](https://github.com/AlexWortega), [Blog](https://t.me/lovedeathtransformers)
+ Vladimir Arkhipkin: [Github](https://github.com/oriBetelgeuse)
+ Igor Pavlov: [Github](https://github.com/boomb0om)
+ Andrey Kuznetsov: [Github](https://github.com/kuznetsoffandrey)
+ Denis Dimitrov: [Github](https://github.com/denndimitrov)

"""
    )

# Queue requests (max 15 waiting) and start the server.
demo.queue(max_size=15).launch()
kandi2.png ADDED
packages.txt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ ffmpeg
2
+ libsm6
3
+ libxext6
requirements.txt ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ git+https://github.com/ai-forever/Kandinsky-2.git
2
+ gradio
3
+ opencv-python
4
+ git+https://github.com/openai/CLIP.git