YunjiKim committed on
Commit
0ed69a6
1 Parent(s): 8884b70

Upload 7 files

Files changed (7)
  1. 0.png +0 -0
  2. 1.png +0 -0
  3. 5.png +0 -0
  4. app.py +338 -0
  5. requirements.txt +3 -0
  6. utils.py +105 -0
  7. valset.pkl +3 -0
0.png ADDED
1.png ADDED
5.png ADDED
app.py ADDED
@@ -0,0 +1,338 @@
+ import gradio as gr
+ import numpy as np
+ import torch
+ import requests
+ import random
+ import os
+ import sys
+ import pickle
+ from PIL import Image
+
+ from tqdm.auto import tqdm
+ from datetime import datetime
+
+ import diffusers
+ from diffusers import DDIMScheduler
+ from transformers import CLIPTextModel, CLIPTokenizer
+ import torch.nn.functional as F
+
+ from utils import preprocess_mask, process_sketch, process_prompts, process_example
+
+
+ #################################################
+ #################################################
+ canvas_html = "<div id='canvas-root' style='max-width:400px; margin: 0 auto'></div>"
+ load_js = """
+ async () => {
+     const url = "https://huggingface.co/datasets/radames/gradio-components/raw/main/sketch-canvas.js"
+     fetch(url)
+         .then(res => res.text())
+         .then(text => {
+             const script = document.createElement('script');
+             script.type = "module"
+             script.src = URL.createObjectURL(new Blob([text], { type: 'application/javascript' }));
+             document.head.appendChild(script);
+         });
+ }
+ """
+
+ get_js_colors = """
+ async (canvasData) => {
+     const canvasEl = document.getElementById("canvas-root");
+     return [canvasEl._data]
+ }
+ """
+
+ css = '''
+ #color-bg{display:flex;justify-content: center;align-items: center;}
+ .color-bg-item{width: 100%; height: 32px}
+ #main_button{width:100%}
+ '''
+
+
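+ # The sketch canvas is a custom web component fetched at runtime by load_js;
+ # get_js_colors reads the drawn image and color list back from the DOM node.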
+ #################################################
+ #################################################
+ global sreg, creg, sizereg, COUNT, creg_maps, sreg_maps, pipe, text_cond
+
+ sreg = 0
+ creg = 0
+ sizereg = 0
+ COUNT = 0
+ reg_sizes = {}
+ creg_maps = {}
+ sreg_maps = {}
+ text_cond = 0
+ device = "cuda"
+ MAX_COLORS = 12
+
+ pipe = diffusers.StableDiffusionPipeline.from_pretrained(
+     "runwayml/stable-diffusion-v1-5",
+     variant="fp16").to(device)
+
+ pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
+ pipe.scheduler.set_timesteps(50)
+ timesteps = pipe.scheduler.timesteps
+ sp_sz = pipe.unet.sample_size  # latent resolution (64 for SD v1.5)
+
+ with open('./valset.pkl', 'rb') as f:
+     val_prompt = pickle.load(f)
+
+
+ #################################################
+ #################################################
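+ # DenseDiffusion attention modulation: for the first 30% of the 50 DDIM steps,
+ # similarity scores are pushed up for query/key pairs that belong to the same
+ # layout segment and pushed down otherwise, with strength sreg/creg scaled by
+ # a timestep-dependent weight treg = (t/1000)^5 and a mask-area term.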
+ def mod_forward(self, hidden_states, encoder_hidden_states=None, attention_mask=None, temb=None):
+
+     residual = hidden_states
+
+     if self.spatial_norm is not None:
+         hidden_states = self.spatial_norm(hidden_states, temb)
+
+     input_ndim = hidden_states.ndim
+
+     if input_ndim == 4:
+         batch_size, channel, height, width = hidden_states.shape
+         hidden_states = hidden_states.view(batch_size, channel, height * width).transpose(1, 2)
+
+     batch_size, sequence_length, _ = (hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape)
+     attention_mask = self.prepare_attention_mask(attention_mask, sequence_length, batch_size)
+
+     if self.group_norm is not None:
+         hidden_states = self.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2)
+
+     query = self.to_q(hidden_states)
+
+     global sreg, creg, COUNT, creg_maps, sreg_maps, reg_sizes, text_cond
+
+     sa_ = encoder_hidden_states is None  # self-attention if no encoder states were passed
+     encoder_hidden_states = text_cond if encoder_hidden_states is not None else hidden_states
+
+     if self.norm_cross:
+         encoder_hidden_states = self.norm_encoder_hidden_states(encoder_hidden_states)
+
+     key = self.to_k(encoder_hidden_states)
+     value = self.to_v(encoder_hidden_states)
+
+     query = self.head_to_batch_dim(query)
+     key = self.head_to_batch_dim(key)
+     value = self.head_to_batch_dim(value)
+
+     # modulate only during the first 30% of the 50 sampling steps
+     # (COUNT advances once per attention layer; 32 layers per UNet pass)
+     if COUNT/32 < 50*0.3:
+
+         dtype = query.dtype
+         if self.upcast_attention:
+             query = query.float()
+             key = key.float()
+
+         sim = torch.baddbmm(torch.empty(query.shape[0], query.shape[1], key.shape[1],
+                                         dtype=query.dtype, device=query.device),
+                             query, key.transpose(-1, -2), beta=0, alpha=self.scale)
+
+         treg = torch.pow(timesteps[COUNT//32]/1000, 5)
+
+         ## reg at self-attn
+         if sa_:
+             min_value = sim[int(sim.size(0)/2):].min(-1)[0].unsqueeze(-1)
+             max_value = sim[int(sim.size(0)/2):].max(-1)[0].unsqueeze(-1)
+             mask = sreg_maps[sim.size(1)].repeat(self.heads,1,1)
+             size_reg = reg_sizes[sim.size(1)].repeat(self.heads,1,1)
+
+             # only the conditional half of the CFG batch is modulated
+             sim[int(sim.size(0)/2):] += (mask>0)*size_reg*sreg*treg*(max_value-sim[int(sim.size(0)/2):])
+             sim[int(sim.size(0)/2):] -= ~(mask>0)*size_reg*sreg*treg*(sim[int(sim.size(0)/2):]-min_value)
+
+         ## reg at cross-attn
+         else:
+             min_value = sim[int(sim.size(0)/2):].min(-1)[0].unsqueeze(-1)
+             max_value = sim[int(sim.size(0)/2):].max(-1)[0].unsqueeze(-1)
+             mask = creg_maps[sim.size(1)].repeat(self.heads,1,1)
+             size_reg = reg_sizes[sim.size(1)].repeat(self.heads,1,1)
+
+             sim[int(sim.size(0)/2):] += (mask>0)*size_reg*creg*treg*(max_value-sim[int(sim.size(0)/2):])
+             sim[int(sim.size(0)/2):] -= ~(mask>0)*size_reg*creg*treg*(sim[int(sim.size(0)/2):]-min_value)
+
+         attention_probs = sim.softmax(dim=-1)
+         attention_probs = attention_probs.to(dtype)
+
+     else:
+         attention_probs = self.get_attention_scores(query, key, attention_mask)
+
+     COUNT += 1
+
+     hidden_states = torch.bmm(attention_probs, value)
+     hidden_states = self.batch_to_head_dim(hidden_states)
+
+     # linear proj
+     hidden_states = self.to_out[0](hidden_states)
+     # dropout
+     hidden_states = self.to_out[1](hidden_states)
+
+     if input_ndim == 4:
+         hidden_states = hidden_states.transpose(-1, -2).reshape(batch_size, channel, height, width)
+
+     if self.residual_connection:
+         hidden_states = hidden_states + residual
+
+     hidden_states = hidden_states / self.rescale_output_factor
+
+     return hidden_states
+
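+ # Patch every Attention module at the class level so all self- and
+ # cross-attention layers in the UNet route through mod_forward.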
+ for _module in pipe.unet.modules():
+     if _module.__class__.__name__ == "Attention":
+         _module.__class__.__call__ = mod_forward
+
+
+ #################################################
+ #################################################
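+ # Builds the layout-conditioned inputs (masks, per-resolution attention maps,
+ # token-aligned text embeddings) and runs the pipeline with them.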
+ def process_generation(binary_matrixes, seed, creg_, sreg_, sizereg_, bsz, master_prompt, *prompts):
+
+     global creg, sreg, sizereg
+     creg, sreg, sizereg = creg_, sreg_, sizereg_
+
+     clipped_prompts = prompts[:len(binary_matrixes)]
+     prompts = [master_prompt] + list(clipped_prompts)
+     layouts = torch.cat([preprocess_mask(mask_, sp_sz, sp_sz, device) for mask_ in binary_matrixes])
+
+     text_input = pipe.tokenizer(prompts, padding="max_length", return_length=True, return_overflowing_tokens=False,
+                                 max_length=pipe.tokenizer.model_max_length, truncation=True, return_tensors="pt")
+     cond_embeddings = pipe.text_encoder(text_input.input_ids.to(device))[0]
+
+     uncond_input = pipe.tokenizer([""]*bsz, padding="max_length", max_length=pipe.tokenizer.model_max_length,
+                                   truncation=True, return_tensors="pt")
+     uncond_embeddings = pipe.text_encoder(uncond_input.input_ids.to(device))[0]
+
+     ###########################
+     ###### prep for sreg ######
+     ###########################
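+     # For each UNet resolution, the outer product of the downsampled masks marks
+     # query/key pairs that fall in the same segment; reg_sizes shrinks the
+     # modulation for large segments (scaled by sizereg).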
+     global sreg_maps, reg_sizes
+     sreg_maps = {}
+     reg_sizes = {}
+
+     for r in range(4):
+         res = int(sp_sz/np.power(2,r))
+         layouts_s = F.interpolate(layouts, (res, res), mode='nearest')
+         layouts_s = (layouts_s.view(layouts_s.size(0),1,-1)*layouts_s.view(layouts_s.size(0),-1,1)).sum(0).unsqueeze(0).repeat(bsz,1,1)
+         reg_sizes[np.power(res, 2)] = 1-sizereg*layouts_s.sum(-1, keepdim=True)/(np.power(res, 2))
+         sreg_maps[np.power(res, 2)] = layouts_s
+
+
+     ###########################
+     ###### prep for creg ######
+     ###########################
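+     # Locate each segment prompt's token span inside the full prompt (77 CLIP
+     # tokens), paint its mask over those token channels, and splice the
+     # segment's own embeddings into the full-prompt embedding.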
+     pww_maps = torch.zeros(1,77,sp_sz,sp_sz).to(device)
+     for i in range(1,len(prompts)):
+         wlen = text_input['length'][i] - 2
+         widx = text_input['input_ids'][i][1:1+wlen]
+         for j in range(77):
+             try:
+                 if (text_input['input_ids'][0][j:j+wlen] == widx).sum() == wlen:
+                     pww_maps[:,j:j+wlen,:,:] = layouts[i-1:i]
+                     cond_embeddings[0][j:j+wlen] = cond_embeddings[i][1:1+wlen]
+                     break
+             except Exception:
+                 raise gr.Error("Please check whether every segment prompt is included in the full text!")
+
+     global creg_maps
+     creg_maps = {}
+     for r in range(4):
+         res = int(sp_sz/np.power(2,r))
+         layout_c = F.interpolate(pww_maps,(res,res),mode='nearest').view(1,77,-1).permute(0,2,1).repeat(bsz,1,1)
+         creg_maps[np.power(res, 2)] = layout_c
+
+
+     ###########################
+     #### prep for text_emb ####
+     ###########################
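+     # Classifier-free guidance conditioning: unconditional embeddings first,
+     # then the (token-spliced) full-prompt embedding repeated per sample.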
+     global text_cond
+     text_cond = torch.cat([uncond_embeddings, cond_embeddings[:1].repeat(bsz,1,1)])
+
+     global COUNT
+     COUNT = 0
+
+     # seed == -1 draws random initial latents; any other value fixes them
+     if seed == -1:
+         latents = torch.randn(bsz,4,sp_sz,sp_sz).to(device)
+     else:
+         latents = torch.randn(bsz,4,sp_sz,sp_sz, generator=torch.Generator().manual_seed(seed)).to(device)
+
+     image = pipe(prompts[:1]*bsz, latents=latents).images
+
+     return image
+
+
+ #################################################
+ #################################################
+ ### define the interface
+ with gr.Blocks(css=css) as demo:
+     binary_matrixes = gr.State([])
+     color_layout = gr.State([])
+     gr.Markdown('''## DenseDiffusion: Dense Text-to-Image Generation with Attention Modulation''')
+     gr.Markdown('''
+     #### 😺 Instructions for generating images 😺 <br>
+     (1) Create the image layout. <br>
+     (2) Label each segment with a text prompt. <br>
+     (3) Adjust the full text. The default full text is automatically concatenated from each segment's text. The default usually works well, but refining the full text can further improve the result. <br>
+     (4) Check the generated images, and tune the hyperparameters if needed. <br>
+     &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; - w<sup>c</sup> : The degree of attention modulation at cross-attention layers. <br>
+     &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; - w<sup>s</sup> : The degree of attention modulation at self-attention layers. <br>
+     ''')
+
+     with gr.Row():
+         with gr.Box(elem_id="main-image"):
+             canvas_data = gr.JSON(value={}, visible=False)
+             canvas = gr.HTML(canvas_html)
+             button_run = gr.Button("(1) I've finished my sketch! 😺", elem_id="main_button", interactive=True)
+
+             prompts = []
+             colors = []
+             color_row = [None] * MAX_COLORS
+             with gr.Column(visible=False) as post_sketch:
+                 for n in range(MAX_COLORS):
+                     if n == 0:
+                         with gr.Row(visible=False) as color_row[n]:
+                             colors.append(gr.Image(shape=(100, 100), label="background", type="pil", image_mode="RGB", width=100, height=100))
+                             prompts.append(gr.Textbox(label="Prompt for the background (white region)", value=""))
+                     else:
+                         with gr.Row(visible=False) as color_row[n]:
+                             colors.append(gr.Image(shape=(100, 100), label="segment "+str(n), type="pil", image_mode="RGB", width=100, height=100))
+                             prompts.append(gr.Textbox(label="Prompt for the segment "+str(n)))
+
+                 get_genprompt_run = gr.Button("(2) I've finished segment labeling! 😺", elem_id="prompt_button", interactive=True)
+
+             with gr.Column(visible=False) as gen_prompt_vis:
+                 general_prompt = gr.Textbox(value='', label="(3) Textual Description for the entire image", interactive=True)
+                 with gr.Accordion("(4) Tune the hyperparameters", open=False):
+                     creg_ = gr.Slider(label=" w\u1D9C (The degree of attention modulation at cross-attention layers) ", minimum=0, maximum=2., value=1.0, step=0.1)
+                     sreg_ = gr.Slider(label=" w\u02E2 (The degree of attention modulation at self-attention layers) ", minimum=0, maximum=2., value=0.3, step=0.1)
+                     sizereg_ = gr.Slider(label="The degree of mask-area adaptive adjustment", minimum=0, maximum=1., value=1., step=0.1)
+                     bsz_ = gr.Slider(label="Number of Samples to generate", minimum=1, maximum=4, value=1, step=1)
+                     seed_ = gr.Slider(label="Seed", minimum=-1, maximum=999999999, value=-1, step=1)
+
+                 final_run_btn = gr.Button("Generate! 😺")
+
+                 layout_path = gr.Textbox(label="layout_path", visible=False)
+                 all_prompts = gr.Textbox(label="all_prompts", visible=False)
+
+         with gr.Column():
+             out_image = gr.Gallery(label="Result", columns=2, height='auto')
+
+     # wire the three-step workflow: sketch -> segment prompts -> generation
+     button_run.click(process_sketch, inputs=[canvas_data], outputs=[post_sketch, binary_matrixes, *color_row, *colors], _js=get_js_colors, queue=False)
+
+     get_genprompt_run.click(process_prompts, inputs=[binary_matrixes, *prompts], outputs=[gen_prompt_vis, general_prompt], queue=False)
+
+     final_run_btn.click(process_generation, inputs=[binary_matrixes, seed_, creg_, sreg_, sizereg_, bsz_, general_prompt, *prompts], outputs=out_image)
+
+     gr.Examples(
+         examples=[['0.png', '***'.join([val_prompt[0]['textual_condition']] + val_prompt[0]['segment_descriptions']), 381940206],
+                   ['1.png', '***'.join([val_prompt[1]['textual_condition']] + val_prompt[1]['segment_descriptions']), 307504592],
+                   ['5.png', '***'.join([val_prompt[5]['textual_condition']] + val_prompt[5]['segment_descriptions']), 114972190]],
+         inputs=[layout_path, all_prompts, seed_],
+         outputs=[post_sketch, binary_matrixes, *color_row, *colors, *prompts, gen_prompt_vis, general_prompt, seed_],
+         fn=process_example,
+         run_on_click=True,
+         label='😺 Examples 😺',
+     )
+
+     demo.load(None, None, None, _js=load_js)
+
+ demo.launch(debug=True)
requirements.txt ADDED
@@ -0,0 +1,3 @@
+ diffusers==0.20.2
+ transformers==4.28.0
+ accelerate
utils.py ADDED
@@ -0,0 +1,105 @@
+ import torch
+ import base64
+ import gradio as gr
+ import numpy as np
+ from PIL import Image
+ from io import BytesIO
+
+ MAX_COLORS = 12
+
+
+ def create_binary_matrix(img_arr, target_color):
+     # 1 where the pixel exactly matches target_color, 0 elsewhere
+     mask = np.all(img_arr == target_color, axis=-1)
+     binary_matrix = mask.astype(int)
+     return binary_matrix
+
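+ # Resize a binary segment mask to the latent resolution as a (1,1,h,w) tensor.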
+ def preprocess_mask(mask_, h, w, device):
+     mask = np.array(mask_)
+     mask = mask.astype(np.float32)
+     mask = mask[None, None]
+     mask[mask < 0.5] = 0
+     mask[mask >= 0.5] = 1
+     mask = torch.from_numpy(mask).to(device)
+     mask = torch.nn.functional.interpolate(mask, size=(h, w), mode='nearest')
+     return mask
+
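+ # Decode the base64 canvas image and build one binary mask per drawn color;
+ # white pixels form the background mask.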
+ def process_sketch(canvas_data):
+     binary_matrixes = []
+     base64_img = canvas_data['image']
+     image_data = base64.b64decode(base64_img.split(',')[1])
+     image = Image.open(BytesIO(image_data)).convert("RGB")
+     im2arr = np.array(image)
+     colors = [tuple(map(int, rgb[4:-1].split(','))) for rgb in canvas_data['colors']]
+     colors_fixed = []
+
+     r, g, b = 255, 255, 255
+     binary_matrix = create_binary_matrix(im2arr, (r,g,b))
+     binary_matrixes.append(binary_matrix)
+     binary_matrix_ = np.repeat(np.expand_dims(binary_matrix, axis=(-1)), 3, axis=(-1))
+     colored_map = binary_matrix_*(r,g,b) + (1-binary_matrix_)*(50,50,50)
+     colors_fixed.append(gr.update(value=colored_map.astype(np.uint8)))
+
+     for color in colors:
+         r, g, b = color
+         if any(c != 255 for c in (r, g, b)):
+             binary_matrix = create_binary_matrix(im2arr, (r,g,b))
+             binary_matrixes.append(binary_matrix)
+             binary_matrix_ = np.repeat(np.expand_dims(binary_matrix, axis=(-1)), 3, axis=(-1))
+             colored_map = binary_matrix_*(r,g,b) + (1-binary_matrix_)*(50,50,50)
+             colors_fixed.append(gr.update(value=colored_map.astype(np.uint8)))
+
+     visibilities = []
+     colors = []
+     for n in range(MAX_COLORS):
+         visibilities.append(gr.update(visible=False))
+         colors.append(gr.update())
+     for n in range(len(colors_fixed)):
+         visibilities[n] = gr.update(visible=True)
+         colors[n] = colors_fixed[n]
+
+     return [gr.update(visible=True), binary_matrixes, *visibilities, *colors]
+
+ def process_prompts(binary_matrixes, *seg_prompts):
+     # default full text: the segment prompts joined with ' , '
+     return [gr.update(visible=True), gr.update(value=' , '.join(seg_prompts[:len(binary_matrixes)]))]
+
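+ # Rebuild masks and prompts from a saved example: segment colors are recovered
+ # from the layout PNG in pixel-count order; all_prompts is '***'-joined with
+ # the full text first.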
+ def process_example(layout_path, all_prompts, seed_):
+
+     all_prompts = all_prompts.split('***')
+
+     binary_matrixes = []
+     colors_fixed = []
+
+     im2arr = np.array(Image.open(layout_path))[:,:,:3]
+     unique, counts = np.unique(np.reshape(im2arr,(-1,3)), axis=0, return_counts=True)
+     sorted_idx = np.argsort(-counts)
+
+     binary_matrix = create_binary_matrix(im2arr, (0,0,0))
+     binary_matrixes.append(binary_matrix)
+     binary_matrix_ = np.repeat(np.expand_dims(binary_matrix, axis=(-1)), 3, axis=(-1))
+     colored_map = binary_matrix_*(255,255,255) + (1-binary_matrix_)*(50,50,50)
+     colors_fixed.append(gr.update(value=colored_map.astype(np.uint8)))
+
+     for i in range(len(all_prompts)-1):
+         r, g, b = unique[sorted_idx[i]]
+         if any(c != 255 for c in (r, g, b)) and any(c != 0 for c in (r, g, b)):
+             binary_matrix = create_binary_matrix(im2arr, (r,g,b))
+             binary_matrixes.append(binary_matrix)
+             binary_matrix_ = np.repeat(np.expand_dims(binary_matrix, axis=(-1)), 3, axis=(-1))
+             colored_map = binary_matrix_*(r,g,b) + (1-binary_matrix_)*(50,50,50)
+             colors_fixed.append(gr.update(value=colored_map.astype(np.uint8)))
+
+     visibilities = []
+     colors = []
+     prompts = []
+     for n in range(MAX_COLORS):
+         visibilities.append(gr.update(visible=False))
+         colors.append(gr.update())
+         prompts.append(gr.update())
+
+     for n in range(len(colors_fixed)):
+         visibilities[n] = gr.update(visible=True)
+         colors[n] = colors_fixed[n]
+         prompts[n] = all_prompts[n+1]
+
+     return [gr.update(visible=True), binary_matrixes, *visibilities, *colors, *prompts,
+             gr.update(visible=True), gr.update(value=all_prompts[0]), int(seed_)]
valset.pkl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bbe1fe2b895eb122f9ef33d550ee6a29a8f1a5c1ed31594efc4779edf308b58e
+ size 3249