Faran Fahandezh committed on
Commit ddbbf37 · 1 Parent(s): 51ae612

Add application file

Files changed (46)
  1. app.py +514 -0
  2. ckpt/model250000.pt +3 -0
  3. ckpt/readme.txt +0 -0
  4. color_guide.png +0 -0
  5. generated_svgs/readme.txt +0 -0
  6. house_diffusion/__init__.py +3 -0
  7. house_diffusion/__pycache__/__init__.cpython-311.pyc +0 -0
  8. house_diffusion/__pycache__/__init__.cpython-39.pyc +0 -0
  9. house_diffusion/__pycache__/dist_util.cpython-311.pyc +0 -0
  10. house_diffusion/__pycache__/dist_util.cpython-39.pyc +0 -0
  11. house_diffusion/__pycache__/fp16_util.cpython-311.pyc +0 -0
  12. house_diffusion/__pycache__/fp16_util.cpython-39.pyc +0 -0
  13. house_diffusion/__pycache__/gaussian_diffusion.cpython-311.pyc +0 -0
  14. house_diffusion/__pycache__/gaussian_diffusion.cpython-39.pyc +0 -0
  15. house_diffusion/__pycache__/logger.cpython-311.pyc +0 -0
  16. house_diffusion/__pycache__/logger.cpython-39.pyc +0 -0
  17. house_diffusion/__pycache__/losses.cpython-311.pyc +0 -0
  18. house_diffusion/__pycache__/losses.cpython-39.pyc +0 -0
  19. house_diffusion/__pycache__/nn.cpython-311.pyc +0 -0
  20. house_diffusion/__pycache__/nn.cpython-39.pyc +0 -0
  21. house_diffusion/__pycache__/resample.cpython-311.pyc +0 -0
  22. house_diffusion/__pycache__/resample.cpython-39.pyc +0 -0
  23. house_diffusion/__pycache__/respace.cpython-311.pyc +0 -0
  24. house_diffusion/__pycache__/respace.cpython-39.pyc +0 -0
  25. house_diffusion/__pycache__/rplanhg_datasets.cpython-311.pyc +0 -0
  26. house_diffusion/__pycache__/rplanhg_datasets.cpython-39.pyc +0 -0
  27. house_diffusion/__pycache__/script_util.cpython-311.pyc +0 -0
  28. house_diffusion/__pycache__/script_util.cpython-39.pyc +0 -0
  29. house_diffusion/__pycache__/train_util.cpython-311.pyc +0 -0
  30. house_diffusion/__pycache__/train_util.cpython-39.pyc +0 -0
  31. house_diffusion/__pycache__/transformer.cpython-311.pyc +0 -0
  32. house_diffusion/__pycache__/transformer.cpython-39.pyc +0 -0
  33. house_diffusion/__pycache__/transformer_models.cpython-39.pyc +0 -0
  34. house_diffusion/dist_util.py +94 -0
  35. house_diffusion/fp16_util.py +236 -0
  36. house_diffusion/gaussian_diffusion.py +1013 -0
  37. house_diffusion/logger.py +496 -0
  38. house_diffusion/losses.py +77 -0
  39. house_diffusion/nn.py +172 -0
  40. house_diffusion/resample.py +154 -0
  41. house_diffusion/respace.py +128 -0
  42. house_diffusion/rplanhg_datasets.py +620 -0
  43. house_diffusion/script_util.py +182 -0
  44. house_diffusion/train_util.py +416 -0
  45. house_diffusion/transformer.py +284 -0
  46. house_diffusion/transformer_models.py +228 -0
app.py ADDED
@@ -0,0 +1,514 @@
+ import gradio as gr
+ import pandas as pd
+ import numpy as np
+ import torch as th
+ from house_diffusion import dist_util
+ from house_diffusion.script_util import (
+     create_model_and_diffusion,
+ )
+ from PIL import Image
+
+ import io
+ import drawSvg as drawsvg
+ import cairosvg
+ from tqdm import tqdm
+
+ import webcolors
+ import tempfile
+ from pathlib import Path
+ import shutil
+ import os
+
+
+ ROOM_CLASS = {
+     'Living Room': 1, 'Kitchen': 2, 'Bedroom': 3, 'Bathroom': 4,
+     'Balcony': 5, 'Entrance': 6, 'Dining Room': 7, 'Study Room': 8,
+     'Storage': 10, 'Front Door': 11, 'Unknown': 13, 'Interior Door': 12
+ }
+ ROOM_CATEGORIES = {
+     'Living Room': 1, 'Kitchen': 2, 'Bedroom': 3, 'Bathroom': 4,
+     'Balcony': 5, 'Entrance': 6, 'Dining Room': 7, 'Study Room': 8,
+     'Storage': 10, 'Front Door': 11, 'Other': 13
+ }
+
+
+ def save_samples(
+         sample, ext, model_kwargs,
+         tmp_count, num_room_types,
+         # save_gif=False,
+         save_gif=True,
+         door_indices=[11, 12, 13], ID_COLOR=None,
+         is_syn=False, draw_graph=False, save_svg=False, metrics=False):
+     prefix = 'syn_' if is_syn else ''
+     graph_errors = []
+
+     print(sample.shape)
+
+     if not save_gif:
+         sample = sample[-1:]
+     for i in tqdm(range(sample.shape[1])):
+         resolution = 256
+         images = []
+         images2 = []
+         images3 = []
+         for k in range(sample.shape[0]):
+             draw_color = drawsvg.Drawing(resolution, resolution, displayInline=False)
+             draw_color.append(drawsvg.Rectangle(0, 0, resolution, resolution, fill='white'))
+             polys = []
+             types = []
+             for j, point in (enumerate(sample[k][i])):
+                 if model_kwargs[f'{prefix}src_key_padding_mask'][i][j] == 1:
+                     continue
+                 point = point.cpu().data.numpy()
+                 if j == 0:
+                     poly = []
+                 if j > 0 and (model_kwargs[f'{prefix}room_indices'][i, j] !=
+                               model_kwargs[f'{prefix}room_indices'][i, j - 1]).any():
+                     polys.append(poly)
+                     types.append(c)
+                     poly = []
+                 pred_center = False
+                 if pred_center:
+                     point = point / 2 + 1
+                     point = point * resolution // 2
+                 else:
+                     point = point / 2 + 0.5
+                     point = point * resolution
+                 poly.append((point[0], point[1]))
+                 c = np.argmax(model_kwargs[f'{prefix}room_types'][i][j - 1].cpu().numpy())
+             polys.append(poly)
+             types.append(c)
+             for poly, c in zip(polys, types):
+                 if c in door_indices or c == 0:
+                     continue
+                 room_type = c
+                 c = webcolors.hex_to_rgb(ID_COLOR[c])
+                 draw_color.append(
+                     drawsvg.Lines(*np.array(poly).flatten().tolist(), close=True, fill=ID_COLOR[room_type],
+                                   fill_opacity=1.0, stroke='black', stroke_width=1))
+
+             for poly, c in zip(polys, types):
+                 if c not in door_indices:
+                     continue
+                 room_type = c
+                 c = webcolors.hex_to_rgb(ID_COLOR[c])
+
+                 # TODO --------------------------------------------------------------------------------------
+                 # https://github.com/sakmalh/house_diffusion
+                 line_lengths = [np.linalg.norm(np.array(poly[i]) - np.array(poly[(i + 1) % len(poly)]))
+                                 for i in range(len(poly))]
+
+                 if metrics:
+                     text_size = 5
+                     for z, length in enumerate(line_lengths):
+                         # Calculate the mid-point of the line segment
+                         midpoint = ((poly[z][0] + poly[(z + 1) % len(poly)][0]) / 2,
+                                     (poly[z][1] + poly[(z + 1) % len(poly)][1]) / 2)
+
+                         # Calculate x and y differences
+                         x_diff = poly[z][0] - poly[(z + 1) % len(poly)][0]
+                         y_diff = poly[z][1] - poly[(z + 1) % len(poly)][1]
+
+                         # Determine text position adjustments based on differences
+                         if int(y_diff) != 0:
+                             if y_diff > 0:
+                                 text_x = midpoint[0] + text_size
+                                 text_y = midpoint[1]
+
+                                 draw_color.append(drawsvg.Line(
+                                     text_x, text_y + text_size,  # Start point at the text label
+                                     poly[z][0] + text_size, poly[z][1],  # End point at the polygon endpoint
+                                     stroke='black',
+                                     stroke_width=1
+                                 ))
+
+                                 draw_color.append(drawsvg.Line(
+                                     text_x, text_y - text_size,  # Start point at the text label
+                                     poly[(z + 1) % len(poly)][0] + text_size, poly[(z + 1) % len(poly)][1],
+                                     # End point at the polygon endpoint
+                                     stroke='black',
+                                     stroke_width=1
+                                 ))
+                             else:
+                                 text_x = midpoint[0] - text_size
+                                 text_y = midpoint[1]
+
+                                 draw_color.append(drawsvg.Line(
+                                     text_x, text_y - text_size,  # Start point at the text label
+                                     poly[z][0] - text_size, poly[z][1],  # End point at the polygon endpoint
+                                     stroke='black',
+                                     stroke_width=1
+                                 ))
+
+                                 draw_color.append(drawsvg.Line(
+                                     text_x, text_y + text_size,  # Start point at the text label
+                                     poly[(z + 1) % len(poly)][0] - text_size, poly[(z + 1) % len(poly)][1],
+                                     # End point at the polygon endpoint
+                                     stroke='black',
+                                     stroke_width=1
+                                 ))
+                         else:
+                             if x_diff > 0:
+                                 text_x = midpoint[0]
+                                 text_y = midpoint[1] - text_size
+
+                                 draw_color.append(drawsvg.Line(
+                                     text_x + text_size, text_y,  # Start point at the text label
+                                     poly[z][0], poly[z][1] - text_size,  # End point at the polygon endpoint
+                                     stroke='black',
+                                     stroke_width=1
+                                 ))
+
+                                 draw_color.append(drawsvg.Line(
+                                     text_x - text_size, text_y,  # Start point at the text label
+                                     poly[(z + 1) % len(poly)][0], poly[(z + 1) % len(poly)][1] - text_size,
+                                     # End point at the polygon endpoint
+                                     stroke='black',
+                                     stroke_width=1
+                                 ))
+                             else:
+                                 text_x = midpoint[0]
+                                 text_y = midpoint[1] + text_size
+
+                                 draw_color.append(drawsvg.Line(
+                                     text_x - text_size, text_y,  # Start point at the text label
+                                     poly[z][0], poly[z][1] + text_size,  # End point at the polygon endpoint
+                                     stroke='black',
+                                     stroke_width=1
+                                 ))
+
+                                 draw_color.append(drawsvg.Line(
+                                     text_x + text_size, text_y,  # Start point at the text label
+                                     poly[(z + 1) % len(poly)][0], poly[(z + 1) % len(poly)][1] + text_size,
+                                     # End point at the polygon endpoint
+                                     stroke='black',
+                                     stroke_width=1
+                                 ))
+
+                         # Add the text label to the SVG
+                         draw_color.append(
+                             drawsvg.Text(
+                                 f'{int(abs(length))}',  # Format the length to two decimal places
+                                 text_size,
+                                 text_x, text_y,
+                                 fill='black',
+                                 text_anchor='middle',
+                                 alignment_baseline='middle'
+                             )
+                         )
+
+                 draw_color.append(
+                     drawsvg.Lines(*np.array(poly).flatten().tolist(), close=True, fill=ID_COLOR[room_type],
+                                   fill_opacity=1.0, stroke='black', stroke_width=1))
+
+             if k == sample.shape[0] - 1 or True:
+                 if save_svg:
+                     # draw_color.saveSvg(f'outputs/{ext}/{tmp_count + i}c_{k}_{ext}.svg')
+                     return draw_color
+                 else:
+                     Image.open(io.BytesIO(cairosvg.svg2png(draw_color.asSvg()))).save(
+                         f'outputs/{ext}/{tmp_count + i}c_{ext}.png')
+
+         # if save_gif:
+         #     imageio.mimwrite(f'outputs/gif/{tmp_count + i}.gif', images, fps=10, loop=1)
+         #     imageio.mimwrite(f'outputs/gif/{tmp_count + i}_v2.gif', images2, fps=10, loop=1)
+         #     imageio.mimwrite(f'outputs/gif/{tmp_count + i}_v3.gif', images3, fps=10, loop=1)
+     return graph_errors
+
+
+ def function_test(org_graphs, corners, room_type):
+     get_one_hot = lambda x, z: np.eye(z)[x]
+     max_num_points = 100
+
+     house = []
+     corner_bounds = []
+     num_points = 0
+
+     for i, room in enumerate(room_type):
+         # Adding conditions
+         num_room_corners = corners[i]
+         rtype = np.repeat(np.array([get_one_hot(room, 25)]), num_room_corners, 0)
+         room_index = np.repeat(np.array([get_one_hot(len(house) + 1, 32)]), num_room_corners, 0)
+         corner_index = np.array([get_one_hot(x, 32) for x in range(num_room_corners)])
+         # Src_key_padding_mask
+         padding_mask = np.repeat(1, num_room_corners)
+         padding_mask = np.expand_dims(padding_mask, 1)
+         # Generating corner bounds for attention masks
+         connections = np.array([[i, (i + 1) % num_room_corners] for i in range(num_room_corners)])
+         connections += num_points
+         corner_bounds.append([num_points, num_points + num_room_corners])
+         num_points += num_room_corners
+         room = np.concatenate((np.zeros([num_room_corners, 2]), rtype, corner_index, room_index,
+                                padding_mask, connections), 1)
+         house.append(room)
+
+     house_layouts = np.concatenate(house, 0)
+     padding = np.zeros((max_num_points - len(house_layouts), 94))
+     gen_mask = np.ones((max_num_points, max_num_points))
+     gen_mask[:len(house_layouts), :len(house_layouts)] = 0
+     house_layouts = np.concatenate((house_layouts, padding), 0)
+
+     door_mask = np.ones((max_num_points, max_num_points))
+     self_mask = np.ones((max_num_points, max_num_points))
+     for i, room in enumerate(room_type):
+         if room == 1:
+             living_room_index = i
+             break
+     for i in range(len(corner_bounds)):
+         is_connected = False
+         for j in range(len(corner_bounds)):
+             if i == j:
+                 self_mask[corner_bounds[i][0]:corner_bounds[i][1], corner_bounds[j][0]:corner_bounds[j][1]] = 0
+             elif any(np.equal([i, 1, j], org_graphs).all(1)) or any(np.equal([j, 1, i], org_graphs).all(1)):
+                 door_mask[corner_bounds[i][0]:corner_bounds[i][1], corner_bounds[j][0]:corner_bounds[j][1]] = 0
+                 is_connected = True
+         if not is_connected:
+             door_mask[corner_bounds[i][0]:corner_bounds[i][1],
+                       corner_bounds[living_room_index][0]:corner_bounds[living_room_index][1]] = 0
+
+     syn_houses = house_layouts
+     syn_door_masks = door_mask
+     syn_self_masks = self_mask
+     syn_gen_masks = gen_mask
+
+     syn_graph = np.concatenate((org_graphs, np.zeros([200 - len(org_graphs), 3])), 0)
+
+     cond = {
+         'syn_door_mask': syn_door_masks,
+         'syn_self_mask': syn_self_masks,
+         'syn_gen_mask': syn_gen_masks,
+         'syn_room_types': syn_houses[:, 2:2 + 25],
+         'syn_corner_indices': syn_houses[:, 2 + 25:2 + 57],
+         'syn_room_indices': syn_houses[:, 2 + 57:2 + 89],
+         'syn_src_key_padding_mask': 1 - syn_houses[:, 2 + 89],
+         'syn_connections': syn_houses[:, 2 + 90:2 + 92],
+         'syn_graph': syn_graph,
+     }
+
+     return cond
+
+
+ def create_layout(graphs, corners, room_type, metrics=False, use_ddim=True, ddim_steps=100, num_samples=4):
+     model_path = "ckpt/model250000.pt"
+     steps = f"ddim{ddim_steps}"
+     args = {
+         "input_channels": 18,
+         "condition_channels": 89,
+         "num_channels": 512,
+         "out_channels": 2,
+         "dataset": "rplan",
+         "use_checkpoint": False,
+         "use_unet": False,
+         "learn_sigma": False,
+         "diffusion_steps": 1000,
+         "noise_schedule": "cosine",
+         "timestep_respacing": steps,
+         "use_kl": False,
+         "predict_xstart": False,
+         "rescale_timesteps": False,
+         "rescale_learned_sigmas": False,
+         "analog_bit": False,
+         "target_set": -1,
+         "set_name": "",
+     }
+
+     dist_util.setup_dist()
+     model, diffusion = create_model_and_diffusion(
+         args['input_channels'],
+         args['condition_channels'],
+         args['num_channels'],
+         args['out_channels'],
+         args['dataset'],
+         args['use_checkpoint'],
+         args['use_unet'],
+         args['learn_sigma'],
+         args['diffusion_steps'],
+         args['noise_schedule'],
+         args['timestep_respacing'],
+         args['use_kl'],
+         args['predict_xstart'],
+         args['rescale_timesteps'],
+         args['rescale_learned_sigmas'],
+         args['analog_bit'],
+         args['target_set'],
+         args['set_name'],
+     )
+     model.load_state_dict(
+         dist_util.load_state_dict(model_path, map_location="cpu")
+     )
+     model.to(dist_util.dev())
+     model.eval()
+     ID_COLOR = {1: '#EE4D4D', 2: '#C67C7B', 3: '#FFD274', 4: '#BEBEBE', 5: '#BFE3E8',
+                 6: '#7BA779', 7: '#E87A90', 8: '#FF8C69', 10: '#1F849B', 11: '#727171',
+                 13: '#785A67', 12: '#D3A2C7'}
+     num_room_types = 14
+     sample_fn = (diffusion.p_sample_loop if not use_ddim else diffusion.ddim_sample_loop)
+     print(graphs, corners, room_type)
+     model_kwargs = function_test(graphs, corners, room_type)
+     for key in model_kwargs:
+         model_kwargs[key] = th.from_numpy(np.array([model_kwargs[key]])).cuda()
+
+     png_paths = []
+     svg_paths = []
+     for count in range(num_samples):
+         sample = sample_fn(
+             model,
+             th.Size([1, 2, 100]),
+             clip_denoised=True,
+             model_kwargs=model_kwargs,
+         )
+
+         sample = sample.permute([0, 1, 3, 2])
+
+         pred = save_samples(sample, 'pred', model_kwargs, count, num_room_types, ID_COLOR=ID_COLOR,
+                             is_syn=True, draw_graph=False, save_svg=True, save_gif=False, metrics=metrics)
+
+         temp_svg_file = tempfile.NamedTemporaryFile(delete=False, suffix=".svg")
+         pred.saveSvg(temp_svg_file.name)
+         png_file_name = temp_svg_file.name.split(".")[0].split("/")[-1]
+         png_file_path = f'./generated_svgs/{png_file_name}.png'
+         # print(temp_svg_file.name)
+         # print(png_file_name)
+         # print(png_file_path)
+
+         Image.open(io.BytesIO(cairosvg.svg2png(pred.asSvg()))).save(png_file_path)
+
+         output_dir = Path("./generated_svgs")
+         output_dir.mkdir(parents=True, exist_ok=True)
+         file_name = temp_svg_file.name.split("/")[-1]
+         persistent_path = Path(f"{output_dir}/{file_name}")
+         shutil.move(temp_svg_file.name, persistent_path)
+         os.chmod(persistent_path, 0o644)
+
+         svg_paths.append(str(persistent_path))
+         png_paths.append(png_file_path)
+         # print(str(persistent_path))
+
+     return png_paths, svg_paths
+
+
+ rooms_data = []
+ edges_data = []
+
+
+ def generate_layout(metrics: bool, ddim_steps: int, num_samples: int):
+     room_list = []
+     room_corners = []
+     living_room = 0
+     front_door = False
+     entrance = -1
+
+     print(rooms_data)
+     print(edges_data)
+
+     for i, room in enumerate(rooms_data):
+         room_list.append(ROOM_CLASS[room['room_type']])
+         if room['num_corners'] != 0:
+             room_corners.append(int(room['num_corners']))
+         else:
+             room_corners.append(4)
+
+         if room['room_type'] == "Living Room":
+             living_room = i
+
+         elif room['room_type'] == "Entrance":
+             entrance = i
+
+         elif room['room_type'] == "Front Door":
+             front_door = True
+
+     edges = []
+     for edge in edges_data:
+         source_id = int(edge['room1_id'].split()[0])
+         target_id = int(edge['room2_id'].split()[0])
+         edges.append([source_id, 1, target_id])
+
+         index = len(room_list)
+         room_list.append(12)
+         room_corners.append(4)
+         edges.append([source_id, 1, index])
+         edges.append([target_id, 1, index])
+
+     if not front_door:
+         room_list.append(11)
+         room_corners.append(4)
+         if entrance == -1:
+             edges.append([len(room_list) - 1, 1, living_room])
+         else:
+             edges.append([len(room_list) - 1, 1, entrance])
+
+     if np.sum(room_corners) > 99:
+         return {"Error": "Number of Corners exceeded"}
+
+     print(room_list, room_corners, edges)
+     png_paths, svg_paths = create_layout(edges, room_corners, room_list, metrics=metrics, ddim_steps=ddim_steps,
+                                          num_samples=num_samples)
+
+     png_color_guide = './color_guide.png'
+
+     return png_paths, svg_paths, png_color_guide
+
+
+ with gr.Blocks() as demo:
+     gr.Markdown("## House Layout Generator")
+
+     with gr.Row():
+         room_type = gr.Dropdown(label="Room Type", choices=list(ROOM_CATEGORIES.keys()), value="Living Room")
+         num_corners = gr.Number(label="Number of Corners", value=4)
+         add_room_button = gr.Button("Add Room")
+
+     with gr.Row():
+         room1_id = gr.Dropdown(label="Room 1", choices=[], value=None)
+         room2_id = gr.Dropdown(label="Room 2", choices=[], value=None)
+         add_edge_button = gr.Button("Add Edge")
+
+     rooms_table = gr.DataFrame(label="Rooms Table")
+     edges_table = gr.DataFrame(label="Edges Table")
+
+     metrics_toggle = gr.Checkbox(label="Include metrics", value=True)
+     ddim_input = gr.Number(label="DDIM steps", value=100)
+     num_sample = gr.Number(label="Number of samples", value=4)
+
+     png_gallery = gr.Gallery(label="Layout PNG Outputs", columns=4)
+     svg_files = gr.File(label="Layout SVG Outputs (higher quality)")
+     png_color_guide = gr.Image(label="Color Guide")
+
+     def add_room(room_type, num_corners):
+         room_id = len(rooms_data)
+         rooms_data.append({
+             "room_id": room_id,
+             "room_type": room_type,
+             "num_corners": num_corners
+         })
+         return update_rooms_and_edges()
+
+
+     def add_edge(room1_id, room2_id):
+         edge_id = len(edges_data)
+         edges_data.append({
+             "edge_id": edge_id,
+             "room1_id": room1_id,
+             "room2_id": room2_id
+         })
+         return update_rooms_and_edges()
+
+
+     def update_rooms_and_edges():
+         rooms_df = pd.DataFrame(rooms_data, columns=["room_id", "room_type", "num_corners"])
+         edges_df = pd.DataFrame(edges_data, columns=["edge_id", "room1_id", "room2_id"])
+         room_options = [f"{room['room_id']} {room['room_type']}" for room in rooms_data]
+         return rooms_df, edges_df, gr.update(choices=room_options, value=None), gr.update(choices=room_options,
+                                                                                           value=None)
+
+
+     generate_button = gr.Button("Generate Layout")
+     generate_button.click(generate_layout, inputs=[metrics_toggle, ddim_input, num_sample], outputs=[png_gallery, svg_files, png_color_guide])
+
+     add_room_button.click(add_room, inputs=[room_type, num_corners],
+                           outputs=[rooms_table, edges_table, room1_id, room2_id])
+     add_edge_button.click(add_edge, inputs=[room1_id, room2_id], outputs=[rooms_table, edges_table, room1_id, room2_id])
+
+
+ demo.launch()
+ # global demo
+ # demo.launch(share=True)
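For reference, a minimal sketch of how this pipeline can be driven without the Gradio UI. It assumes the checkpoint at ckpt/model250000.pt is present, a CUDA device is available (create_layout moves the conditioning tensors to the GPU), and that create_layout and ROOM_CLASS can be imported without triggering demo.launch() (e.g. with the launch call moved behind a __main__ guard); the small room graph below is an illustrative input, not one from the repository.

# Hypothetical driver sketch: build the same graph format that generate_layout produces.
from app import create_layout, ROOM_CLASS  # assumes demo.launch() is guarded

# Nodes: living room, kitchen, bedroom, two interior doors (class 12), one front door (class 11).
room_list = [ROOM_CLASS['Living Room'], ROOM_CLASS['Kitchen'], ROOM_CLASS['Bedroom'], 12, 12, 11]
room_corners = [6, 4, 4, 4, 4, 4]          # per-node corner counts; total must stay <= 99
edges = [
    [0, 1, 1], [0, 1, 3], [1, 1, 3],       # living room <-> kitchen through interior door node 3
    [0, 1, 2], [0, 1, 4], [2, 1, 4],       # living room <-> bedroom through interior door node 4
    [5, 1, 0],                             # front door attaches to the living room
]

png_paths, svg_paths = create_layout(edges, room_corners, room_list,
                                     metrics=False, ddim_steps=100, num_samples=1)
print(png_paths, svg_paths)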
ckpt/model250000.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7b22c916ec8d5fce087ce9bfe277817a93d60ca2eed9f8bbcf4b8eefec43797a
+ size 106240205
ckpt/readme.txt ADDED
File without changes
color_guide.png ADDED
generated_svgs/readme.txt ADDED
File without changes
house_diffusion/__init__.py ADDED
@@ -0,0 +1,3 @@
+ """
+ Codebase for "HouseDiffusion" based on the implementation from "Improved Denoising Diffusion Probabilistic Models".
+ """
house_diffusion/__pycache__/__init__.cpython-311.pyc ADDED
Binary file (310 Bytes).
house_diffusion/__pycache__/__init__.cpython-39.pyc ADDED
Binary file (297 Bytes).
house_diffusion/__pycache__/dist_util.cpython-311.pyc ADDED
Binary file (5 kB).
house_diffusion/__pycache__/dist_util.cpython-39.pyc ADDED
Binary file (2.48 kB).
house_diffusion/__pycache__/fp16_util.cpython-311.pyc ADDED
Binary file (14.1 kB).
house_diffusion/__pycache__/fp16_util.cpython-39.pyc ADDED
Binary file (7.77 kB).
house_diffusion/__pycache__/gaussian_diffusion.cpython-311.pyc ADDED
Binary file (44.3 kB).
house_diffusion/__pycache__/gaussian_diffusion.cpython-39.pyc ADDED
Binary file (26.5 kB).
house_diffusion/__pycache__/logger.cpython-311.pyc ADDED
Binary file (27.5 kB).
house_diffusion/__pycache__/logger.cpython-39.pyc ADDED
Binary file (15.5 kB).
house_diffusion/__pycache__/losses.cpython-311.pyc ADDED
Binary file (3.99 kB).
house_diffusion/__pycache__/losses.cpython-39.pyc ADDED
Binary file (2.52 kB).
house_diffusion/__pycache__/nn.cpython-311.pyc ADDED
Binary file (9.8 kB).
house_diffusion/__pycache__/nn.cpython-39.pyc ADDED
Binary file (6.02 kB).
house_diffusion/__pycache__/resample.cpython-311.pyc ADDED
Binary file (10.9 kB).
house_diffusion/__pycache__/resample.cpython-39.pyc ADDED
Binary file (6.84 kB).
house_diffusion/__pycache__/respace.cpython-311.pyc ADDED
Binary file (7.98 kB).
house_diffusion/__pycache__/respace.cpython-39.pyc ADDED
Binary file (5.17 kB).
house_diffusion/__pycache__/rplanhg_datasets.cpython-311.pyc ADDED
Binary file (36.9 kB).
house_diffusion/__pycache__/rplanhg_datasets.cpython-39.pyc ADDED
Binary file (15.4 kB).
house_diffusion/__pycache__/script_util.cpython-311.pyc ADDED
Binary file (5.66 kB).
house_diffusion/__pycache__/script_util.cpython-39.pyc ADDED
Binary file (3.68 kB).
house_diffusion/__pycache__/train_util.cpython-311.pyc ADDED
Binary file (18 kB).
house_diffusion/__pycache__/train_util.cpython-39.pyc ADDED
Binary file (11.1 kB).
house_diffusion/__pycache__/transformer.cpython-311.pyc ADDED
Binary file (21.9 kB).
house_diffusion/__pycache__/transformer.cpython-39.pyc ADDED
Binary file (9.69 kB).
house_diffusion/__pycache__/transformer_models.cpython-39.pyc ADDED
Binary file (6.05 kB).
house_diffusion/dist_util.py ADDED
@@ -0,0 +1,94 @@
+ """
+ Helpers for distributed training.
+ """
+
+ import io
+ import os
+ import socket
+
+ import blobfile as bf
+ from mpi4py import MPI
+ import torch as th
+ import torch.distributed as dist
+
+ # Change this to reflect your cluster layout.
+ # The GPU for a given rank is (rank % GPUS_PER_NODE).
+ GPUS_PER_NODE = 4
+
+ SETUP_RETRY_COUNT = 3
+
+
+ def setup_dist():
+     """
+     Setup a distributed process group.
+     """
+     if dist.is_initialized():
+         return
+     ## temporary removed to manually set the CUDA_VISIBLE_DEVICES
+     #os.environ["CUDA_VISIBLE_DEVICES"] = f"{MPI.COMM_WORLD.Get_rank() % GPUS_PER_NODE}"
+
+     comm = MPI.COMM_WORLD
+     backend = "gloo" if not th.cuda.is_available() else "nccl"
+
+     if backend == "gloo":
+         hostname = "localhost"
+     else:
+         hostname = socket.gethostbyname(socket.getfqdn())
+     os.environ["MASTER_ADDR"] = comm.bcast(hostname, root=0)
+     os.environ["RANK"] = str(comm.rank)
+     os.environ["WORLD_SIZE"] = str(comm.size)
+
+     port = comm.bcast(_find_free_port(), root=0)
+     os.environ["MASTER_PORT"] = str(port)
+     dist.init_process_group(backend=backend, init_method="env://")
+
+
+ def dev():
+     """
+     Get the device to use for torch.distributed.
+     """
+     if th.cuda.is_available():
+         return th.device(f"cuda")
+     return th.device("cpu")
+
+
+ def load_state_dict(path, **kwargs):
+     """
+     Load a PyTorch file without redundant fetches across MPI ranks.
+     """
+     chunk_size = 2 ** 30  # MPI has a relatively small size limit
+     if MPI.COMM_WORLD.Get_rank() == 0:
+         with bf.BlobFile(path, "rb") as f:
+             data = f.read()
+         num_chunks = len(data) // chunk_size
+         if len(data) % chunk_size:
+             num_chunks += 1
+         MPI.COMM_WORLD.bcast(num_chunks)
+         for i in range(0, len(data), chunk_size):
+             MPI.COMM_WORLD.bcast(data[i : i + chunk_size])
+     else:
+         num_chunks = MPI.COMM_WORLD.bcast(None)
+         data = bytes()
+         for _ in range(num_chunks):
+             data += MPI.COMM_WORLD.bcast(None)
+
+     return th.load(io.BytesIO(data), **kwargs)
+
+
+ def sync_params(params):
+     """
+     Synchronize a sequence of Tensors across ranks from rank 0.
+     """
+     for p in params:
+         with th.no_grad():
+             dist.broadcast(p, 0)
+
+
+ def _find_free_port():
+     try:
+         s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+         s.bind(("", 0))
+         s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
+         return s.getsockname()[1]
+     finally:
+         s.close()
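A small usage sketch, assuming a single-process run as in app.py above: setup_dist() initializes a one-rank process group via MPI, and load_state_dict() reads the checkpoint on rank 0 and broadcasts the bytes, so only one process touches the file.

# Sketch: single-process checkpoint loading with the helpers above (assumes mpi4py is installed
# and the checkpoint path exists; works under plain `python` or `mpirun -n 1 python ...`).
from house_diffusion import dist_util

dist_util.setup_dist()                                              # rank 0 of a world of size 1
state = dist_util.load_state_dict("ckpt/model250000.pt", map_location="cpu")
print(len(state), "tensors loaded, sampling device =", dist_util.dev())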
house_diffusion/fp16_util.py ADDED
@@ -0,0 +1,236 @@
1
+ """
2
+ Helpers to train with 16-bit precision.
3
+ """
4
+
5
+ import numpy as np
6
+ import torch as th
7
+ import torch.nn as nn
8
+ from torch._utils import _flatten_dense_tensors, _unflatten_dense_tensors
9
+
10
+ from . import logger
11
+
12
+ INITIAL_LOG_LOSS_SCALE = 20.0
13
+
14
+
15
+ def convert_module_to_f16(l):
16
+ """
17
+ Convert primitive modules to float16.
18
+ """
19
+ if isinstance(l, (nn.Conv1d, nn.Conv2d, nn.Conv3d)):
20
+ l.weight.data = l.weight.data.half()
21
+ if l.bias is not None:
22
+ l.bias.data = l.bias.data.half()
23
+
24
+
25
+ def convert_module_to_f32(l):
26
+ """
27
+ Convert primitive modules to float32, undoing convert_module_to_f16().
28
+ """
29
+ if isinstance(l, (nn.Conv1d, nn.Conv2d, nn.Conv3d)):
30
+ l.weight.data = l.weight.data.float()
31
+ if l.bias is not None:
32
+ l.bias.data = l.bias.data.float()
33
+
34
+
35
+ def make_master_params(param_groups_and_shapes):
36
+ """
37
+ Copy model parameters into a (differently-shaped) list of full-precision
38
+ parameters.
39
+ """
40
+ master_params = []
41
+ for param_group, shape in param_groups_and_shapes:
42
+ master_param = nn.Parameter(
43
+ _flatten_dense_tensors(
44
+ [param.detach().float() for (_, param) in param_group]
45
+ ).view(shape)
46
+ )
47
+ master_param.requires_grad = True
48
+ master_params.append(master_param)
49
+ return master_params
50
+
51
+
52
+ def model_grads_to_master_grads(param_groups_and_shapes, master_params):
53
+ """
54
+ Copy the gradients from the model parameters into the master parameters
55
+ from make_master_params().
56
+ """
57
+ for master_param, (param_group, shape) in zip(
58
+ master_params, param_groups_and_shapes
59
+ ):
60
+ master_param.grad = _flatten_dense_tensors(
61
+ [param_grad_or_zeros(param) for (_, param) in param_group]
62
+ ).view(shape)
63
+
64
+
65
+ def master_params_to_model_params(param_groups_and_shapes, master_params):
66
+ """
67
+ Copy the master parameter data back into the model parameters.
68
+ """
69
+ # Without copying to a list, if a generator is passed, this will
70
+ # silently not copy any parameters.
71
+ for master_param, (param_group, _) in zip(master_params, param_groups_and_shapes):
72
+ for (_, param), unflat_master_param in zip(
73
+ param_group, unflatten_master_params(param_group, master_param.view(-1))
74
+ ):
75
+ param.detach().copy_(unflat_master_param)
76
+
77
+
78
+ def unflatten_master_params(param_group, master_param):
79
+ return _unflatten_dense_tensors(master_param, [param for (_, param) in param_group])
80
+
81
+
82
+ def get_param_groups_and_shapes(named_model_params):
83
+ named_model_params = list(named_model_params)
84
+ scalar_vector_named_params = (
85
+ [(n, p) for (n, p) in named_model_params if p.ndim <= 1],
86
+ (-1),
87
+ )
88
+ matrix_named_params = (
89
+ [(n, p) for (n, p) in named_model_params if p.ndim > 1],
90
+ (1, -1),
91
+ )
92
+ return [scalar_vector_named_params, matrix_named_params]
93
+
94
+
95
+ def master_params_to_state_dict(
96
+ model, param_groups_and_shapes, master_params, use_fp16
97
+ ):
98
+ if use_fp16:
99
+ state_dict = model.state_dict()
100
+ for master_param, (param_group, _) in zip(
101
+ master_params, param_groups_and_shapes
102
+ ):
103
+ for (name, _), unflat_master_param in zip(
104
+ param_group, unflatten_master_params(param_group, master_param.view(-1))
105
+ ):
106
+ assert name in state_dict
107
+ state_dict[name] = unflat_master_param
108
+ else:
109
+ state_dict = model.state_dict()
110
+ for i, (name, _value) in enumerate(model.named_parameters()):
111
+ assert name in state_dict
112
+ state_dict[name] = master_params[i]
113
+ return state_dict
114
+
115
+
116
+ def state_dict_to_master_params(model, state_dict, use_fp16):
117
+ if use_fp16:
118
+ named_model_params = [
119
+ (name, state_dict[name]) for name, _ in model.named_parameters()
120
+ ]
121
+ param_groups_and_shapes = get_param_groups_and_shapes(named_model_params)
122
+ master_params = make_master_params(param_groups_and_shapes)
123
+ else:
124
+ master_params = [state_dict[name] for name, _ in model.named_parameters()]
125
+ return master_params
126
+
127
+
128
+ def zero_master_grads(master_params):
129
+ for param in master_params:
130
+ param.grad = None
131
+
132
+
133
+ def zero_grad(model_params):
134
+ for param in model_params:
135
+ # Taken from https://pytorch.org/docs/stable/_modules/torch/optim/optimizer.html#Optimizer.add_param_group
136
+ if param.grad is not None:
137
+ param.grad.detach_()
138
+ param.grad.zero_()
139
+
140
+
141
+ def param_grad_or_zeros(param):
142
+ if param.grad is not None:
143
+ return param.grad.data.detach()
144
+ else:
145
+ return th.zeros_like(param)
146
+
147
+
148
+ class MixedPrecisionTrainer:
149
+ def __init__(
150
+ self,
151
+ *,
152
+ model,
153
+ use_fp16=False,
154
+ fp16_scale_growth=1e-3,
155
+ initial_lg_loss_scale=INITIAL_LOG_LOSS_SCALE,
156
+ ):
157
+ self.model = model
158
+ self.use_fp16 = use_fp16
159
+ self.fp16_scale_growth = fp16_scale_growth
160
+
161
+ self.model_params = list(self.model.parameters())
162
+ self.master_params = self.model_params
163
+ self.param_groups_and_shapes = None
164
+ self.lg_loss_scale = initial_lg_loss_scale
165
+
166
+ if self.use_fp16:
167
+ self.param_groups_and_shapes = get_param_groups_and_shapes(
168
+ self.model.named_parameters()
169
+ )
170
+ self.master_params = make_master_params(self.param_groups_and_shapes)
171
+ self.model.convert_to_fp16()
172
+
173
+ def zero_grad(self):
174
+ zero_grad(self.model_params)
175
+
176
+ def backward(self, loss: th.Tensor):
177
+ if self.use_fp16:
178
+ loss_scale = 2 ** self.lg_loss_scale
179
+ (loss * loss_scale).backward()
180
+ else:
181
+ loss.backward()
182
+
183
+ def optimize(self, opt: th.optim.Optimizer):
184
+ if self.use_fp16:
185
+ return self._optimize_fp16(opt)
186
+ else:
187
+ return self._optimize_normal(opt)
188
+
189
+ def _optimize_fp16(self, opt: th.optim.Optimizer):
190
+ logger.logkv_mean("lg_loss_scale", self.lg_loss_scale)
191
+ model_grads_to_master_grads(self.param_groups_and_shapes, self.master_params)
192
+ grad_norm, param_norm = self._compute_norms(grad_scale=2 ** self.lg_loss_scale)
193
+ if check_overflow(grad_norm):
194
+ self.lg_loss_scale -= 1
195
+ logger.log(f"Found NaN, decreased lg_loss_scale to {self.lg_loss_scale}")
196
+ zero_master_grads(self.master_params)
197
+ return False
198
+
199
+ logger.logkv_mean("grad_norm", grad_norm)
200
+ logger.logkv_mean("param_norm", param_norm)
201
+
202
+ self.master_params[0].grad.mul_(1.0 / (2 ** self.lg_loss_scale))
203
+ opt.step()
204
+ zero_master_grads(self.master_params)
205
+ master_params_to_model_params(self.param_groups_and_shapes, self.master_params)
206
+ self.lg_loss_scale += self.fp16_scale_growth
207
+ return True
208
+
209
+ def _optimize_normal(self, opt: th.optim.Optimizer):
210
+ grad_norm, param_norm = self._compute_norms()
211
+ logger.logkv_mean("grad_norm", grad_norm)
212
+ logger.logkv_mean("param_norm", param_norm)
213
+ opt.step()
214
+ return True
215
+
216
+ def _compute_norms(self, grad_scale=1.0):
217
+ grad_norm = 0.0
218
+ param_norm = 0.0
219
+ for p in self.master_params:
220
+ with th.no_grad():
221
+ param_norm += th.norm(p, p=2, dtype=th.float32).item() ** 2
222
+ if p.grad is not None:
223
+ grad_norm += th.norm(p.grad, p=2, dtype=th.float32).item() ** 2
224
+ return np.sqrt(grad_norm) / grad_scale, np.sqrt(param_norm)
225
+
226
+ def master_params_to_state_dict(self, master_params):
227
+ return master_params_to_state_dict(
228
+ self.model, self.param_groups_and_shapes, master_params, self.use_fp16
229
+ )
230
+
231
+ def state_dict_to_master_params(self, state_dict):
232
+ return state_dict_to_master_params(self.model, state_dict, self.use_fp16)
233
+
234
+
235
+ def check_overflow(value):
236
+ return (value == float("inf")) or (value == -float("inf")) or (value != value)
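A brief sketch of how MixedPrecisionTrainer is meant to be driven, following the zero_grad / backward / optimize pattern the class exposes (optimize steps the optimizer only when no fp16 overflow was detected). The toy model and optimizer below are illustrative, not part of the repository; use_fp16=True additionally requires the model to implement convert_to_fp16().

# Sketch: one training step with MixedPrecisionTrainer (illustrative model and optimizer).
import torch as th
import torch.nn as nn
from house_diffusion.fp16_util import MixedPrecisionTrainer

model = nn.Linear(16, 2)                                          # toy stand-in for the transformer
trainer = MixedPrecisionTrainer(model=model, use_fp16=False)
opt = th.optim.AdamW(trainer.master_params, lr=1e-4)

x, target = th.randn(8, 16), th.randn(8, 2)
trainer.zero_grad()
loss = nn.functional.mse_loss(model(x), target)
trainer.backward(loss)                                            # scales the loss when use_fp16=True
took_step = trainer.optimize(opt)                                 # False only if an fp16 overflow was found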
house_diffusion/gaussian_diffusion.py ADDED
@@ -0,0 +1,1013 @@
1
+ """
2
+ This code started out as a PyTorch port of Ho et al's diffusion models:
3
+ https://github.com/hojonathanho/diffusion/blob/1e0dceb3b3495bbe19116a5e1b3596cd0706c543/diffusion_tf/diffusion_utils_2.py
4
+
5
+ Docstrings have been added, as well as DDIM sampling and a new collection of beta schedules.
6
+ """
7
+
8
+ import enum
9
+ import math
10
+
11
+ import numpy as np
12
+ import torch as th
13
+
14
+ from .nn import mean_flat
15
+ from .losses import normal_kl, discretized_gaussian_log_likelihood
16
+ from tqdm.auto import tqdm
17
+
18
+
19
+ def get_named_beta_schedule(schedule_name, num_diffusion_timesteps):
20
+ """
21
+ Get a pre-defined beta schedule for the given name.
22
+
23
+ The beta schedule library consists of beta schedules which remain similar
24
+ in the limit of num_diffusion_timesteps.
25
+ Beta schedules may be added, but should not be removed or changed once
26
+ they are committed to maintain backwards compatibility.
27
+ """
28
+ if schedule_name == "linear":
29
+ # Linear schedule from Ho et al, extended to work for any number of
30
+ # diffusion steps.
31
+ scale = 1000 / num_diffusion_timesteps
32
+ beta_start = scale * 0.0001
33
+ beta_end = scale * 0.02
34
+ return np.linspace(
35
+ beta_start, beta_end, num_diffusion_timesteps, dtype=np.float64
36
+ )
37
+ elif schedule_name == "cosine":
38
+ print("COSINE")
39
+ return betas_for_alpha_bar(
40
+ num_diffusion_timesteps,
41
+ # lambda t: math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2,
42
+ lambda t: math.cos((t) / 1.000 * math.pi / 2) ** 2,
43
+ )
44
+ else:
45
+ raise NotImplementedError(f"unknown beta schedule: {schedule_name}")
46
+
47
+
48
+ def betas_for_alpha_bar(num_diffusion_timesteps, alpha_bar, max_beta=0.999):
49
+ """
50
+ Create a beta schedule that discretizes the given alpha_t_bar function,
51
+ which defines the cumulative product of (1-beta) over time from t = [0,1].
52
+
53
+ :param num_diffusion_timesteps: the number of betas to produce.
54
+ :param alpha_bar: a lambda that takes an argument t from 0 to 1 and
55
+ produces the cumulative product of (1-beta) up to that
56
+ part of the diffusion process.
57
+ :param max_beta: the maximum beta to use; use values lower than 1 to
58
+ prevent singularities.
59
+ """
60
+ betas = []
61
+ for i in range(num_diffusion_timesteps):
62
+ t1 = i / num_diffusion_timesteps
63
+ t2 = (i + 1) / num_diffusion_timesteps
64
+ betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta))
65
+ return np.array(betas)
66
+
67
+
68
+ class ModelMeanType(enum.Enum):
69
+ """
70
+ Which type of output the model predicts.
71
+ """
72
+
73
+ PREVIOUS_X = enum.auto() # the model predicts x_{t-1}
74
+ START_X = enum.auto() # the model predicts x_0
75
+ EPSILON = enum.auto() # the model predicts epsilon
76
+
77
+
78
+ class ModelVarType(enum.Enum):
79
+ """
80
+ What is used as the model's output variance.
81
+
82
+ The LEARNED_RANGE option has been added to allow the model to predict
83
+ values between FIXED_SMALL and FIXED_LARGE, making its job easier.
84
+ """
85
+
86
+ LEARNED = enum.auto()
87
+ FIXED_SMALL = enum.auto()
88
+ FIXED_LARGE = enum.auto()
89
+ LEARNED_RANGE = enum.auto()
90
+
91
+
92
+ class LossType(enum.Enum):
93
+ MSE = enum.auto() # use raw MSE loss (and KL when learning variances)
94
+ RESCALED_MSE = (
95
+ enum.auto()
96
+ ) # use raw MSE loss (with RESCALED_KL when learning variances)
97
+ KL = enum.auto() # use the variational lower-bound
98
+ RESCALED_KL = enum.auto() # like KL, but rescale to estimate the full VLB
99
+
100
+ def is_vb(self):
101
+ return self == LossType.KL or self == LossType.RESCALED_KL
102
+
103
+
104
+ class GaussianDiffusion:
105
+ """
106
+ Utilities for training and sampling diffusion models.
107
+
108
+ Ported directly from here, and then adapted over time to further experimentation.
109
+ https://github.com/hojonathanho/diffusion/blob/1e0dceb3b3495bbe19116a5e1b3596cd0706c543/diffusion_tf/diffusion_utils_2.py#L42
110
+
111
+ :param betas: a 1-D numpy array of betas for each diffusion timestep,
112
+ starting at T and going to 1.
113
+ :param model_mean_type: a ModelMeanType determining what the model outputs.
114
+ :param model_var_type: a ModelVarType determining how variance is output.
115
+ :param loss_type: a LossType determining the loss function to use.
116
+ :param rescale_timesteps: if True, pass floating point timesteps into the
117
+ model so that they are always scaled like in the
118
+ original paper (0 to 1000).
119
+ """
120
+
121
+ def __init__(
122
+ self,
123
+ *,
124
+ betas,
125
+ model_mean_type,
126
+ model_var_type,
127
+ loss_type,
128
+ rescale_timesteps=False,
129
+ ):
130
+ self.model_mean_type = model_mean_type
131
+ self.model_var_type = model_var_type
132
+ self.loss_type = loss_type
133
+ self.rescale_timesteps = rescale_timesteps
134
+
135
+ # Use float64 for accuracy.
136
+ betas = np.array(betas, dtype=np.float64)
137
+ self.betas = betas
138
+ assert len(betas.shape) == 1, "betas must be 1-D"
139
+ assert (betas > 0).all() and (betas <= 1).all()
140
+
141
+ self.num_timesteps = int(betas.shape[0])
142
+
143
+ alphas = 1.0 - betas
144
+ self.alphas_cumprod = np.cumprod(alphas, axis=0)
145
+ self.alphas_cumprod_prev = np.append(1.0, self.alphas_cumprod[:-1])
146
+ self.alphas_cumprod_next = np.append(self.alphas_cumprod[1:], 0.0)
147
+ assert self.alphas_cumprod_prev.shape == (self.num_timesteps,)
148
+
149
+ # calculations for diffusion q(x_t | x_{t-1}) and others
150
+ self.sqrt_alphas_cumprod = np.sqrt(self.alphas_cumprod)
151
+ self.sqrt_one_minus_alphas_cumprod = np.sqrt(1.0 - self.alphas_cumprod)
152
+ self.log_one_minus_alphas_cumprod = np.log(1.0 - self.alphas_cumprod)
153
+ self.sqrt_recip_alphas_cumprod = np.sqrt(1.0 / self.alphas_cumprod)
154
+ self.sqrt_recipm1_alphas_cumprod = np.sqrt(1.0 / self.alphas_cumprod - 1)
155
+
156
+ # calculations for posterior q(x_{t-1} | x_t, x_0)
157
+ self.posterior_variance = (
158
+ betas * (1.0 - self.alphas_cumprod_prev) / (1.0 - self.alphas_cumprod)
159
+ )
160
+ # log calculation clipped because the posterior variance is 0 at the
161
+ # beginning of the diffusion chain.
162
+ self.posterior_log_variance_clipped = np.log(
163
+ np.append(self.posterior_variance[1], self.posterior_variance[1:])
164
+ )
165
+ self.posterior_mean_coef1 = (
166
+ betas * np.sqrt(self.alphas_cumprod_prev) / (1.0 - self.alphas_cumprod)
167
+ )
168
+ self.posterior_mean_coef2 = (
169
+ (1.0 - self.alphas_cumprod_prev)
170
+ * np.sqrt(alphas)
171
+ / (1.0 - self.alphas_cumprod)
172
+ )
173
+
174
+ def q_mean_variance(self, x_start, t):
175
+ """
176
+ Get the distribution q(x_t | x_0).
177
+
178
+ :param x_start: the [N x C x ...] tensor of noiseless inputs.
179
+ :param t: the number of diffusion steps (minus 1). Here, 0 means one step.
180
+ :return: A tuple (mean, variance, log_variance), all of x_start's shape.
181
+ """
182
+ mean = (
183
+ _extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start
184
+ )
185
+ variance = _extract_into_tensor(1.0 - self.alphas_cumprod, t, x_start.shape)
186
+ log_variance = _extract_into_tensor(
187
+ self.log_one_minus_alphas_cumprod, t, x_start.shape
188
+ )
189
+ return mean, variance, log_variance
190
+
191
+ def q_sample(self, x_start, t, noise=None):
192
+ """
193
+ Diffuse the data for a given number of diffusion steps.
194
+
195
+ In other words, sample from q(x_t | x_0).
196
+
197
+ :param x_start: the initial data batch.
198
+ :param t: the number of diffusion steps (minus 1). Here, 0 means one step.
199
+ :param noise: if specified, the split-out normal noise.
200
+ :return: A noisy version of x_start.
201
+ """
202
+ if noise is None:
203
+ noise = th.randn_like(x_start)
204
+ assert noise.shape == x_start.shape
205
+ return (
206
+ _extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start
207
+ + _extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape)
208
+ * noise
209
+ )
210
+
211
+ def q_posterior_mean_variance(self, x_start, x_t, t):
212
+ """
213
+ Compute the mean and variance of the diffusion posterior:
214
+
215
+ q(x_{t-1} | x_t, x_0)
216
+
217
+ """
218
+ assert x_start.shape == x_t.shape
219
+ posterior_mean = (
220
+ _extract_into_tensor(self.posterior_mean_coef1, t, x_t.shape) * x_start
221
+ + _extract_into_tensor(self.posterior_mean_coef2, t, x_t.shape) * x_t
222
+ )
223
+ posterior_variance = _extract_into_tensor(self.posterior_variance, t, x_t.shape)
224
+ posterior_log_variance_clipped = _extract_into_tensor(
225
+ self.posterior_log_variance_clipped, t, x_t.shape
226
+ )
227
+ assert (
228
+ posterior_mean.shape[0]
229
+ == posterior_variance.shape[0]
230
+ == posterior_log_variance_clipped.shape[0]
231
+ == x_start.shape[0]
232
+ )
233
+ return posterior_mean, posterior_variance, posterior_log_variance_clipped
234
+
235
+ def p_mean_variance(
236
+ self, model, x, t, clip_denoised=True, denoised_fn=None, model_kwargs=None, analog_bit=None
237
+ ):
238
+ """
239
+ Apply the model to get p(x_{t-1} | x_t), as well as a prediction of
240
+ the initial x, x_0.
241
+
242
+ :param model: the model, which takes a signal and a batch of timesteps
243
+ as input.
244
+ :param x: the [N x C x ...] tensor at time t.
245
+ :param t: a 1-D Tensor of timesteps.
246
+ :param clip_denoised: if True, clip the denoised signal into [-1, 1].
247
+ :param denoised_fn: if not None, a function which applies to the
248
+ x_start prediction before it is used to sample. Applies before
249
+ clip_denoised.
250
+ :param model_kwargs: if not None, a dict of extra keyword arguments to
251
+ pass to the model. This can be used for conditioning.
252
+ :return: a dict with the following keys:
253
+ - 'mean': the model mean output.
254
+ - 'variance': the model variance output.
255
+ - 'log_variance': the log of 'variance'.
256
+ - 'pred_xstart': the prediction for x_0.
257
+ """
258
+ if model_kwargs is None:
259
+ model_kwargs = {}
260
+
261
+ B, C = x.shape[:2]
262
+ assert t.shape == (B,)
263
+ xtalpha = _extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x.shape).permute([0,2,1])
264
+ epsalpha = _extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x.shape).permute([0,2,1])
265
+ # print("TTTTTTTTTTTTTTTTTTTT", t)
266
+ model_output_dec, model_output_bin = model(x, self._scale_timesteps(t), xtalpha=xtalpha, epsalpha=epsalpha, is_syn=True, **model_kwargs)
267
+ model_output = model_output_dec
268
+
269
+ if analog_bit:
270
+ predict_descrete = 0
271
+ else:
272
+ predict_descrete = 32
273
+
274
+ if t[0] < predict_descrete:
275
+ def bin2dec(b, bits):
276
+ mask = 2 ** th.arange(bits - 1, -1, -1).to(b.device, b.dtype)
277
+ return th.sum(mask * b, -1)
278
+ model_output_bin[model_output_bin>0] = 1
279
+ model_output_bin[model_output_bin<=0] = 0
280
+ model_output_bin = bin2dec(model_output_bin.round().int().permute([0,2,1]).reshape(model_output_bin.shape[0],
281
+ model_output_bin.shape[2], 2, 8), 8).permute([0,2,1])
282
+
283
+ model_output_bin = ((model_output_bin/256) - 0.5) * 2
284
+ model_output = model_output_bin
285
+
286
+ if self.model_var_type in [ModelVarType.LEARNED, ModelVarType.LEARNED_RANGE]:
287
+ assert model_output.shape == (B, C * 2, *x.shape[2:])
288
+ model_output, model_var_values = th.split(model_output, C, dim=1)
289
+ if self.model_var_type == ModelVarType.LEARNED:
290
+ model_log_variance = model_var_values
291
+ model_variance = th.exp(model_log_variance)
292
+ else:
293
+ min_log = _extract_into_tensor(
294
+ self.posterior_log_variance_clipped, t, x.shape
295
+ )
296
+ max_log = _extract_into_tensor(np.log(self.betas), t, x.shape)
297
+ # The model_var_values is [-1, 1] for [min_var, max_var].
298
+ frac = (model_var_values + 1) / 2
299
+ model_log_variance = frac * max_log + (1 - frac) * min_log
300
+ model_variance = th.exp(model_log_variance)
301
+ else:
302
+ model_variance, model_log_variance = {
303
+ # for fixedlarge, we set the initial (log-)variance like so
304
+ # to get a better decoder log likelihood.
305
+ ModelVarType.FIXED_LARGE: (
306
+ np.append(self.posterior_variance[1], self.betas[1:]),
307
+ np.log(np.append(self.posterior_variance[1], self.betas[1:])),
308
+ ),
309
+ ModelVarType.FIXED_SMALL: (
310
+ self.posterior_variance,
311
+ self.posterior_log_variance_clipped,
312
+ ),
313
+ }[self.model_var_type]
314
+ model_variance = _extract_into_tensor(model_variance, t, x.shape)
315
+ model_log_variance = _extract_into_tensor(model_log_variance, t, x.shape)
316
+
317
+ def process_xstart(x):
318
+ if denoised_fn is not None:
319
+ x = denoised_fn(x)
320
+ if clip_denoised:
321
+ return x.clamp(-1, 1)
322
+ return x
323
+
324
+ if t[0] >= predict_descrete:
325
+ if self.model_mean_type == ModelMeanType.PREVIOUS_X:
326
+ pred_xstart = process_xstart(
327
+ self._predict_xstart_from_xprev(x_t=x, t=t, xprev=model_output)
328
+ )
329
+ model_mean = model_output
330
+ elif self.model_mean_type in [ModelMeanType.START_X, ModelMeanType.EPSILON]:
331
+ if self.model_mean_type == ModelMeanType.START_X:
332
+ pred_xstart = process_xstart(model_output)
333
+ else:
334
+ pred_xstart = process_xstart(
335
+ self._predict_xstart_from_eps(x_t=x, t=t, eps=model_output)
336
+ )
337
+ model_mean, _, _ = self.q_posterior_mean_variance(
338
+ x_start=pred_xstart, x_t=x, t=t
339
+ )
340
+ else:
341
+ raise NotImplementedError(self.model_mean_type)
342
+ else:
343
+ pred_xstart = process_xstart(model_output)
344
+ model_mean, _, _ = self.q_posterior_mean_variance(
345
+ x_start=pred_xstart, x_t=x, t=t
346
+ )
347
+
348
+ assert (
349
+ model_mean.shape == model_log_variance.shape == pred_xstart.shape == x.shape
350
+ )
351
+ return {
352
+ "mean": model_mean,
353
+ "variance": model_variance,
354
+ "log_variance": model_log_variance,
355
+ "pred_xstart": pred_xstart,
356
+ }
357
+
358
+ def _predict_xstart_from_eps(self, x_t, t, eps):
359
+ assert x_t.shape == eps.shape
360
+ return (
361
+ _extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t
362
+ - _extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * eps
363
+ )
364
+
365
+ def _predict_xstart_from_xprev(self, x_t, t, xprev):
366
+ assert x_t.shape == xprev.shape
367
+ return ( # (xprev - coef2*x_t) / coef1
368
+ _extract_into_tensor(1.0 / self.posterior_mean_coef1, t, x_t.shape) * xprev
369
+ - _extract_into_tensor(
370
+ self.posterior_mean_coef2 / self.posterior_mean_coef1, t, x_t.shape
371
+ )
372
+ * x_t
373
+ )
374
+
375
+ def _predict_eps_from_xstart(self, x_t, t, pred_xstart):
376
+ return (
377
+ _extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t
378
+ - pred_xstart
379
+ ) / _extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape)
380
+
381
+ def _scale_timesteps(self, t):
382
+ if self.rescale_timesteps:
383
+ return t.float() * (1000.0 / self.num_timesteps)
384
+ return t
385
+
386
+ def condition_mean(self, cond_fn, p_mean_var, x, t, model_kwargs=None):
387
+ """
388
+ Compute the mean for the previous step, given a function cond_fn that
389
+ computes the gradient of a conditional log probability with respect to
390
+ x. In particular, cond_fn computes grad(log(p(y|x))), and we want to
391
+ condition on y.
392
+
393
+ This uses the conditioning strategy from Sohl-Dickstein et al. (2015).
394
+ """
395
+ gradient = cond_fn(x, self._scale_timesteps(t), **model_kwargs)
396
+ new_mean = (
397
+ p_mean_var["mean"].float() + p_mean_var["variance"] * gradient.float()
398
+ )
399
+ return new_mean
400
+
401
+ def condition_score(self, cond_fn, p_mean_var, x, t, model_kwargs=None):
402
+ """
403
+ Compute what the p_mean_variance output would have been, should the
404
+ model's score function be conditioned by cond_fn.
405
+
406
+ See condition_mean() for details on cond_fn.
407
+
408
+ Unlike condition_mean(), this instead uses the conditioning strategy
409
+ from Song et al (2020).
410
+ """
411
+ alpha_bar = _extract_into_tensor(self.alphas_cumprod, t, x.shape)
412
+
413
+ eps = self._predict_eps_from_xstart(x, t, p_mean_var["pred_xstart"])
414
+ eps = eps - (1 - alpha_bar).sqrt() * cond_fn(
415
+ x, self._scale_timesteps(t), **model_kwargs
416
+ )
417
+
418
+ out = p_mean_var.copy()
419
+ out["pred_xstart"] = self._predict_xstart_from_eps(x, t, eps)
420
+ out["mean"], _, _ = self.q_posterior_mean_variance(
421
+ x_start=out["pred_xstart"], x_t=x, t=t
422
+ )
423
+ return out
424
+
425
+ def p_sample(
426
+ self,
427
+ model,
428
+ x,
429
+ t,
430
+ clip_denoised=True,
431
+ denoised_fn=None,
432
+ cond_fn=None,
433
+ model_kwargs=None,
434
+ analog_bit=None,
435
+ ):
436
+ """
437
+ Sample x_{t-1} from the model at the given timestep.
438
+
439
+ :param model: the model to sample from.
440
+ :param x: the current tensor at x_{t-1}.
441
+ :param t: the value of t, starting at 0 for the first diffusion step.
442
+ :param clip_denoised: if True, clip the x_start prediction to [-1, 1].
443
+ :param denoised_fn: if not None, a function which applies to the
444
+ x_start prediction before it is used to sample.
445
+ :param cond_fn: if not None, this is a gradient function that acts
446
+ similarly to the model.
447
+ :param model_kwargs: if not None, a dict of extra keyword arguments to
448
+ pass to the model. This can be used for conditioning.
449
+ :return: a dict containing the following keys:
450
+ - 'sample': a random sample from the model.
451
+ - 'pred_xstart': a prediction of x_0.
452
+ """
453
+ out = self.p_mean_variance(
454
+ model,
455
+ x,
456
+ t,
457
+ clip_denoised=clip_denoised,
458
+ denoised_fn=denoised_fn,
459
+ model_kwargs=model_kwargs,
460
+ analog_bit=analog_bit,
461
+ )
462
+ noise = th.randn_like(x)
463
+ nonzero_mask = (
464
+ (t != 0).float().view(-1, *([1] * (len(x.shape) - 1)))
465
+ ) # no noise when t == 0
466
+ if cond_fn is not None:
467
+ out["mean"] = self.condition_mean(
468
+ cond_fn, out, x, t, model_kwargs=model_kwargs
469
+ )
470
+ sample = out["mean"] + nonzero_mask * th.exp(0.5 * out["log_variance"]) * noise
471
+ return {"sample": sample, "pred_xstart": out["pred_xstart"]}
472
+
473
+ def p_sample_loop(
474
+ self,
475
+ model,
476
+ shape,
477
+ noise=None,
478
+ clip_denoised=True,
479
+ denoised_fn=None,
480
+ cond_fn=None,
481
+ model_kwargs=None,
482
+ device=None,
483
+ progress=False,
484
+ analog_bit=None,
485
+ ):
486
+ """
487
+ Generate samples from the model.
488
+
489
+ :param model: the model module.
490
+ :param shape: the shape of the samples, (N, C, H, W).
491
+ :param noise: if specified, the noise from the encoder to sample.
492
+ Should be of the same shape as `shape`.
493
+ :param clip_denoised: if True, clip x_start predictions to [-1, 1].
494
+ :param denoised_fn: if not None, a function which applies to the
495
+ x_start prediction before it is used to sample.
496
+ :param cond_fn: if not None, this is a gradient function that acts
497
+ similarly to the model.
498
+ :param model_kwargs: if not None, a dict of extra keyword arguments to
499
+ pass to the model. This can be used for conditioning.
500
+ :param device: if specified, the device to create the samples on.
501
+ If not specified, use a model parameter's device.
502
+ :param progress: if True, show a tqdm progress bar.
503
+ :return: a non-differentiable stack of samples collected from the last diffusion steps (loop index i > 970).
504
+ """
505
+ myfinal = []
506
+ final = None
507
+ for i, sample in tqdm(enumerate(self.p_sample_loop_progressive(
508
+ model,
509
+ shape,
510
+ noise=noise,
511
+ clip_denoised=clip_denoised,
512
+ denoised_fn=denoised_fn,
513
+ cond_fn=cond_fn,
514
+ model_kwargs=model_kwargs,
515
+ device=device,
516
+ progress=progress,
517
+ analog_bit=analog_bit,
518
+ ))):
519
+ if i > 970:  # keep only the samples from the last diffusion steps
520
+ myfinal.append(sample['sample'])
521
+ final = sample
522
+ return th.stack(myfinal)
523
+ # return final["sample"]
524
+
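A minimal usage sketch for p_sample_loop (not part of this commit): `diffusion`, `model`, `data_shape`, and `model_kwargs` are placeholders for the objects that app.py actually constructs, and the assumed coordinate shape is only illustrative.

def sample_floorplan(diffusion, model, data_shape, model_kwargs):
    # p_sample_loop returns a stack of samples collected from the last
    # diffusion steps (loop index i > 970); the final element is the most
    # fully denoised one.
    samples = diffusion.p_sample_loop(
        model,
        data_shape,                 # e.g. (batch, 2, num_points) for 2-D corner coordinates (assumed shape)
        clip_denoised=True,
        model_kwargs=model_kwargs,  # conditioning inputs such as room types and attention masks
        analog_bit=False,
    )
    return samples[-1]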
525
+ def p_sample_loop_progressive(
526
+ self,
527
+ model,
528
+ shape,
529
+ noise=None,
530
+ clip_denoised=True,
531
+ denoised_fn=None,
532
+ cond_fn=None,
533
+ model_kwargs=None,
534
+ device=None,
535
+ progress=False,
536
+ analog_bit=None,
537
+ ):
538
+ """
539
+ Generate samples from the model and yield intermediate samples from
540
+ each timestep of diffusion.
541
+
542
+ Arguments are the same as p_sample_loop().
543
+ Returns a generator over dicts, where each dict is the return value of
544
+ p_sample().
545
+ """
546
+ if device is None:
547
+ device = next(model.parameters()).device
548
+ assert isinstance(shape, (tuple, list))
549
+ if noise is not None:
550
+ img = noise
551
+ else:
552
+ img = th.randn(*shape, device=device)
553
+ indices = list(range(self.num_timesteps))[::-1]
554
+
555
+ if progress:
556
+ # Lazy import so that we don't depend on tqdm.
557
+ from tqdm.auto import tqdm
558
+ indices = tqdm(indices)
559
+
560
+ for i in indices:
561
+ t = th.tensor([i] * shape[0], device=device)
562
+ with th.no_grad():
563
+ out = self.p_sample(
564
+ model,
565
+ img,
566
+ t,
567
+ clip_denoised=clip_denoised,
568
+ denoised_fn=denoised_fn,
569
+ cond_fn=cond_fn,
570
+ model_kwargs=model_kwargs,
571
+ analog_bit=analog_bit,
572
+ )
573
+ yield out
574
+ img = out["sample"]
575
+
576
+ def ddim_sample(
577
+ self,
578
+ model,
579
+ x,
580
+ t,
581
+ clip_denoised=True,
582
+ denoised_fn=None,
583
+ cond_fn=None,
584
+ model_kwargs=None,
585
+ eta=0.0,
586
+ ):
587
+ """
588
+ Sample x_{t-1} from the model using DDIM.
589
+
590
+ Same usage as p_sample().
591
+ """
592
+ out = self.p_mean_variance(
593
+ model,
594
+ x,
595
+ t,
596
+ clip_denoised=clip_denoised,
597
+ denoised_fn=denoised_fn,
598
+ model_kwargs=model_kwargs,
599
+ )
600
+ if cond_fn is not None:
601
+ out = self.condition_score(cond_fn, out, x, t, model_kwargs=model_kwargs)
602
+
603
+ # Usually our model outputs epsilon, but we re-derive it
604
+ # in case we used x_start or x_prev prediction.
605
+ eps = self._predict_eps_from_xstart(x, t, out["pred_xstart"])
606
+
607
+ alpha_bar = _extract_into_tensor(self.alphas_cumprod, t, x.shape)
608
+ alpha_bar_prev = _extract_into_tensor(self.alphas_cumprod_prev, t, x.shape)
609
+ sigma = (
610
+ eta
611
+ * th.sqrt((1 - alpha_bar_prev) / (1 - alpha_bar))
612
+ * th.sqrt(1 - alpha_bar / alpha_bar_prev)
613
+ )
614
+ # Equation 12.
615
+ noise = th.randn_like(x)
616
+ mean_pred = (
617
+ out["pred_xstart"] * th.sqrt(alpha_bar_prev)
618
+ + th.sqrt(1 - alpha_bar_prev - sigma ** 2) * eps
619
+ )
620
+ nonzero_mask = (
621
+ (t != 0).float().view(-1, *([1] * (len(x.shape) - 1)))
622
+ ) # no noise when t == 0
623
+ sample = mean_pred + nonzero_mask * sigma * noise
624
+ return {"sample": sample, "pred_xstart": out["pred_xstart"]}
625
+
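As a quick hedged check of the Equation 12 noise scale computed above: eta = 0 gives the deterministic DDIM update, while larger eta re-introduces noise. The alpha-bar values below are arbitrary illustrative numbers.

import math

def ddim_sigma(eta, alpha_bar, alpha_bar_prev):
    # sigma_t = eta * sqrt((1 - a_prev) / (1 - a)) * sqrt(1 - a / a_prev)
    return (
        eta
        * math.sqrt((1 - alpha_bar_prev) / (1 - alpha_bar))
        * math.sqrt(1 - alpha_bar / alpha_bar_prev)
    )

print(ddim_sigma(0.0, 0.5, 0.7))  # 0.0 -> deterministic step
print(ddim_sigma(1.0, 0.5, 0.7))  # ~0.414 -> DDPM-like stochastic step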
626
+ def ddim_reverse_sample(
627
+ self,
628
+ model,
629
+ x,
630
+ t,
631
+ clip_denoised=True,
632
+ denoised_fn=None,
633
+ model_kwargs=None,
634
+ eta=0.0,
635
+ ):
636
+ """
637
+ Sample x_{t+1} from the model using DDIM reverse ODE.
638
+ """
639
+ assert eta == 0.0, "Reverse ODE only for deterministic path"
640
+ out = self.p_mean_variance(
641
+ model,
642
+ x,
643
+ t,
644
+ clip_denoised=clip_denoised,
645
+ denoised_fn=denoised_fn,
646
+ model_kwargs=model_kwargs,
647
+ )
648
+ # Usually our model outputs epsilon, but we re-derive it
649
+ # in case we used x_start or x_prev prediction.
650
+ eps = (
651
+ _extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x.shape) * x
652
+ - out["pred_xstart"]
653
+ ) / _extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x.shape)
654
+ alpha_bar_next = _extract_into_tensor(self.alphas_cumprod_next, t, x.shape)
655
+
656
+ # Equation 12. reversed
657
+ mean_pred = (
658
+ out["pred_xstart"] * th.sqrt(alpha_bar_next)
659
+ + th.sqrt(1 - alpha_bar_next) * eps
660
+ )
661
+
662
+ return {"sample": mean_pred, "pred_xstart": out["pred_xstart"]}
663
+
664
+ def ddim_sample_loop(
665
+ self,
666
+ model,
667
+ shape,
668
+ noise=None,
669
+ clip_denoised=True,
670
+ denoised_fn=None,
671
+ cond_fn=None,
672
+ model_kwargs=None,
673
+ device=None,
674
+ progress=False,
675
+ eta=0.0,
676
+ ):
677
+ """
678
+ Generate samples from the model using DDIM.
679
+
680
+ Same usage as p_sample_loop(); returns only the final DDIM sample, stacked with a leading dimension of 1.
681
+ """
682
+ # old code
683
+ # myfinal = []
684
+ # for i, sample in tqdm(enumerate(self.ddim_sample_loop_progressive(
685
+ # model,
686
+ # shape,
687
+ # noise=noise,
688
+ # clip_denoised=clip_denoised,
689
+ # denoised_fn=denoised_fn,
690
+ # cond_fn=cond_fn,
691
+ # model_kwargs=model_kwargs,
692
+ # device=device,
693
+ # progress=progress,
694
+ # eta=eta,
695
+ # ))):
696
+ # # if i>998:
697
+ # if i>498:
698
+ # # if i>98:
699
+ # myfinal.append(sample['sample'])
700
+ # return th.stack(myfinal)
701
+
702
+ # final = None
703
+ # for sample in self.ddim_sample_loop_progressive(
704
+ # model,
705
+ # shape,
706
+ # noise=noise,
707
+ # clip_denoised=clip_denoised,
708
+ # denoised_fn=denoised_fn,
709
+ # cond_fn=cond_fn,
710
+ # model_kwargs=model_kwargs,
711
+ # device=device,
712
+ # progress=progress,
713
+ # eta=eta,
714
+ # ):
715
+ # final = sample
716
+ # return final["sample"]
717
+
718
+ samples_list = list(tqdm(self.ddim_sample_loop_progressive(
719
+ model,
720
+ shape,
721
+ noise=noise,
722
+ clip_denoised=clip_denoised,
723
+ denoised_fn=denoised_fn,
724
+ cond_fn=cond_fn,
725
+ model_kwargs=model_kwargs,
726
+ device=device,
727
+ progress=progress,
728
+ eta=eta,
729
+ )))
730
+
731
+ last_sample = samples_list[-1]['sample']
732
+
733
+ return th.stack([last_sample])
734
+
735
+ def ddim_sample_loop_progressive(
736
+ self,
737
+ model,
738
+ shape,
739
+ noise=None,
740
+ clip_denoised=True,
741
+ denoised_fn=None,
742
+ cond_fn=None,
743
+ model_kwargs=None,
744
+ device=None,
745
+ progress=False,
746
+ eta=0.0,
747
+ ):
748
+ """
749
+ Use DDIM to sample from the model and yield intermediate samples from
750
+ each timestep of DDIM.
751
+
752
+ Same usage as p_sample_loop_progressive().
753
+ """
754
+ if device is None:
755
+ device = next(model.parameters()).device
756
+ assert isinstance(shape, (tuple, list))
757
+ if noise is not None:
758
+ img = noise
759
+ else:
760
+ img = th.randn(*shape, device=device)
761
+ indices = list(range(self.num_timesteps))[::-1]
762
+
763
+ if progress:
764
+ # Lazy import so that we don't depend on tqdm.
765
+ from tqdm.auto import tqdm
766
+
767
+ indices = tqdm(indices)
768
+
769
+ for i in indices:
770
+ t = th.tensor([i] * shape[0], device=device)
771
+ with th.no_grad():
772
+ out = self.ddim_sample(
773
+ model,
774
+ img,
775
+ t,
776
+ clip_denoised=clip_denoised,
777
+ denoised_fn=denoised_fn,
778
+ cond_fn=cond_fn,
779
+ model_kwargs=model_kwargs,
780
+ eta=eta,
781
+ )
782
+ yield out
783
+ img = out["sample"]
784
+
785
+ def _vb_terms_bpd(
786
+ self, model, x_start, x_t, t, padding_mask, clip_denoised=True, model_kwargs=None,
787
+ ):
788
+ """
789
+ Get a term for the variational lower-bound.
790
+
791
+ The resulting units are bits (rather than nats, as one might expect).
792
+ This allows for comparison to other papers.
793
+
794
+ :return: a dict with the following keys:
795
+ - 'output': a shape [N] tensor of NLLs or KLs.
796
+ - 'pred_xstart': the x_0 predictions.
797
+ """
798
+ true_mean, _, true_log_variance_clipped = self.q_posterior_mean_variance(
799
+ x_start=x_start, x_t=x_t, t=t
800
+ )
801
+ out = self.p_mean_variance(
802
+ model, x_t, t, clip_denoised=clip_denoised, model_kwargs=model_kwargs
803
+ )
804
+ kl = normal_kl(
805
+ true_mean, true_log_variance_clipped, out["mean"], out["log_variance"]
806
+ )
807
+ kl = mean_flat(kl, padding_mask) / np.log(2.0)
808
+
809
+ decoder_nll = -discretized_gaussian_log_likelihood(
810
+ x_start, means=out["mean"], log_scales=0.5 * out["log_variance"]
811
+ )
812
+ assert decoder_nll.shape == x_start.shape
813
+ decoder_nll = mean_flat(decoder_nll, padding_mask) / np.log(2.0)
814
+
815
+ # NOTE: the upstream code returned the decoder NLL at the first timestep
816
+ # (see the commented th.where below); this version always returns the KL term.
817
+ # output = th.where((t == 0), decoder_nll, kl)
818
+ output = kl
819
+ return {"output": output, "pred_xstart": out["pred_xstart"]}
820
+
821
+ def training_losses(self, model, x_start, t, model_kwargs, analog_bit, noise=None):
822
+ """
823
+ Compute training losses for a single timestep.
824
+
825
+ :param model: the model to evaluate loss on.
826
+ :param x_start: the [N x C x ...] tensor of inputs.
827
+ :param t: a batch of timestep indices.
828
+ :param model_kwargs: if not None, a dict of extra keyword arguments to
829
+ pass to the model. This can be used for conditioning.
830
+ :param noise: if specified, the specific Gaussian noise to try to remove.
831
+ :return: a dict with the key "loss" containing a tensor of shape [N].
832
+ Some mean or variance settings may also have other keys.
833
+ """
834
+ if model_kwargs is None:
835
+ model_kwargs = {}
836
+ if noise is None:
837
+ noise = th.randn_like(x_start)
838
+ x_t = self.q_sample(x_start, t, noise=noise)
839
+
840
+ terms = {}
841
+ tmp_mask = (1 - model_kwargs['src_key_padding_mask'])
842
+
843
+ if self.loss_type == LossType.KL or self.loss_type == LossType.RESCALED_KL:
844
+ terms["loss"] = self._vb_terms_bpd(
845
+ model=model,
846
+ x_start=x_start,
847
+ x_t=x_t,
848
+ padding_mask = tmp_mask,
849
+ t=t,
850
+ clip_denoised=False,
851
+ model_kwargs=model_kwargs,
852
+ )["output"]
853
+ if self.loss_type == LossType.RESCALED_KL:
854
+ terms["loss"] *= self.num_timesteps
855
+ elif self.loss_type == LossType.MSE or self.loss_type == LossType.RESCALED_MSE:
856
+ xtalpha = _extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape).permute([0,2,1])
857
+ epsalpha = _extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape).permute([0,2,1])
858
+ model_output_dec, model_output_bin = model(x_t, self._scale_timesteps(t), xtalpha=xtalpha, epsalpha=epsalpha, **model_kwargs)
859
+ # model_output_dec = model(x_t, self._scale_timesteps(t), **model_kwargs)
860
+
861
+ # NOTE: this learned-variance branch still refers to a single `model_output`,
+ # which is not defined after the two-headed (dec/bin) model call above.
+ if self.model_var_type in [
862
+ ModelVarType.LEARNED,
863
+ ModelVarType.LEARNED_RANGE,
864
+ ]:
865
+ B, C = x_t.shape[:2]
866
+ assert model_output.shape == (B, C * 2, *x_t.shape[2:])
867
+ model_output, model_var_values = th.split(model_output, C, dim=1)
868
+ # Learn the variance using the variational bound, but don't let
869
+ # it affect our mean prediction.
870
+ frozen_out = th.cat([model_output.detach(), model_var_values], dim=1)
871
+ terms["vb"] = self._vb_terms_bpd(
872
+ model=lambda *args, r=frozen_out: r,
873
+ x_start=x_start,
874
+ x_t=x_t,
875
+ padding_mask = tmp_mask,
876
+ t=t,
877
+ clip_denoised=False,
878
+ )["output"]
879
+ if self.loss_type == LossType.RESCALED_MSE:
880
+ # Divide by 1000 for equivalence with initial implementation.
881
+ # Without a factor of 1/1000, the VB term hurts the MSE term.
882
+ terms["vb"] *= self.num_timesteps / 1000.0
883
+
884
+ target = {
885
+ ModelMeanType.PREVIOUS_X: self.q_posterior_mean_variance(
886
+ x_start=x_start, x_t=x_t, t=t
887
+ )[0],
888
+ ModelMeanType.START_X: x_start,
889
+ ModelMeanType.EPSILON: noise,
890
+ }[self.model_mean_type]
891
+
892
+ if not analog_bit:
893
+ def dec2bin(xinp, bits):
894
+ mask = 2 ** th.arange(bits - 1, -1, -1).to(xinp.device, xinp.dtype)
895
+ return xinp.unsqueeze(-1).bitwise_and(mask).ne(0).float()
896
+ bin_target = x_start.detach()
897
+ bin_target = (bin_target/2 + 0.5) # -> [0,1]
898
+ bin_target = bin_target * 256 #-> [0, 256]
899
+ bin_target = dec2bin(bin_target.permute([0,2,1]).round().int(), 8)
900
+ bin_target = bin_target.reshape([target.shape[0], target.shape[2], 16]).permute([0,2,1])
901
+ t_weights = (t < 10).to(x_t.device).unsqueeze(1).unsqueeze(2)  # weight only near-final timesteps; device-agnostic instead of .cuda()
902
+ t_weights = t_weights * (t_weights.shape[0]/max(1, t_weights.sum()))
903
+ bin_target[bin_target==0] = -1
904
+ assert model_output_bin.shape == bin_target.shape
905
+
906
+ assert model_output_dec.shape == target.shape == x_start.shape
907
+
908
+ if not analog_bit:
909
+ terms["mse_bin"] = mean_flat(((bin_target - model_output_bin) ** 2) * t_weights, tmp_mask)
910
+ terms["mse_dec"] = mean_flat(((target - model_output_dec) ** 2), tmp_mask)
911
+
912
+ if "vb" in terms:
913
+ terms["loss"] = terms["mse"] + terms["vb"]
914
+ else:
915
+ if not analog_bit:
916
+ terms["loss"] = terms["mse_dec"] + terms["mse_bin"]
917
+ else:
918
+ terms["loss"] = terms["mse_dec"]
919
+ else:
920
+ raise NotImplementedError(self.loss_type)
921
+
922
+ return terms
923
+
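To make the binary target above concrete, here is a hedged, standalone rendition of the dec2bin conversion used in training_losses: a coordinate in [-1, 1] is mapped to [0, 256], rounded, and expanded into eight sign bits with zeros remapped to -1. The tensor shapes are simplified relative to the batched code above.

import torch as th

def dec2bin(xinp, bits):
    # per-element binary expansion, most significant bit first
    mask = 2 ** th.arange(bits - 1, -1, -1).to(xinp.device, xinp.dtype)
    return xinp.unsqueeze(-1).bitwise_and(mask).ne(0).float()

coord = th.tensor([0.5])              # one coordinate in [-1, 1]
scaled = (coord / 2 + 0.5) * 256      # -> 192.0 in [0, 256]
bits = dec2bin(scaled.round().int(), 8)
bits[bits == 0] = -1
print(bits)                           # [[ 1.,  1., -1., -1., -1., -1., -1., -1.]] since 192 = 0b11000000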
924
+ def _prior_bpd(self, x_start):
925
+ """
926
+ Get the prior KL term for the variational lower-bound, measured in
927
+ bits-per-dim.
928
+
929
+ This term can't be optimized, as it only depends on the encoder.
930
+
931
+ :param x_start: the [N x C x ...] tensor of inputs.
932
+ :return: a batch of [N] KL values (in bits), one per batch element.
933
+ """
934
+ batch_size = x_start.shape[0]
935
+ t = th.tensor([self.num_timesteps - 1] * batch_size, device=x_start.device)
936
+ qt_mean, _, qt_log_variance = self.q_mean_variance(x_start, t)
937
+ kl_prior = normal_kl(
938
+ mean1=qt_mean, logvar1=qt_log_variance, mean2=0.0, logvar2=0.0
939
+ )
940
+ return mean_flat(kl_prior) / np.log(2.0)
941
+
942
+ def calc_bpd_loop(self, model, x_start, clip_denoised=True, model_kwargs=None):
943
+ """
944
+ Compute the entire variational lower-bound, measured in bits-per-dim,
945
+ as well as other related quantities.
946
+
947
+ :param model: the model to evaluate loss on.
948
+ :param x_start: the [N x C x ...] tensor of inputs.
949
+ :param clip_denoised: if True, clip denoised samples.
950
+ :param model_kwargs: if not None, a dict of extra keyword arguments to
951
+ pass to the model. This can be used for conditioning.
952
+
953
+ :return: a dict containing the following keys:
954
+ - total_bpd: the total variational lower-bound, per batch element.
955
+ - prior_bpd: the prior term in the lower-bound.
956
+ - vb: an [N x T] tensor of terms in the lower-bound.
957
+ - xstart_mse: an [N x T] tensor of x_0 MSEs for each timestep.
958
+ - mse: an [N x T] tensor of epsilon MSEs for each timestep.
959
+ """
960
+ device = x_start.device
961
+ batch_size = x_start.shape[0]
962
+
963
+ vb = []
964
+ xstart_mse = []
965
+ mse = []
966
+ for t in list(range(self.num_timesteps))[::-1]:
967
+ t_batch = th.tensor([t] * batch_size, device=device)
968
+ noise = th.randn_like(x_start)
969
+ x_t = self.q_sample(x_start=x_start, t=t_batch, noise=noise)
970
+ # Calculate VLB term at the current timestep
971
+ with th.no_grad():
972
+ out = self._vb_terms_bpd(
973
+ model,
974
+ x_start=x_start,
975
+ x_t=x_t,
976
+ t=t_batch,
977
+ clip_denoised=clip_denoised,
978
+ model_kwargs=model_kwargs,
979
+ )
980
+ vb.append(out["output"])
981
+ xstart_mse.append(mean_flat((out["pred_xstart"] - x_start) ** 2))
982
+ eps = self._predict_eps_from_xstart(x_t, t_batch, out["pred_xstart"])
983
+ mse.append(mean_flat((eps - noise) ** 2))
984
+
985
+ vb = th.stack(vb, dim=1)
986
+ xstart_mse = th.stack(xstart_mse, dim=1)
987
+ mse = th.stack(mse, dim=1)
988
+
989
+ prior_bpd = self._prior_bpd(x_start)
990
+ total_bpd = vb.sum(dim=1) + prior_bpd
991
+ return {
992
+ "total_bpd": total_bpd,
993
+ "prior_bpd": prior_bpd,
994
+ "vb": vb,
995
+ "xstart_mse": xstart_mse,
996
+ "mse": mse,
997
+ }
998
+
999
+
1000
+ def _extract_into_tensor(arr, timesteps, broadcast_shape):
1001
+ """
1002
+ Extract values from a 1-D numpy array for a batch of indices.
1003
+
1004
+ :param arr: the 1-D numpy array.
1005
+ :param timesteps: a tensor of indices into the array to extract.
1006
+ :param broadcast_shape: a larger shape of K dimensions with the batch
1007
+ dimension equal to the length of timesteps.
1008
+ :return: a tensor of shape [batch_size, 1, ...] where the shape has K dims.
1009
+ """
1010
+ res = th.from_numpy(arr).to(device=timesteps.device)[timesteps].float()
1011
+ while len(res.shape) < len(broadcast_shape):
1012
+ res = res[..., None]
1013
+ return res.expand(broadcast_shape)
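A small hedged illustration of _extract_into_tensor: it gathers one schedule value per batch element and reshapes it so it broadcasts against the sample tensor. The toy beta schedule and shapes below are assumptions, not the values used by the app.

import numpy as np
import torch as th

betas = np.linspace(1e-4, 0.02, 1000)   # toy 1-D schedule
t = th.tensor([0, 500, 999])            # one timestep per batch element
x_shape = (3, 2, 100)                   # e.g. (batch, coords, points)

res = th.from_numpy(betas).to(device=t.device)[t].float()
while len(res.shape) < len(x_shape):
    res = res[..., None]                # append singleton dims until broadcastable
print(res.expand(x_shape).shape)        # torch.Size([3, 2, 100])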
house_diffusion/logger.py ADDED
@@ -0,0 +1,496 @@
1
+ """
2
+ Logger copied from OpenAI baselines to avoid extra RL-based dependencies:
3
+ https://github.com/openai/baselines/blob/ea25b9e8b234e6ee1bca43083f8f3cf974143998/baselines/logger.py
4
+ """
5
+
6
+ import os
7
+ import sys
8
+ import shutil
9
+ import os.path as osp
10
+ import json
11
+ import time
12
+ import datetime
13
+ import tempfile
14
+ import warnings
15
+ from collections import defaultdict
16
+ from contextlib import contextmanager
17
+
18
+ DEBUG = 10
19
+ INFO = 20
20
+ WARN = 30
21
+ ERROR = 40
22
+
23
+ DISABLED = 50
24
+
25
+
26
+ class KVWriter(object):
27
+ def writekvs(self, kvs):
28
+ raise NotImplementedError
29
+
30
+
31
+ class SeqWriter(object):
32
+ def writeseq(self, seq):
33
+ raise NotImplementedError
34
+
35
+
36
+ class HumanOutputFormat(KVWriter, SeqWriter):
37
+ def __init__(self, filename_or_file):
38
+ if isinstance(filename_or_file, str):
39
+ self.file = open(filename_or_file, "wt")
40
+ self.own_file = True
41
+ else:
42
+ assert hasattr(filename_or_file, "read"), (
43
+ "expected file or str, got %s" % filename_or_file
44
+ )
45
+ self.file = filename_or_file
46
+ self.own_file = False
47
+
48
+ def writekvs(self, kvs):
49
+ # Create strings for printing
50
+ key2str = {}
51
+ for (key, val) in sorted(kvs.items()):
52
+ if hasattr(val, "__float__"):
53
+ valstr = "%-8.3g" % val
54
+ else:
55
+ valstr = str(val)
56
+ key2str[self._truncate(key)] = self._truncate(valstr)
57
+
58
+ # Find max widths
59
+ if len(key2str) == 0:
60
+ print("WARNING: tried to write empty key-value dict")
61
+ return
62
+ else:
63
+ keywidth = max(map(len, key2str.keys()))
64
+ valwidth = max(map(len, key2str.values()))
65
+
66
+ # Write out the data
67
+ dashes = "-" * (keywidth + valwidth + 7)
68
+ lines = [dashes]
69
+ for (key, val) in sorted(key2str.items(), key=lambda kv: kv[0].lower()):
70
+ lines.append(
71
+ "| %s%s | %s%s |"
72
+ % (key, " " * (keywidth - len(key)), val, " " * (valwidth - len(val)))
73
+ )
74
+ lines.append(dashes)
75
+ self.file.write("\n".join(lines) + "\n")
76
+
77
+ # Flush the output to the file
78
+ self.file.flush()
79
+
80
+ def _truncate(self, s):
81
+ maxlen = 30
82
+ return s[: maxlen - 3] + "..." if len(s) > maxlen else s
83
+
84
+ def writeseq(self, seq):
85
+ seq = list(seq)
86
+ for (i, elem) in enumerate(seq):
87
+ self.file.write(elem)
88
+ if i < len(seq) - 1: # add space unless this is the last one
89
+ self.file.write(" ")
90
+ self.file.write("\n")
91
+ self.file.flush()
92
+
93
+ def close(self):
94
+ if self.own_file:
95
+ self.file.close()
96
+
97
+
98
+ class JSONOutputFormat(KVWriter):
99
+ def __init__(self, filename):
100
+ self.file = open(filename, "wt")
101
+
102
+ def writekvs(self, kvs):
103
+ for k, v in sorted(kvs.items()):
104
+ if hasattr(v, "dtype"):
105
+ kvs[k] = float(v)
106
+ self.file.write(json.dumps(kvs) + "\n")
107
+ self.file.flush()
108
+
109
+ def close(self):
110
+ self.file.close()
111
+
112
+
113
+ class CSVOutputFormat(KVWriter):
114
+ def __init__(self, filename):
115
+ self.file = open(filename, "w+t")
116
+ self.keys = []
117
+ self.sep = ","
118
+
119
+ def writekvs(self, kvs):
120
+ # Add our current row to the history
121
+ extra_keys = list(kvs.keys() - self.keys)
122
+ extra_keys.sort()
123
+ if extra_keys:
124
+ self.keys.extend(extra_keys)
125
+ self.file.seek(0)
126
+ lines = self.file.readlines()
127
+ self.file.seek(0)
128
+ for (i, k) in enumerate(self.keys):
129
+ if i > 0:
130
+ self.file.write(",")
131
+ self.file.write(k)
132
+ self.file.write("\n")
133
+ for line in lines[1:]:
134
+ self.file.write(line[:-1])
135
+ self.file.write(self.sep * len(extra_keys))
136
+ self.file.write("\n")
137
+ for (i, k) in enumerate(self.keys):
138
+ if i > 0:
139
+ self.file.write(",")
140
+ v = kvs.get(k)
141
+ if v is not None:
142
+ self.file.write(str(v))
143
+ self.file.write("\n")
144
+ self.file.flush()
145
+
146
+ def close(self):
147
+ self.file.close()
148
+
149
+
150
+ class TensorBoardOutputFormat(KVWriter):
151
+ """
152
+ Dumps key/value pairs into TensorBoard's numeric format.
153
+ """
154
+
155
+ def __init__(self, dir):
156
+ os.makedirs(dir, exist_ok=True)
157
+ self.dir = dir
158
+ self.step = 1
159
+ prefix = "events"
160
+ path = osp.join(osp.abspath(dir), prefix)
161
+ import tensorflow as tf
162
+ from tensorflow.python import pywrap_tensorflow
163
+ from tensorflow.core.util import event_pb2
164
+ from tensorflow.python.util import compat
165
+
166
+ self.tf = tf
167
+ self.event_pb2 = event_pb2
168
+ self.pywrap_tensorflow = pywrap_tensorflow
169
+ self.writer = pywrap_tensorflow.EventsWriter(compat.as_bytes(path))
170
+
171
+ def writekvs(self, kvs):
172
+ def summary_val(k, v):
173
+ kwargs = {"tag": k, "simple_value": float(v)}
174
+ return self.tf.Summary.Value(**kwargs)
175
+
176
+ summary = self.tf.Summary(value=[summary_val(k, v) for k, v in kvs.items()])
177
+ event = self.event_pb2.Event(wall_time=time.time(), summary=summary)
178
+ event.step = (
179
+ self.step
180
+ ) # is there any reason why you'd want to specify the step?
181
+ self.writer.WriteEvent(event)
182
+ self.writer.Flush()
183
+ self.step += 1
184
+
185
+ def close(self):
186
+ if self.writer:
187
+ self.writer.Close()
188
+ self.writer = None
189
+
190
+
191
+ def make_output_format(format, ev_dir, log_suffix=""):
192
+ os.makedirs(ev_dir, exist_ok=True)
193
+ if format == "stdout":
194
+ return HumanOutputFormat(sys.stdout)
195
+ elif format == "log":
196
+ return HumanOutputFormat(osp.join(ev_dir, "log%s.txt" % log_suffix))
197
+ elif format == "json":
198
+ return JSONOutputFormat(osp.join(ev_dir, "progress%s.json" % log_suffix))
199
+ elif format == "csv":
200
+ return CSVOutputFormat(osp.join(ev_dir, "progress%s.csv" % log_suffix))
201
+ elif format == "tensorboard":
202
+ return TensorBoardOutputFormat(osp.join(ev_dir, "tb%s" % log_suffix))
203
+ else:
204
+ raise ValueError("Unknown format specified: %s" % (format,))
205
+
206
+
207
+ # ================================================================
208
+ # API
209
+ # ================================================================
210
+
211
+
212
+ def logkv(key, val):
213
+ """
214
+ Log a value of some diagnostic
215
+ Call this once for each diagnostic quantity, each iteration
216
+ If called many times, last value will be used.
217
+ """
218
+ get_current().logkv(key, val)
219
+
220
+
221
+ def logkv_mean(key, val):
222
+ """
223
+ The same as logkv(), but if called many times, values averaged.
224
+ """
225
+ get_current().logkv_mean(key, val)
226
+
227
+
228
+ def logkvs(d):
229
+ """
230
+ Log a dictionary of key-value pairs
231
+ """
232
+ for (k, v) in d.items():
233
+ logkv(k, v)
234
+
235
+
236
+ def dumpkvs():
237
+ """
238
+ Write all of the diagnostics from the current iteration
239
+ """
240
+ return get_current().dumpkvs()
241
+
242
+
243
+ def getkvs():
244
+ return get_current().name2val
245
+
246
+
247
+ def log(*args, level=INFO):
248
+ """
249
+ Write the sequence of args, with no separators, to the console and output files (if you've configured an output file).
250
+ """
251
+ get_current().log(*args, level=level)
252
+
253
+
254
+ def debug(*args):
255
+ log(*args, level=DEBUG)
256
+
257
+
258
+ def info(*args):
259
+ log(*args, level=INFO)
260
+
261
+
262
+ def warn(*args):
263
+ log(*args, level=WARN)
264
+
265
+
266
+ def error(*args):
267
+ log(*args, level=ERROR)
268
+
269
+
270
+ def set_level(level):
271
+ """
272
+ Set logging threshold on current logger.
273
+ """
274
+ get_current().set_level(level)
275
+
276
+
277
+ def set_comm(comm):
278
+ get_current().set_comm(comm)
279
+
280
+
281
+ def get_dir():
282
+ """
283
+ Get directory that log files are being written to.
284
+ will be None if there is no output directory (i.e., if you didn't call start)
285
+ """
286
+ return get_current().get_dir()
287
+
288
+
289
+ record_tabular = logkv
290
+ dump_tabular = dumpkvs
291
+
292
+
293
+ @contextmanager
294
+ def profile_kv(scopename):
295
+ logkey = "wait_" + scopename
296
+ tstart = time.time()
297
+ try:
298
+ yield
299
+ finally:
300
+ get_current().name2val[logkey] += time.time() - tstart
301
+
302
+
303
+ def profile(n):
304
+ """
305
+ Usage:
306
+ @profile("my_func")
307
+ def my_func(): code
308
+ """
309
+
310
+ def decorator_with_name(func):
311
+ def func_wrapper(*args, **kwargs):
312
+ with profile_kv(n):
313
+ return func(*args, **kwargs)
314
+
315
+ return func_wrapper
316
+
317
+ return decorator_with_name
318
+
319
+
320
+ # ================================================================
321
+ # Backend
322
+ # ================================================================
323
+
324
+
325
+ def get_current():
326
+ if Logger.CURRENT is None:
327
+ _configure_default_logger()
328
+
329
+ return Logger.CURRENT
330
+
331
+
332
+ class Logger(object):
333
+ DEFAULT = None # A logger with no output files. (See right below class definition)
334
+ # So that you can still log to the terminal without setting up any output files
335
+ CURRENT = None # Current logger being used by the free functions above
336
+
337
+ def __init__(self, dir, output_formats, comm=None):
338
+ self.name2val = defaultdict(float) # values this iteration
339
+ self.name2cnt = defaultdict(int)
340
+ self.level = INFO
341
+ self.dir = dir
342
+ self.output_formats = output_formats
343
+ self.comm = comm
344
+
345
+ # Logging API, forwarded
346
+ # ----------------------------------------
347
+ def logkv(self, key, val):
348
+ self.name2val[key] = val
349
+
350
+ def logkv_mean(self, key, val):
351
+ oldval, cnt = self.name2val[key], self.name2cnt[key]
352
+ self.name2val[key] = oldval * cnt / (cnt + 1) + val / (cnt + 1)
353
+ self.name2cnt[key] = cnt + 1
354
+
355
+ def dumpkvs(self):
356
+ if self.comm is None:
357
+ d = self.name2val
358
+ else:
359
+ d = mpi_weighted_mean(
360
+ self.comm,
361
+ {
362
+ name: (val, self.name2cnt.get(name, 1))
363
+ for (name, val) in self.name2val.items()
364
+ },
365
+ )
366
+ if self.comm.rank != 0:
367
+ d["dummy"] = 1 # so we don't get a warning about empty dict
368
+ out = d.copy() # Return the dict for unit testing purposes
369
+ for fmt in self.output_formats:
370
+ if isinstance(fmt, KVWriter):
371
+ fmt.writekvs(d)
372
+ self.name2val.clear()
373
+ self.name2cnt.clear()
374
+ return out
375
+
376
+ def log(self, *args, level=INFO):
377
+ if self.level <= level:
378
+ self._do_log(args)
379
+
380
+ # Configuration
381
+ # ----------------------------------------
382
+ def set_level(self, level):
383
+ self.level = level
384
+
385
+ def set_comm(self, comm):
386
+ self.comm = comm
387
+
388
+ def get_dir(self):
389
+ return self.dir
390
+
391
+ def close(self):
392
+ for fmt in self.output_formats:
393
+ fmt.close()
394
+
395
+ # Misc
396
+ # ----------------------------------------
397
+ def _do_log(self, args):
398
+ for fmt in self.output_formats:
399
+ if isinstance(fmt, SeqWriter):
400
+ fmt.writeseq(map(str, args))
401
+
402
+
403
+ def get_rank_without_mpi_import():
404
+ # check environment variables here instead of importing mpi4py
405
+ # to avoid calling MPI_Init() when this module is imported
406
+ for varname in ["PMI_RANK", "OMPI_COMM_WORLD_RANK"]:
407
+ if varname in os.environ:
408
+ return int(os.environ[varname])
409
+ return 0
410
+
411
+
412
+ def mpi_weighted_mean(comm, local_name2valcount):
413
+ """
414
+ Copied from: https://github.com/openai/baselines/blob/ea25b9e8b234e6ee1bca43083f8f3cf974143998/baselines/common/mpi_util.py#L110
415
+ Perform a weighted average over dicts that are each on a different node
416
+ Input: local_name2valcount: dict mapping key -> (value, count)
417
+ Returns: key -> mean
418
+ """
419
+ all_name2valcount = comm.gather(local_name2valcount)
420
+ if comm.rank == 0:
421
+ name2sum = defaultdict(float)
422
+ name2count = defaultdict(float)
423
+ for n2vc in all_name2valcount:
424
+ for (name, (val, count)) in n2vc.items():
425
+ try:
426
+ val = float(val)
427
+ except ValueError:
428
+ if comm.rank == 0:
429
+ warnings.warn(
430
+ "WARNING: tried to compute mean on non-float {}={}".format(
431
+ name, val
432
+ )
433
+ )
434
+ else:
435
+ name2sum[name] += val * count
436
+ name2count[name] += count
437
+ return {name: name2sum[name] / name2count[name] for name in name2sum}
438
+ else:
439
+ return {}
440
+
441
+
442
+ def configure(dir=None, format_strs=None, comm=None, log_suffix=""):
443
+ """
444
+ If comm is provided, average all numerical stats across that comm
445
+ """
446
+ if dir is None:
447
+ dir = os.getenv("OPENAI_LOGDIR")
448
+ if dir is None:
449
+ dir = osp.join(
450
+ # tempfile.gettempdir(),
451
+ 'ckpts',
452
+ datetime.datetime.now().strftime("openai_%Y_%m_%d_%H_%M_%S_%f"),
453
+ )
454
+ assert isinstance(dir, str)
455
+ dir = os.path.expanduser(dir)
456
+ os.makedirs(os.path.expanduser(dir), exist_ok=True)
457
+
458
+ rank = get_rank_without_mpi_import()
459
+ if rank > 0:
460
+ log_suffix = log_suffix + "-rank%03i" % rank
461
+
462
+ if format_strs is None:
463
+ if rank == 0:
464
+ format_strs = os.getenv("OPENAI_LOG_FORMAT", "stdout,log,csv").split(",")
465
+ else:
466
+ format_strs = os.getenv("OPENAI_LOG_FORMAT_MPI", "log").split(",")
467
+ format_strs = filter(None, format_strs)
468
+ output_formats = [make_output_format(f, dir, log_suffix) for f in format_strs]
469
+
470
+ Logger.CURRENT = Logger(dir=dir, output_formats=output_formats, comm=comm)
471
+ if output_formats:
472
+ log("Logging to %s" % dir)
473
+
474
+
475
+ def _configure_default_logger():
476
+ configure()
477
+ Logger.DEFAULT = Logger.CURRENT
478
+
479
+
480
+ def reset():
481
+ if Logger.CURRENT is not Logger.DEFAULT:
482
+ Logger.CURRENT.close()
483
+ Logger.CURRENT = Logger.DEFAULT
484
+ log("Reset logger")
485
+
486
+
487
+ @contextmanager
488
+ def scoped_configure(dir=None, format_strs=None, comm=None):
489
+ prevlogger = Logger.CURRENT
490
+ configure(dir=dir, format_strs=format_strs, comm=comm)
491
+ try:
492
+ yield
493
+ finally:
494
+ Logger.CURRENT.close()
495
+ Logger.CURRENT = prevlogger
496
+
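A hedged sketch of how this logger module is typically driven during a training loop (the directory name and logged values are placeholders):

from house_diffusion import logger

logger.configure(dir="ckpts/demo_run")     # stdout, log.txt and progress.csv outputs by default
logger.log("starting training loop")
for step in range(3):
    logger.logkv("step", step)
    logger.logkv_mean("loss", 0.1 * step)  # averaged if logged several times in one iteration
    logger.dumpkvs()                       # flush this iteration's key/value pairs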
house_diffusion/losses.py ADDED
@@ -0,0 +1,77 @@
1
+ """
2
+ Helpers for various likelihood-based losses. These are ported from the original
3
+ Ho et al. diffusion models codebase:
4
+ https://github.com/hojonathanho/diffusion/blob/1e0dceb3b3495bbe19116a5e1b3596cd0706c543/diffusion_tf/utils.py
5
+ """
6
+
7
+ import numpy as np
8
+
9
+ import torch as th
10
+
11
+
12
+ def normal_kl(mean1, logvar1, mean2, logvar2):
13
+ """
14
+ Compute the KL divergence between two gaussians.
15
+
16
+ Shapes are automatically broadcasted, so batches can be compared to
17
+ scalars, among other use cases.
18
+ """
19
+ tensor = None
20
+ for obj in (mean1, logvar1, mean2, logvar2):
21
+ if isinstance(obj, th.Tensor):
22
+ tensor = obj
23
+ break
24
+ assert tensor is not None, "at least one argument must be a Tensor"
25
+
26
+ # Force variances to be Tensors. Broadcasting helps convert scalars to
27
+ # Tensors, but it does not work for th.exp().
28
+ logvar1, logvar2 = [
29
+ x if isinstance(x, th.Tensor) else th.tensor(x).to(tensor)
30
+ for x in (logvar1, logvar2)
31
+ ]
32
+
33
+ return 0.5 * (
34
+ -1.0
35
+ + logvar2
36
+ - logvar1
37
+ + th.exp(logvar1 - logvar2)
38
+ + ((mean1 - mean2) ** 2) * th.exp(-logvar2)
39
+ )
40
+
41
+
42
+ def approx_standard_normal_cdf(x):
43
+ """
44
+ A fast approximation of the cumulative distribution function of the
45
+ standard normal.
46
+ """
47
+ return 0.5 * (1.0 + th.tanh(np.sqrt(2.0 / np.pi) * (x + 0.044715 * th.pow(x, 3))))
48
+
49
+
50
+ def discretized_gaussian_log_likelihood(x, *, means, log_scales):
51
+ """
52
+ Compute the log-likelihood of a Gaussian distribution discretizing to a
53
+ given image.
54
+
55
+ :param x: the target images. It is assumed that this was uint8 values,
56
+ rescaled to the range [-1, 1].
57
+ :param means: the Gaussian mean Tensor.
58
+ :param log_scales: the Gaussian log stddev Tensor.
59
+ :return: a tensor like x of log probabilities (in nats).
60
+ """
61
+ assert x.shape == means.shape == log_scales.shape
62
+ centered_x = x - means
63
+ inv_stdv = th.exp(-log_scales)
64
+ plus_in = inv_stdv * (centered_x + 1.0 / 255.0)
65
+ cdf_plus = approx_standard_normal_cdf(plus_in)
66
+ min_in = inv_stdv * (centered_x - 1.0 / 255.0)
67
+ cdf_min = approx_standard_normal_cdf(min_in)
68
+ log_cdf_plus = th.log(cdf_plus.clamp(min=1e-12))
69
+ log_one_minus_cdf_min = th.log((1.0 - cdf_min).clamp(min=1e-12))
70
+ cdf_delta = cdf_plus - cdf_min
71
+ log_probs = th.where(
72
+ x < -0.999,
73
+ log_cdf_plus,
74
+ th.where(x > 0.999, log_one_minus_cdf_min, th.log(cdf_delta.clamp(min=1e-12))),
75
+ )
76
+ assert log_probs.shape == x.shape
77
+ return log_probs
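A hedged sanity check for normal_kl: the KL between identical unit Gaussians is zero, and KL(N(0,1) || N(1,1)) is 0.5, matching the closed form implemented above.

import torch as th
from house_diffusion.losses import normal_kl

zero = th.zeros(1)
print(normal_kl(zero, zero, zero, zero))        # tensor([0.])
print(normal_kl(zero, zero, zero + 1.0, zero))  # tensor([0.5000])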
house_diffusion/nn.py ADDED
@@ -0,0 +1,172 @@
1
+ """
2
+ Various utilities for neural networks.
3
+ """
4
+
5
+ import math
6
+
7
+ import torch as th
8
+ import torch.nn as nn
9
+
10
+
11
+ # PyTorch 1.7 has SiLU, but we support PyTorch 1.5.
12
+ class SiLU(nn.Module):
13
+ def forward(self, x):
14
+ return x * th.sigmoid(x)
15
+
16
+
17
+ class GroupNorm32(nn.GroupNorm):
18
+ def forward(self, x):
19
+ return super().forward(x.float()).type(x.dtype)
20
+
21
+
22
+ def conv_nd(dims, *args, **kwargs):
23
+ """
24
+ Create a 1D, 2D, or 3D convolution module.
25
+ """
26
+ if dims == 1:
27
+ return nn.Conv1d(*args, **kwargs)
28
+ elif dims == 2:
29
+ return nn.Conv2d(*args, **kwargs)
30
+ elif dims == 3:
31
+ return nn.Conv3d(*args, **kwargs)
32
+ raise ValueError(f"unsupported dimensions: {dims}")
33
+
34
+
35
+ def linear(*args, **kwargs):
36
+ """
37
+ Create a linear module.
38
+ """
39
+ return nn.Linear(*args, **kwargs)
40
+
41
+
42
+ def avg_pool_nd(dims, *args, **kwargs):
43
+ """
44
+ Create a 1D, 2D, or 3D average pooling module.
45
+ """
46
+ if dims == 1:
47
+ return nn.AvgPool1d(*args, **kwargs)
48
+ elif dims == 2:
49
+ return nn.AvgPool2d(*args, **kwargs)
50
+ elif dims == 3:
51
+ return nn.AvgPool3d(*args, **kwargs)
52
+ raise ValueError(f"unsupported dimensions: {dims}")
53
+
54
+
55
+ def update_ema(target_params, source_params, rate=0.99):
56
+ """
57
+ Update target parameters to be closer to those of source parameters using
58
+ an exponential moving average.
59
+
60
+ :param target_params: the target parameter sequence.
61
+ :param source_params: the source parameter sequence.
62
+ :param rate: the EMA rate (closer to 1 means slower).
63
+ """
64
+ for targ, src in zip(target_params, source_params):
65
+ targ.detach().mul_(rate).add_(src, alpha=1 - rate)
66
+
67
+
68
+ def zero_module(module):
69
+ """
70
+ Zero out the parameters of a module and return it.
71
+ """
72
+ for p in module.parameters():
73
+ p.detach().zero_()
74
+ return module
75
+
76
+
77
+ def scale_module(module, scale):
78
+ """
79
+ Scale the parameters of a module and return it.
80
+ """
81
+ for p in module.parameters():
82
+ p.detach().mul_(scale)
83
+ return module
84
+
85
+
86
+ def mean_flat(tensor, padding_mask):
87
+ """
88
+ Take the mean over all non-batch dimensions, masking out padded elements via padding_mask.
89
+ """
90
+ tensor = tensor * padding_mask.unsqueeze(1)
91
+ tensor = tensor.mean(dim=list(range(1, len(tensor.shape))))/th.sum(padding_mask, dim=1)
92
+ return tensor
93
+
94
+
95
+ def normalization(channels):
96
+ """
97
+ Make a standard normalization layer.
98
+
99
+ :param channels: number of input channels.
100
+ :return: an nn.Module for normalization.
101
+ """
102
+ return GroupNorm32(32, channels)
103
+
104
+
105
+ def timestep_embedding(timesteps, dim, max_period=10000):
106
+ """
107
+ Create sinusoidal timestep embeddings.
108
+
109
+ :param timesteps: a 1-D Tensor of N indices, one per batch element.
110
+ These may be fractional.
111
+ :param dim: the dimension of the output.
112
+ :param max_period: controls the minimum frequency of the embeddings.
113
+ :return: an [N x dim] Tensor of positional embeddings.
114
+ """
115
+ half = dim // 2
116
+ freqs = th.exp(
117
+ -math.log(max_period) * th.arange(start=0, end=half, dtype=th.float32) / half
118
+ ).to(device=timesteps.device)
119
+ args = timesteps[:, None].float() * freqs[None]
120
+ embedding = th.cat([th.cos(args), th.sin(args)], dim=-1)
121
+ if dim % 2:
122
+ embedding = th.cat([embedding, th.zeros_like(embedding[:, :1])], dim=-1)
123
+ return embedding
124
+
125
+
126
+ def checkpoint(func, inputs, params, flag):
127
+ """
128
+ Evaluate a function without caching intermediate activations, allowing for
129
+ reduced memory at the expense of extra compute in the backward pass.
130
+
131
+ :param func: the function to evaluate.
132
+ :param inputs: the argument sequence to pass to `func`.
133
+ :param params: a sequence of parameters `func` depends on but does not
134
+ explicitly take as arguments.
135
+ :param flag: if False, disable gradient checkpointing.
136
+ """
137
+ if flag:
138
+ args = tuple(inputs) + tuple(params)
139
+ return CheckpointFunction.apply(func, len(inputs), *args)
140
+ else:
141
+ return func(*inputs)
142
+
143
+
144
+ class CheckpointFunction(th.autograd.Function):
145
+ @staticmethod
146
+ def forward(ctx, run_function, length, *args):
147
+ ctx.run_function = run_function
148
+ ctx.input_tensors = list(args[:length])
149
+ ctx.input_params = list(args[length:])
150
+ with th.no_grad():
151
+ output_tensors = ctx.run_function(*ctx.input_tensors)
152
+ return output_tensors
153
+
154
+ @staticmethod
155
+ def backward(ctx, *output_grads):
156
+ ctx.input_tensors = [x.detach().requires_grad_(True) for x in ctx.input_tensors]
157
+ with th.enable_grad():
158
+ # Fixes a bug where the first op in run_function modifies the
159
+ # Tensor storage in place, which is not allowed for detach()'d
160
+ # Tensors.
161
+ shallow_copies = [x.view_as(x) for x in ctx.input_tensors]
162
+ output_tensors = ctx.run_function(*shallow_copies)
163
+ input_grads = th.autograd.grad(
164
+ output_tensors,
165
+ ctx.input_tensors + ctx.input_params,
166
+ output_grads,
167
+ allow_unused=True,
168
+ )
169
+ del ctx.input_tensors
170
+ del ctx.input_params
171
+ del output_tensors
172
+ return (None, None) + input_grads
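A hedged shape check for timestep_embedding, the sinusoidal encoding of the diffusion step that the model consumes (the dimension 128 is only an example):

import torch as th
from house_diffusion.nn import timestep_embedding

t = th.tensor([0, 10, 999])            # one diffusion step per batch element
emb = timestep_embedding(t, dim=128)   # concatenated cos/sin features
print(emb.shape)                       # torch.Size([3, 128])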
house_diffusion/resample.py ADDED
@@ -0,0 +1,154 @@
1
+ from abc import ABC, abstractmethod
2
+
3
+ import numpy as np
4
+ import torch as th
5
+ import torch.distributed as dist
6
+
7
+
8
+ def create_named_schedule_sampler(name, diffusion):
9
+ """
10
+ Create a ScheduleSampler from a library of pre-defined samplers.
11
+
12
+ :param name: the name of the sampler.
13
+ :param diffusion: the diffusion object to sample for.
14
+ """
15
+ if name == "uniform":
16
+ return UniformSampler(diffusion)
17
+ elif name == "loss-second-moment":
18
+ return LossSecondMomentResampler(diffusion)
19
+ else:
20
+ raise NotImplementedError(f"unknown schedule sampler: {name}")
21
+
22
+
23
+ class ScheduleSampler(ABC):
24
+ """
25
+ A distribution over timesteps in the diffusion process, intended to reduce
26
+ variance of the objective.
27
+
28
+ By default, samplers perform unbiased importance sampling, in which the
29
+ objective's mean is unchanged.
30
+ However, subclasses may override sample() to change how the resampled
31
+ terms are reweighted, allowing for actual changes in the objective.
32
+ """
33
+
34
+ @abstractmethod
35
+ def weights(self):
36
+ """
37
+ Get a numpy array of weights, one per diffusion step.
38
+
39
+ The weights needn't be normalized, but must be positive.
40
+ """
41
+
42
+ def sample(self, batch_size, device):
43
+ """
44
+ Importance-sample timesteps for a batch.
45
+
46
+ :param batch_size: the number of timesteps.
47
+ :param device: the torch device to save to.
48
+ :return: a tuple (timesteps, weights):
49
+ - timesteps: a tensor of timestep indices.
50
+ - weights: a tensor of weights to scale the resulting losses.
51
+ """
52
+ w = self.weights()
53
+ p = w / np.sum(w)
54
+ indices_np = np.random.choice(len(p), size=(batch_size,), p=p)
55
+ indices = th.from_numpy(indices_np).long().to(device)
56
+ weights_np = 1 / (len(p) * p[indices_np])
57
+ weights = th.from_numpy(weights_np).float().to(device)
58
+ return indices, weights
59
+
60
+
61
+ class UniformSampler(ScheduleSampler):
62
+ def __init__(self, diffusion):
63
+ self.diffusion = diffusion
64
+ self._weights = np.ones([diffusion.num_timesteps])
65
+
66
+ def weights(self):
67
+ return self._weights
68
+
69
+
70
+ class LossAwareSampler(ScheduleSampler):
71
+ def update_with_local_losses(self, local_ts, local_losses):
72
+ """
73
+ Update the reweighting using losses from a model.
74
+
75
+ Call this method from each rank with a batch of timesteps and the
76
+ corresponding losses for each of those timesteps.
77
+ This method will perform synchronization to make sure all of the ranks
78
+ maintain the exact same reweighting.
79
+
80
+ :param local_ts: an integer Tensor of timesteps.
81
+ :param local_losses: a 1D Tensor of losses.
82
+ """
83
+ batch_sizes = [
84
+ th.tensor([0], dtype=th.int32, device=local_ts.device)
85
+ for _ in range(dist.get_world_size())
86
+ ]
87
+ dist.all_gather(
88
+ batch_sizes,
89
+ th.tensor([len(local_ts)], dtype=th.int32, device=local_ts.device),
90
+ )
91
+
92
+ # Pad all_gather batches to be the maximum batch size.
93
+ batch_sizes = [x.item() for x in batch_sizes]
94
+ max_bs = max(batch_sizes)
95
+
96
+ timestep_batches = [th.zeros(max_bs).to(local_ts) for bs in batch_sizes]
97
+ loss_batches = [th.zeros(max_bs).to(local_losses) for bs in batch_sizes]
98
+ dist.all_gather(timestep_batches, local_ts)
99
+ dist.all_gather(loss_batches, local_losses)
100
+ timesteps = [
101
+ x.item() for y, bs in zip(timestep_batches, batch_sizes) for x in y[:bs]
102
+ ]
103
+ losses = [x.item() for y, bs in zip(loss_batches, batch_sizes) for x in y[:bs]]
104
+ self.update_with_all_losses(timesteps, losses)
105
+
106
+ @abstractmethod
107
+ def update_with_all_losses(self, ts, losses):
108
+ """
109
+ Update the reweighting using losses from a model.
110
+
111
+ Sub-classes should override this method to update the reweighting
112
+ using losses from the model.
113
+
114
+ This method directly updates the reweighting without synchronizing
115
+ between workers. It is called by update_with_local_losses from all
116
+ ranks with identical arguments. Thus, it should have deterministic
117
+ behavior to maintain state across workers.
118
+
119
+ :param ts: a list of int timesteps.
120
+ :param losses: a list of float losses, one per timestep.
121
+ """
122
+
123
+
124
+ class LossSecondMomentResampler(LossAwareSampler):
125
+ def __init__(self, diffusion, history_per_term=10, uniform_prob=0.001):
126
+ self.diffusion = diffusion
127
+ self.history_per_term = history_per_term
128
+ self.uniform_prob = uniform_prob
129
+ self._loss_history = np.zeros(
130
+ [diffusion.num_timesteps, history_per_term], dtype=np.float64
131
+ )
132
+ self._loss_counts = np.zeros([diffusion.num_timesteps], dtype=int)  # use the builtin int; the np.int alias is removed in recent NumPy
133
+
134
+ def weights(self):
135
+ if not self._warmed_up():
136
+ return np.ones([self.diffusion.num_timesteps], dtype=np.float64)
137
+ weights = np.sqrt(np.mean(self._loss_history ** 2, axis=-1))
138
+ weights /= np.sum(weights)
139
+ weights *= 1 - self.uniform_prob
140
+ weights += self.uniform_prob / len(weights)
141
+ return weights
142
+
143
+ def update_with_all_losses(self, ts, losses):
144
+ for t, loss in zip(ts, losses):
145
+ if self._loss_counts[t] == self.history_per_term:
146
+ # Shift out the oldest loss term.
147
+ self._loss_history[t, :-1] = self._loss_history[t, 1:]
148
+ self._loss_history[t, -1] = loss
149
+ else:
150
+ self._loss_history[t, self._loss_counts[t]] = loss
151
+ self._loss_counts[t] += 1
152
+
153
+ def _warmed_up(self):
154
+ return (self._loss_counts == self.history_per_term).all()
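A hedged sketch of drawing training timesteps with the uniform sampler; the stand-in class only provides the num_timesteps attribute that the samplers read, instead of a full diffusion object.

import torch as th
from house_diffusion.resample import create_named_schedule_sampler

class ToyDiffusion:
    num_timesteps = 1000   # stand-in for a GaussianDiffusion instance

sampler = create_named_schedule_sampler("uniform", ToyDiffusion())
t, weights = sampler.sample(batch_size=8, device=th.device("cpu"))
print(t.shape, weights.shape)          # torch.Size([8]) torch.Size([8])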
house_diffusion/respace.py ADDED
@@ -0,0 +1,128 @@
1
+ import numpy as np
2
+ import torch as th
3
+
4
+ from .gaussian_diffusion import GaussianDiffusion
5
+
6
+
7
+ def space_timesteps(num_timesteps, section_counts):
8
+ """
9
+ Create a list of timesteps to use from an original diffusion process,
10
+ given the number of timesteps we want to take from equally-sized portions
11
+ of the original process.
12
+
13
+ For example, if there's 300 timesteps and the section counts are [10,15,20]
14
+ then the first 100 timesteps are strided to be 10 timesteps, the second 100
15
+ are strided to be 15 timesteps, and the final 100 are strided to be 20.
16
+
17
+ If the stride is a string starting with "ddim", then the fixed striding
18
+ from the DDIM paper is used, and only one section is allowed.
19
+
20
+ :param num_timesteps: the number of diffusion steps in the original
21
+ process to divide up.
22
+ :param section_counts: either a list of numbers, or a string containing
23
+ comma-separated numbers, indicating the step count
24
+ per section. As a special case, use "ddimN" where N
25
+ is a number of steps to use the striding from the
26
+ DDIM paper.
27
+ :return: a set of diffusion steps from the original process to use.
28
+ """
29
+ if isinstance(section_counts, str):
30
+ if section_counts.startswith("ddim"):
31
+ desired_count = int(section_counts[len("ddim") :])
32
+ for i in range(1, num_timesteps):
33
+ if len(range(0, num_timesteps, i)) == desired_count:
34
+ return set(range(0, num_timesteps, i))
35
+ raise ValueError(
36
+ f"cannot create exactly {num_timesteps} steps with an integer stride"
37
+ )
38
+ section_counts = [int(x) for x in section_counts.split(",")]
39
+ size_per = num_timesteps // len(section_counts)
40
+ extra = num_timesteps % len(section_counts)
41
+ start_idx = 0
42
+ all_steps = []
43
+ for i, section_count in enumerate(section_counts):
44
+ size = size_per + (1 if i < extra else 0)
45
+ if size < section_count:
46
+ raise ValueError(
47
+ f"cannot divide section of {size} steps into {section_count}"
48
+ )
49
+ if section_count <= 1:
50
+ frac_stride = 1
51
+ else:
52
+ frac_stride = (size - 1) / (section_count - 1)
53
+ cur_idx = 0.0
54
+ taken_steps = []
55
+ for _ in range(section_count):
56
+ taken_steps.append(start_idx + round(cur_idx))
57
+ cur_idx += frac_stride
58
+ all_steps += taken_steps
59
+ start_idx += size
60
+ return set(all_steps)
61
+
62
+
63
+ class SpacedDiffusion(GaussianDiffusion):
64
+ """
65
+ A diffusion process which can skip steps in a base diffusion process.
66
+
67
+ :param use_timesteps: a collection (sequence or set) of timesteps from the
68
+ original diffusion process to retain.
69
+ :param kwargs: the kwargs to create the base diffusion process.
70
+ """
71
+
72
+ def __init__(self, use_timesteps, **kwargs):
73
+ self.use_timesteps = set(use_timesteps)
74
+ self.timestep_map = []
75
+ self.original_num_steps = len(kwargs["betas"])
76
+
77
+ base_diffusion = GaussianDiffusion(**kwargs) # pylint: disable=missing-kwoa
78
+ last_alpha_cumprod = 1.0
79
+ new_betas = []
80
+ for i, alpha_cumprod in enumerate(base_diffusion.alphas_cumprod):
81
+ if i in self.use_timesteps:
82
+ new_betas.append(1 - alpha_cumprod / last_alpha_cumprod)
83
+ last_alpha_cumprod = alpha_cumprod
84
+ self.timestep_map.append(i)
85
+ kwargs["betas"] = np.array(new_betas)
86
+ super().__init__(**kwargs)
87
+
88
+ def p_mean_variance(
89
+ self, model, *args, **kwargs
90
+ ): # pylint: disable=signature-differs
91
+ return super().p_mean_variance(self._wrap_model(model), *args, **kwargs)
92
+
93
+ def training_losses(
94
+ self, model, *args, **kwargs
95
+ ): # pylint: disable=signature-differs
96
+ return super().training_losses(self._wrap_model(model), *args, **kwargs)
97
+
98
+ def condition_mean(self, cond_fn, *args, **kwargs):
99
+ return super().condition_mean(self._wrap_model(cond_fn), *args, **kwargs)
100
+
101
+ def condition_score(self, cond_fn, *args, **kwargs):
102
+ return super().condition_score(self._wrap_model(cond_fn), *args, **kwargs)
103
+
104
+ def _wrap_model(self, model):
105
+ if isinstance(model, _WrappedModel):
106
+ return model
107
+ return _WrappedModel(
108
+ model, self.timestep_map, self.rescale_timesteps, self.original_num_steps
109
+ )
110
+
111
+ def _scale_timesteps(self, t):
112
+ # Scaling is done by the wrapped model.
113
+ return t
114
+
115
+
116
+ class _WrappedModel:
117
+ def __init__(self, model, timestep_map, rescale_timesteps, original_num_steps):
118
+ self.model = model
119
+ self.timestep_map = timestep_map
120
+ self.rescale_timesteps = rescale_timesteps
121
+ self.original_num_steps = original_num_steps
122
+
123
+ def __call__(self, x, ts, **kwargs):
124
+ map_tensor = th.tensor(self.timestep_map, device=ts.device, dtype=ts.dtype)
125
+ new_ts = map_tensor[ts]
126
+ if self.rescale_timesteps:
127
+ new_ts = new_ts.float() * (1000.0 / self.original_num_steps)
128
+ return self.model(x, new_ts, **kwargs)
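A hedged example of space_timesteps, which selects the subset of the original steps that SpacedDiffusion keeps; the step counts below are illustrative.

from house_diffusion.respace import space_timesteps

print(sorted(space_timesteps(100, [10])))    # [0, 11, 22, 33, 44, 55, 66, 77, 88, 99]
print(len(space_timesteps(1000, "ddim50")))  # 50, using the fixed DDIM striding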
house_diffusion/rplanhg_datasets.py ADDED
@@ -0,0 +1,620 @@
1
+ import math
2
+ import random
3
+ import torch as th
4
+
5
+ from PIL import Image, ImageDraw
6
+ import blobfile as bf
7
+ from mpi4py import MPI
8
+ import numpy as np
9
+ from torch.utils.data import DataLoader, Dataset
10
+ from glob import glob
11
+ import json
12
+ import os
13
+ import cv2 as cv
14
+ from tqdm import tqdm
15
+ from shapely import geometry as gm
16
+ from shapely.ops import unary_union
17
+ from collections import defaultdict
18
+ import copy
19
+
20
+
21
+ def load_rplanhg_data(
22
+ batch_size,
23
+ analog_bit,
24
+ target_set,
25
+ set_name='train',
26
+ ):
27
+ """
28
+ For a dataset, create a generator over (shapes, kwargs) pairs.
29
+ """
30
+ # set_name = 'train'
31
+ set_name = 'eval'  # NOTE: overrides the set_name argument; this demo always loads the eval split
32
+ print(f"loading {set_name} of target set {target_set}")
33
+ deterministic = False if set_name == 'train' else True
34
+ dataset = RPlanhgDataset(set_name, analog_bit, target_set)
35
+ if deterministic:
36
+ loader = DataLoader(
37
+ dataset, batch_size=batch_size, shuffle=False, num_workers=2, drop_last=False
38
+ )
39
+ else:
40
+ loader = DataLoader(
41
+ dataset, batch_size=batch_size, shuffle=True, num_workers=2, drop_last=False
42
+ )
43
+ while True:
44
+ yield from loader
45
+
46
+
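Since load_rplanhg_data above is an infinite generator, a hedged consumption sketch follows (the argument values are illustrative, and the preprocessed files under processed_rplan/ must already exist):

data = load_rplanhg_data(batch_size=16, analog_bit=False, target_set=8, set_name='eval')
sample, model_kwargs = next(data)   # one (shapes, kwargs) batch, per the docstring above
print(sample.shape, list(model_kwargs.keys()))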
47
+ def make_non_manhattan(poly, polygon, house_poly):
48
+ dist = abs(poly[2] - poly[0])
49
+ direction = np.argmin(dist)
50
+ center = poly.mean(0)
51
+ min = poly.min(0)
52
+ max = poly.max(0)
53
+
54
+ tmp = np.random.randint(3, 7)
55
+ new_min_y = center[1] - (max[1] - min[1]) / tmp
56
+ new_max_y = center[1] + (max[1] - min[1]) / tmp
57
+ if center[0] < 128:
58
+ new_min_x = min[0] - (max[0] - min[0]) / np.random.randint(2, 5)
59
+ new_max_x = center[0]
60
+ poly1 = [[min[0], min[1]], [new_min_x, new_min_y], [new_min_x, new_max_y], [min[0], max[1]], [max[0], max[1]],
61
+ [max[0], min[1]]]
62
+ else:
63
+ new_min_x = center[0]
64
+ new_max_x = max[0] + (max[0] - min[0]) / np.random.randint(2, 5)
65
+ poly1 = [[min[0], min[1]], [min[0], max[1]], [max[0], max[1]], [new_max_x, new_max_y], [new_max_x, new_min_y],
66
+ [max[0], min[1]]]
67
+
68
+ new_min_x = center[0] - (max[0] - min[0]) / tmp
69
+ new_max_x = center[0] + (max[0] - min[0]) / tmp
70
+ if center[1] < 128:
71
+ new_min_y = min[1] - (max[1] - min[1]) / np.random.randint(2, 5)
72
+ new_max_y = center[1]
73
+ poly2 = [[min[0], min[1]], [min[0], max[1]], [max[0], max[1]], [max[0], min[1]], [new_max_x, new_min_y],
74
+ [new_min_x, new_min_y]]
75
+ else:
76
+ new_min_y = center[1]
77
+ new_max_y = max[1] + (max[1] - min[1]) / np.random.randint(2, 5)
78
+ poly2 = [[min[0], min[1]], [min[0], max[1]], [new_min_x, new_max_y], [new_max_x, new_max_y], [max[0], max[1]],
79
+ [max[0], min[1]]]
80
+ p1 = gm.Polygon(poly1)
81
+ iou1 = house_poly.intersection(p1).area / p1.area
82
+ p2 = gm.Polygon(poly2)
83
+ iou2 = house_poly.intersection(p2).area / p2.area
84
+ if iou1 > 0.9 and iou2 > 0.9:
85
+ return poly
86
+ if iou1 < iou2:
87
+ return poly1
88
+ else:
89
+ return poly2
90
+
91
+
92
+ get_bin = lambda x, z: [int(y) for y in format(x, 'b').zfill(z)]
93
+ get_one_hot = lambda x, z: np.eye(z)[min(x, z - 1)]
94
+
95
+
96
+ class RPlanhgDataset(Dataset):
97
+ def __init__(self, set_name, analog_bit, target_set, non_manhattan=False):
98
+ super().__init__()
99
+ base_dir = '../datasets/rplan'
100
+ self.non_manhattan = non_manhattan
101
+ self.set_name = set_name
102
+ self.analog_bit = analog_bit
103
+ self.target_set = target_set
104
+ self.subgraphs = []
105
+ self.org_graphs = []
106
+ self.org_houses = []
107
+ max_num_points = 100
108
+ if self.set_name == 'eval':
109
+ cnumber_dist = np.load(f'processed_rplan/rplan_train_{target_set}_cndist.npz', allow_pickle=True)[
110
+ 'cnumber_dist'].item()
111
+ if os.path.exists(f'processed_rplan/rplan_{set_name}_{target_set}.npz'):
112
+ data = np.load(f'processed_rplan/rplan_{set_name}_{target_set}.npz', allow_pickle=True)
113
+ self.graphs = data['graphs']
114
+ self.houses = data['houses']
115
+ self.door_masks = data['door_masks']
116
+ self.self_masks = data['self_masks']
117
+ self.gen_masks = data['gen_masks']
118
+ self.num_coords = 2
119
+ self.max_num_points = max_num_points
120
+ cnumber_dist = np.load(f'processed_rplan/rplan_train_{target_set}_cndist.npz', allow_pickle=True)[
121
+ 'cnumber_dist'].item()
122
+ if self.set_name == 'eval':
123
+ data = np.load(f'processed_rplan/rplan_{set_name}_{target_set}_syn.npz', allow_pickle=True)
124
+ self.syn_graphs = data['graphs']
125
+ self.syn_houses = data['houses']
126
+ self.syn_door_masks = data['door_masks']
127
+ self.syn_self_masks = data['self_masks']
128
+ self.syn_gen_masks = data['gen_masks']
129
+ else:
130
+ with open(f'{base_dir}/list.txt') as f:
131
+ lines = f.readlines()
132
+ cnt = 0
133
+
134
+ # TODO
135
+ failed_plans = []
136
+
137
+ for line in tqdm(lines):
138
+ # cnt=cnt+1
139
+ # file_name = f'{base_dir}/{line[:-1]}'
140
+ # rms_type, fp_eds,rms_bbs,eds_to_rms=reader(file_name)
141
+ # fp_size = len([x for x in rms_type if x != 15 and x != 17])
142
+ # if self.set_name=='train' and fp_size == target_set:
143
+ # continue
144
+ # if self.set_name=='eval' and fp_size != target_set:
145
+ # continue
146
+ # a = [rms_type, rms_bbs, fp_eds, eds_to_rms]
147
+ # self.subgraphs.append(a)
148
+
149
+ # for graph in tqdm(self.subgraphs):
150
+ try:
151
+ cnt = cnt + 1
152
+ file_name = f'{base_dir}/{line[:-1]}'
153
+ rms_type, fp_eds, rms_bbs, eds_to_rms = reader(file_name)
154
+ fp_size = len([x for x in rms_type if x != 15 and x != 17])
155
+ if self.set_name == 'train' and fp_size == target_set:
156
+ continue
157
+ if self.set_name == 'eval' and fp_size != target_set:
158
+ continue
159
+ graph = [rms_type, rms_bbs, fp_eds, eds_to_rms]
160
+ rms_type = graph[0]
161
+ rms_bbs = graph[1]
162
+ fp_eds = graph[2]
163
+ eds_to_rms = graph[3]
164
+ rms_bbs = np.array(rms_bbs)
165
+ fp_eds = np.array(fp_eds)
166
+
167
+ # extract boundary box and centralize
168
+ tl = np.min(rms_bbs[:, :2], 0)
169
+ br = np.max(rms_bbs[:, 2:], 0)
170
+ shift = (tl + br) / 2.0 - 0.5
171
+ rms_bbs[:, :2] -= shift
172
+ rms_bbs[:, 2:] -= shift
173
+ fp_eds[:, :2] -= shift
174
+ fp_eds[:, 2:] -= shift
175
+ tl -= shift
176
+ br -= shift
177
+
178
+ # build input graph
179
+ graph_nodes, graph_edges, rooms_mks = self.build_graph(rms_type, fp_eds, eds_to_rms)
180
+
181
+ house = []
182
+ for room_mask, room_type in zip(rooms_mks, graph_nodes):
183
+ room_mask = room_mask.astype(np.uint8)
184
+ room_mask = cv.resize(room_mask, (256, 256), interpolation=cv.INTER_AREA)
185
+ contours, _ = cv.findContours(room_mask, cv.RETR_TREE, cv.CHAIN_APPROX_SIMPLE)
186
+ contours = contours[0]
187
+ house.append([contours[:, 0, :], room_type])
188
+ self.org_graphs.append(graph_edges)
189
+ self.org_houses.append(house)
190
+
191
+ except IndexError:
192
+ # print(line)
193
+ failed_plans.append(line)
194
+ print("failed: ", failed_plans)
195
+ print("len: ", len(failed_plans))
196
+
197
+ houses = []
198
+ door_masks = []
199
+ self_masks = []
200
+ gen_masks = []
201
+ graphs = []
202
+ if self.set_name == 'train':
203
+ cnumber_dist = defaultdict(list)
204
+
205
+ if self.non_manhattan:
206
+ for h, graph in tqdm(zip(self.org_houses, self.org_graphs), desc='processing dataset'):
207
+ # Generating non-manhattan Balconies
208
+ tmp = []
209
+ for i, room in enumerate(h):
210
+ if room[1] > 10:
211
+ continue
212
+ if len(room[0]) != 4:
213
+ continue
214
+ if np.random.randint(2):
215
+ continue
216
+ poly = gm.Polygon(room[0])
217
+ house_polygon = unary_union([gm.Polygon(room[0]) for room in h])
218
+ room[0] = make_non_manhattan(room[0], poly, house_polygon)
219
+
220
+ for h, graph in tqdm(zip(self.org_houses, self.org_graphs), desc='processing dataset'):
221
+ house = []
222
+ corner_bounds = []
223
+ num_points = 0
224
+ for i, room in enumerate(h):
225
+ if room[1] > 10:
226
+ room[1] = {15: 11, 17: 12, 16: 13}[room[1]]
227
+ room[0] = np.reshape(room[0], [len(room[0]),
228
+ 2]) / 256. - 0.5  # corner list [[x0,y0],[x1,y1],...]; scale from [0, 256] down to [-0.5, 0.5]
229
+ room[0] = room[0] * 2 # map to [-1, 1]
230
+ if self.set_name == 'train':
231
+ cnumber_dist[room[1]].append(len(room[0]))
232
+ # Adding conditions
233
+ num_room_corners = len(room[0])
234
+ rtype = np.repeat(np.array([get_one_hot(room[1], 25)]), num_room_corners, 0)
235
+ room_index = np.repeat(np.array([get_one_hot(len(house) + 1, 32)]), num_room_corners, 0)
236
+ corner_index = np.array([get_one_hot(x, 32) for x in range(num_room_corners)])
237
+ # Src_key_padding_mask
238
+ padding_mask = np.repeat(1, num_room_corners)
239
+ padding_mask = np.expand_dims(padding_mask, 1)
240
+ # Generating corner bounds for attention masks
241
+ connections = np.array([[i, (i + 1) % num_room_corners] for i in range(num_room_corners)])
242
+ connections += num_points
243
+ corner_bounds.append([num_points, num_points + num_room_corners])
244
+ num_points += num_room_corners
245
+ room = np.concatenate((room[0], rtype, corner_index, room_index, padding_mask, connections), 1)
246
+ house.append(room)
247
+
248
+ house_layouts = np.concatenate(house, 0)
249
+ if len(house_layouts) > max_num_points:
250
+ continue
251
+ padding = np.zeros((max_num_points - len(house_layouts), 94))
252
+ gen_mask = np.ones((max_num_points, max_num_points))
253
+ gen_mask[:len(house_layouts), :len(house_layouts)] = 0
254
+ house_layouts = np.concatenate((house_layouts, padding), 0)
255
+
256
+ door_mask = np.ones((max_num_points, max_num_points))
257
+ self_mask = np.ones((max_num_points, max_num_points))
258
+ for i in range(len(corner_bounds)):
259
+ for j in range(len(corner_bounds)):
260
+ if i == j:
261
+ self_mask[corner_bounds[i][0]:corner_bounds[i][1],
262
+ corner_bounds[j][0]:corner_bounds[j][1]] = 0
263
+ elif any(np.equal([i, 1, j], graph).all(1)) or any(np.equal([j, 1, i], graph).all(1)):
264
+ door_mask[corner_bounds[i][0]:corner_bounds[i][1],
265
+ corner_bounds[j][0]:corner_bounds[j][1]] = 0
266
+ houses.append(house_layouts)
267
+ door_masks.append(door_mask)
268
+ self_masks.append(self_mask)
269
+ gen_masks.append(gen_mask)
270
+ graphs.append(graph)
271
+ self.max_num_points = max_num_points
272
+ self.houses = houses
273
+ self.door_masks = door_masks
274
+ self.self_masks = self_masks
275
+ self.gen_masks = gen_masks
276
+ self.num_coords = 2
277
+ self.graphs = graphs
278
+
279
+ # --------------
280
+ # graph_dict = {f'graph_{i}': graph for i, graph in enumerate(self.graphs)}
281
+ for i, graph in enumerate(self.graphs):
282
+ print(f"Graph {i}: shape = {np.shape(graph)}, type = {type(graph)}")
283
+
284
+ # Save each graph individually within a dictionary
285
+ # graph_dict = {f'graph_{i}': graph for i, graph in enumerate(self.graphs)}
286
+
287
+ np.savez_compressed(f'processed_rplan/rplan_{set_name}_{target_set}', graphs=self.graphs,
288
+ houses=self.houses,
289
+ # np.savez_compressed(f'processed_rplan/rplan_{set_name}_{target_set}', **graph_dict, houses=self.houses,
290
+ door_masks=self.door_masks, self_masks=self.self_masks, gen_masks=self.gen_masks)
291
+ if self.set_name == 'train':
292
+ np.savez_compressed(f'processed_rplan/rplan_{set_name}_{target_set}_cndist', cnumber_dist=cnumber_dist)
293
+
294
+ if set_name == 'eval':
295
+ houses = []
296
+ graphs = []
297
+ door_masks = []
298
+ self_masks = []
299
+ gen_masks = []
300
+ len_house_layouts = 0
301
+ for h, graph in tqdm(zip(self.org_houses, self.org_graphs), desc='processing dataset'):
302
+ house = []
303
+ corner_bounds = []
304
+ num_points = 0
305
+ # num_room_corners_total = [cnumber_dist[room[1]][random.randint(0, len(cnumber_dist[room[1]])-1)] for room in h]
306
+ # while np.sum(num_room_corners_total)>=max_num_points:
307
+ # num_room_corners_total = [cnumber_dist[room[1]][random.randint(0, len(cnumber_dist[room[1]])-1)] for room in h]
308
+ num_room_corners_total = []
309
+ for room in h:
310
+ room_type = room[1]
311
+ default_value = 4
312
+ if room_type in cnumber_dist and cnumber_dist[room_type]:
313
+ num_room_corners_total.append(
314
+ cnumber_dist[room_type][random.randint(0, len(cnumber_dist[room_type]) - 1)]
315
+ )
316
+ else:
317
+ # Handle the case where cnumber_dist[room_type] is missing or empty
318
+ print(f"Warning: No data found for room type {room_type}. Assigning default value.")
319
+ default_value = 4 # Assign a reasonable default value or handle accordingly
320
+ num_room_corners_total.append(default_value)
321
+
322
+ while np.sum(num_room_corners_total) >= max_num_points:
323
+ num_room_corners_total = []
324
+ for room in h:
325
+ room_type = room[1]
326
+ if room_type in cnumber_dist and cnumber_dist[room_type]:
327
+ num_room_corners_total.append(
328
+ cnumber_dist[room_type][random.randint(0, len(cnumber_dist[room_type]) - 1)]
329
+ )
330
+ else:
331
+ num_room_corners_total.append(default_value)
332
+
333
+ for i, room in enumerate(h):
334
+ # Adding conditions
335
+ num_room_corners = num_room_corners_total[i]
336
+ rtype = np.repeat(np.array([get_one_hot(room[1], 25)]), num_room_corners, 0)
337
+ room_index = np.repeat(np.array([get_one_hot(len(house) + 1, 32)]), num_room_corners, 0)
338
+ corner_index = np.array([get_one_hot(x, 32) for x in range(num_room_corners)])
339
+ # Src_key_padding_mask
340
+ padding_mask = np.repeat(1, num_room_corners)
341
+ padding_mask = np.expand_dims(padding_mask, 1)
342
+ # Generating corner bounds for attention masks
343
+ connections = np.array([[i, (i + 1) % num_room_corners] for i in range(num_room_corners)])
344
+ connections += num_points
345
+ corner_bounds.append([num_points, num_points + num_room_corners])
346
+ num_points += num_room_corners
347
+ room = np.concatenate((np.zeros([num_room_corners, 2]), rtype, corner_index, room_index,
348
+ padding_mask, connections), 1)
349
+ house.append(room)
350
+
351
+ house_layouts = np.concatenate(house, 0)
352
+ if np.sum([len(room[0]) for room in h]) > max_num_points:
353
+ continue
354
+ padding = np.zeros((max_num_points - len(house_layouts), 94))
355
+ gen_mask = np.ones((max_num_points, max_num_points))
356
+ gen_mask[:len(house_layouts), :len(house_layouts)] = 0
357
+ house_layouts = np.concatenate((house_layouts, padding), 0)
358
+
359
+ door_mask = np.ones((max_num_points, max_num_points))
360
+ self_mask = np.ones((max_num_points, max_num_points))
361
+ for i, room in enumerate(h):
362
+ if room[1] == 1:
363
+ living_room_index = i
364
+ break
365
+ for i in range(len(corner_bounds)):
366
+ is_connected = False
367
+ for j in range(len(corner_bounds)):
368
+ if i == j:
369
+ self_mask[corner_bounds[i][0]:corner_bounds[i][1],
370
+ corner_bounds[j][0]:corner_bounds[j][1]] = 0
371
+ elif any(np.equal([i, 1, j], graph).all(1)) or any(np.equal([j, 1, i], graph).all(1)):
372
+ door_mask[corner_bounds[i][0]:corner_bounds[i][1],
373
+ corner_bounds[j][0]:corner_bounds[j][1]] = 0
374
+ is_connected = True
375
+ if not is_connected:
376
+ door_mask[corner_bounds[i][0]:corner_bounds[i][1],
377
+ corner_bounds[living_room_index][0]:corner_bounds[living_room_index][1]] = 0
378
+
379
+ houses.append(house_layouts)
380
+ door_masks.append(door_mask)
381
+ self_masks.append(self_mask)
382
+ gen_masks.append(gen_mask)
383
+ graphs.append(graph)
384
+ self.syn_houses = houses
385
+ self.syn_door_masks = door_masks
386
+ self.syn_self_masks = self_masks
387
+ self.syn_gen_masks = gen_masks
388
+ self.syn_graphs = graphs
389
+ np.savez_compressed(f'processed_rplan/rplan_{set_name}_{target_set}_syn', graphs=self.syn_graphs,
390
+ houses=self.syn_houses,
391
+ door_masks=self.syn_door_masks, self_masks=self.syn_self_masks,
392
+ gen_masks=self.syn_gen_masks)
393
+
394
+
395
+ def __len__(self):
396
+ return len(self.houses)
397
+
398
+ def __getitem__(self, idx):
399
+ # idx = int(idx//20)
400
+ arr = self.houses[idx][:, :self.num_coords]
401
+ graph = np.concatenate((self.graphs[idx], np.zeros([200 - len(self.graphs[idx]), 3])), 0)
402
+
403
+ cond = {
404
+ 'door_mask': self.door_masks[idx],
405
+ 'self_mask': self.self_masks[idx],
406
+ 'gen_mask': self.gen_masks[idx],
407
+ 'room_types': self.houses[idx][:, self.num_coords:self.num_coords + 25],
408
+ 'corner_indices': self.houses[idx][:, self.num_coords + 25:self.num_coords + 57],
409
+ 'room_indices': self.houses[idx][:, self.num_coords + 57:self.num_coords + 89],
410
+ 'src_key_padding_mask': 1 - self.houses[idx][:, self.num_coords + 89],
411
+ 'connections': self.houses[idx][:, self.num_coords + 90:self.num_coords + 92],
412
+ 'graph': graph,
413
+ }
414
+ if self.set_name == 'eval':
415
+ syn_graph = np.concatenate((self.syn_graphs[idx], np.zeros([200 - len(self.syn_graphs[idx]), 3])), 0)
416
+ assert (graph == syn_graph).all(), idx
417
+ cond.update({
418
+ 'syn_door_mask': self.syn_door_masks[idx],
419
+ 'syn_self_mask': self.syn_self_masks[idx],
420
+ 'syn_gen_mask': self.syn_gen_masks[idx],
421
+ 'syn_room_types': self.syn_houses[idx][:, self.num_coords:self.num_coords + 25],
422
+ 'syn_corner_indices': self.syn_houses[idx][:, self.num_coords + 25:self.num_coords + 57],
423
+ 'syn_room_indices': self.syn_houses[idx][:, self.num_coords + 57:self.num_coords + 89],
424
+ 'syn_src_key_padding_mask': 1 - self.syn_houses[idx][:, self.num_coords + 89],
425
+ 'syn_connections': self.syn_houses[idx][:, self.num_coords + 90:self.num_coords + 92],
426
+ 'syn_graph': syn_graph,
427
+ })
428
+ if self.set_name == 'train':
429
+ #### Random Rotate
430
+ rotation = random.randint(0, 3)
431
+ if rotation == 1:
432
+ arr[:, [0, 1]] = arr[:, [1, 0]]
433
+ arr[:, 0] = -arr[:, 0]
434
+ elif rotation == 2:
435
+ arr[:, [0, 1]] = -arr[:, [1, 0]]
436
+ elif rotation == 3:
437
+ arr[:, [0, 1]] = arr[:, [1, 0]]
438
+ arr[:, 1] = -arr[:, 1]
439
+
440
+ ## To generate any rotation uncomment this
441
+
442
+ # if self.non_manhattan:
443
+ # theta = random.random()*np.pi/2
444
+ # rot_mat = np.array([[np.cos(theta), -np.sin(theta), 0],
445
+ # [np.sin(theta), np.cos(theta), 0]])
446
+ # arr = np.matmul(arr,rot_mat)[:,:2]
447
+
448
+ # Random Scale
449
+ # arr = arr * np.random.normal(1., .5)
450
+
451
+ # Random Shift
452
+ # arr[:, 0] = arr[:, 0] + np.random.normal(0., .1)
453
+ # arr[:, 1] = arr[:, 1] + np.random.normal(0., .1)
454
+
455
+ if not self.analog_bit:
456
+ arr = np.transpose(arr, [1, 0])
457
+ return arr.astype(float), cond
458
+ else:
459
+ ONE_HOT_RES = 256
460
+ arr_onehot = np.zeros((ONE_HOT_RES * 2, arr.shape[1])) - 1
461
+ xs = ((arr[:, 0] + 1) * (ONE_HOT_RES / 2)).astype(int)
462
+ ys = ((arr[:, 1] + 1) * (ONE_HOT_RES / 2)).astype(int)
463
+ xs = np.array([get_bin(x, 8) for x in xs])
464
+ ys = np.array([get_bin(x, 8) for x in ys])
465
+ arr_onehot = np.concatenate([xs, ys], 1)
466
+ arr_onehot = np.transpose(arr_onehot, [1, 0])
467
+ arr_onehot[arr_onehot == 0] = -1
468
+ return arr_onehot.astype(float), cond
469
+
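For reference, each row of house_layouts is a 94-channel corner descriptor; the slices used in __getitem__ above imply the layout sketched here (house_row stands for a hypothetical single row):

    coords       = house_row[0:2]     # x, y in [-1, 1]
    room_type    = house_row[2:27]    # 25-way one-hot
    corner_index = house_row[27:59]   # 32-way one-hot
    room_index   = house_row[59:91]   # 32-way one-hot
    padding_flag = house_row[91]      # 1 = real corner, 0 = padding
    connection   = house_row[92:94]   # (corner index, index of the next corner)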
470
+ def make_sequence(self, edges):
471
+ polys = []
472
+ v_curr = tuple(edges[0][:2])
473
+ e_ind_curr = 0
474
+ e_visited = [0]
475
+ seq_tracker = [v_curr]
476
+ find_next = False
477
+ while len(e_visited) < len(edges):
478
+ if find_next == False:
479
+ if v_curr == tuple(edges[e_ind_curr][2:]):
480
+ v_curr = tuple(edges[e_ind_curr][:2])
481
+ else:
482
+ v_curr = tuple(edges[e_ind_curr][2:])
483
+ find_next = not find_next
484
+ else:
485
+ # look for next edge
486
+ for k, e in enumerate(edges):
487
+ if k not in e_visited:
488
+ if (v_curr == tuple(e[:2])):
489
+ v_curr = tuple(e[2:])
490
+ e_ind_curr = k
491
+ e_visited.append(k)
492
+ break
493
+ elif (v_curr == tuple(e[2:])):
494
+ v_curr = tuple(e[:2])
495
+ e_ind_curr = k
496
+ e_visited.append(k)
497
+ break
498
+
499
+ # extract next sequence
500
+ if v_curr == seq_tracker[-1]:
501
+ polys.append(seq_tracker)
502
+ for k, e in enumerate(edges):
503
+ if k not in e_visited:
504
+ v_curr = tuple(edges[0][:2])
505
+ seq_tracker = [v_curr]
506
+ find_next = False
507
+ e_ind_curr = k
508
+ e_visited.append(k)
509
+ break
510
+ else:
511
+ seq_tracker.append(v_curr)
512
+ polys.append(seq_tracker)
513
+
514
+ return polys
515
+
516
+ def build_graph(self, rms_type, fp_eds, eds_to_rms, out_size=64):
517
+ # create edges
518
+ triples = []
519
+ nodes = rms_type
520
+ # encode connections
521
+ for k in range(len(nodes)):
522
+ for l in range(len(nodes)):
523
+ if l > k:
524
+ is_adjacent = any([True for e_map in eds_to_rms if (l in e_map) and (k in e_map)])
525
+ if is_adjacent:
526
+ if 'train' in self.set_name:
527
+ triples.append([k, 1, l])
528
+ else:
529
+ triples.append([k, 1, l])
530
+ else:
531
+ if 'train' in self.set_name:
532
+ triples.append([k, -1, l])
533
+ else:
534
+ triples.append([k, -1, l])
535
+ # get rooms masks
536
+ eds_to_rms_tmp = []
537
+ for l in range(len(eds_to_rms)):
538
+ eds_to_rms_tmp.append([eds_to_rms[l][0]])
539
+ rms_masks = []
540
+ im_size = 256
541
+ fp_mk = np.zeros((out_size, out_size))
542
+ for k in range(len(nodes)):
543
+ # add rooms and doors
544
+ eds = []
545
+ for l, e_map in enumerate(eds_to_rms_tmp):
546
+ if (k in e_map):
547
+ eds.append(l)
548
+ # draw rooms
549
+ rm_im = Image.new('L', (im_size, im_size))
550
+ dr = ImageDraw.Draw(rm_im)
551
+ for eds_poly in [eds]:
552
+ poly = self.make_sequence(np.array([fp_eds[l][:4] for l in eds_poly]))[0]
553
+ poly = [(im_size * x, im_size * y) for x, y in poly]
554
+ if len(poly) >= 2:
555
+ dr.polygon(poly, fill='white')
556
+ else:
557
+ print("Empty room")
558
+ exit(0)
559
+ rm_im = rm_im.resize((out_size, out_size))
560
+ rm_arr = np.array(rm_im)
561
+ inds = np.where(rm_arr > 0)
562
+ rm_arr[inds] = 1.0
563
+ rms_masks.append(rm_arr)
564
+ if rms_type[k] != 15 and rms_type[k] != 17:
565
+ fp_mk[inds] = k + 1
566
+ # trick to remove overlap
567
+ for k in range(len(nodes)):
568
+ if rms_type[k] != 15 and rms_type[k] != 17:
569
+ rm_arr = np.zeros((out_size, out_size))
570
+ inds = np.where(fp_mk == k + 1)
571
+ rm_arr[inds] = 1.0
572
+ rms_masks[k] = rm_arr
573
+ # convert to array
574
+ nodes = np.array(nodes)
575
+ triples = np.array(triples)
576
+ rms_masks = np.array(rms_masks)
577
+ return nodes, triples, rms_masks
578
+
579
+
580
+ def is_adjacent(box_a, box_b, threshold=0.03):
581
+ x0, y0, x1, y1 = box_a
582
+ x2, y2, x3, y3 = box_b
583
+ h1, h2 = x1 - x0, x3 - x2
584
+ w1, w2 = y1 - y0, y3 - y2
585
+ xc1, xc2 = (x0 + x1) / 2.0, (x2 + x3) / 2.0
586
+ yc1, yc2 = (y0 + y1) / 2.0, (y2 + y3) / 2.0
587
+ delta_x = np.abs(xc2 - xc1) - (h1 + h2) / 2.0
588
+ delta_y = np.abs(yc2 - yc1) - (w1 + w2) / 2.0
589
+ delta = max(delta_x, delta_y)
590
+ return delta < threshold
591
+
592
+
593
+ def reader(filename):
594
+ with open(filename) as f:
595
+ info = json.load(f)
596
+ rms_bbs = np.asarray(info['boxes'])
597
+ fp_eds = info['edges']
598
+ rms_type = info['room_type']
599
+ eds_to_rms = info['ed_rm']
600
+ s_r = 0
601
+ for rmk in range(len(rms_type)):
602
+ if (rms_type[rmk] != 17):
603
+ s_r = s_r + 1
604
+ rms_bbs = np.array(rms_bbs) / 256.0
605
+ fp_eds = np.array(fp_eds) / 256.0
606
+ fp_eds = fp_eds[:, :4]
607
+ tl = np.min(rms_bbs[:, :2], 0)
608
+ br = np.max(rms_bbs[:, 2:], 0)
609
+ shift = (tl + br) / 2.0 - 0.5
610
+ rms_bbs[:, :2] -= shift
611
+ rms_bbs[:, 2:] -= shift
612
+ fp_eds[:, :2] -= shift
613
+ fp_eds[:, 2:] -= shift
614
+ tl -= shift
615
+ br -= shift
616
+ return rms_type, fp_eds, rms_bbs, eds_to_rms
617
+
618
+
619
+ if __name__ == '__main__':
620
+ dataset = RPlanhgDataset('eval', False, 8)
house_diffusion/script_util.py ADDED
@@ -0,0 +1,182 @@
1
+ import argparse
2
+ import inspect
3
+
4
+ from . import gaussian_diffusion as gd
5
+ from .respace import SpacedDiffusion, space_timesteps
6
+ from .transformer import TransformerModel
7
+
8
+ def diffusion_defaults():
9
+ """
10
+ Default hyperparameters for the diffusion process.
11
+ """
12
+ return dict(
13
+ analog_bit=False,
14
+ learn_sigma=False,
15
+ # diffusion_steps=25,
16
+ diffusion_steps=1000,
17
+ noise_schedule="cosine",
18
+ timestep_respacing="ddim100",
19
+ use_kl=False,
20
+ predict_xstart=False,
21
+ rescale_timesteps=False,
22
+ rescale_learned_sigmas=False,
23
+ # target_set=-1,
24
+
25
+ # target_set=4,
26
+ # target_set=5,
27
+ # target_set=6,
28
+ # target_set=7,
29
+ target_set=8,
30
+
31
+ set_name='',
32
+ )
33
+
34
+ def update_arg_parser(args):
35
+ args.num_channels = 512
36
+ num_coords = 16 if args.analog_bit else 2
37
+ if args.dataset=='rplan':
38
+ args.input_channels = num_coords + (2*8 if not args.analog_bit else 0)  # 2 coords + 16 expanded channels (18) without analog_bit, 16 bit-channels with it
39
+ args.condition_channels = 89
40
+ args.out_channels = num_coords * 1
41
+ args.use_unet = False
42
+
43
+ elif args.dataset=='st3d':
44
+ args.input_channels = num_coords + (2*8 if not args.analog_bit else 0)  # same channel layout as rplan
45
+ args.condition_channels = 89
46
+ args.out_channels = num_coords * 1
47
+ args.use_unet = False
48
+
49
+ elif args.dataset=='zind':
50
+ args.input_channels = num_coords + 2 * 8
51
+ args.condition_channels = 89
52
+ args.out_channels = num_coords * 1
53
+ args.use_unet = False
54
+
55
+ elif args.dataset=='layout':
56
+ args.use_unet = True
57
+ pass #TODO NEED TO COMPLETE
58
+
59
+ elif args.dataset=='outdoor':
60
+ args.use_unet = True
61
+ pass #TODO NEED TO COMPLETE
62
+ else:
63
+ assert False, "DATASET NOT FOUND"
64
+
65
+ def model_and_diffusion_defaults():
66
+ """
67
+ Default hyperparameters for the model and the diffusion process.
68
+ """
69
+ res = dict(
70
+ dataset='rplan',
71
+ # dataset='',
72
+ use_checkpoint=False,
73
+ input_channels=0,
74
+ condition_channels=0,
75
+ out_channels=0,
76
+ use_unet=False,
77
+ num_channels=128
78
+ )
79
+ res.update(diffusion_defaults())
80
+ return res
81
+
82
+ def create_model_and_diffusion(
83
+ input_channels,
84
+ condition_channels,
85
+ num_channels,
86
+ out_channels,
87
+ dataset,
88
+ use_checkpoint,
89
+ use_unet,
90
+ learn_sigma,
91
+ diffusion_steps,
92
+ noise_schedule,
93
+ timestep_respacing,
94
+ use_kl,
95
+ predict_xstart,
96
+ rescale_timesteps,
97
+ rescale_learned_sigmas,
98
+ analog_bit,
99
+ target_set,
100
+ set_name,
101
+ ):
102
+ model = TransformerModel(input_channels, condition_channels, num_channels, out_channels, dataset, use_checkpoint, use_unet, analog_bit)
103
+
104
+ diffusion = create_gaussian_diffusion(
105
+ steps=diffusion_steps,
106
+ learn_sigma=learn_sigma,
107
+ noise_schedule=noise_schedule,
108
+ use_kl=use_kl,
109
+ predict_xstart=predict_xstart,
110
+ rescale_timesteps=rescale_timesteps,
111
+ rescale_learned_sigmas=rescale_learned_sigmas,
112
+ timestep_respacing=timestep_respacing,
113
+ )
114
+ return model, diffusion
115
+
116
+ def create_gaussian_diffusion(
117
+ *,
118
+ steps=1000,
119
+ learn_sigma=False,
120
+ sigma_small=False,
121
+ noise_schedule="linear",
122
+ use_kl=False,
123
+ predict_xstart=False,
124
+ rescale_timesteps=False,
125
+ rescale_learned_sigmas=False,
126
+ timestep_respacing="",
127
+ ):
128
+ betas = gd.get_named_beta_schedule(noise_schedule, steps)
129
+ if use_kl:
130
+ loss_type = gd.LossType.RESCALED_KL
131
+ elif rescale_learned_sigmas:
132
+ loss_type = gd.LossType.RESCALED_MSE
133
+ else:
134
+ loss_type = gd.LossType.MSE
135
+ if not timestep_respacing:
136
+ timestep_respacing = [steps]
137
+ return SpacedDiffusion(
138
+ use_timesteps=space_timesteps(steps, timestep_respacing),
139
+ betas=betas,
140
+ model_mean_type=(
141
+ gd.ModelMeanType.EPSILON if not predict_xstart else gd.ModelMeanType.START_X
142
+ ),
143
+ model_var_type=(
144
+ (
145
+ gd.ModelVarType.FIXED_LARGE
146
+ if not sigma_small
147
+ else gd.ModelVarType.FIXED_SMALL
148
+ )
149
+ if not learn_sigma
150
+ else gd.ModelVarType.LEARNED_RANGE
151
+ ),
152
+ loss_type=loss_type,
153
+ rescale_timesteps=rescale_timesteps,
154
+ )
155
+
156
+
157
+ def add_dict_to_argparser(parser, default_dict):
158
+ for k, v in default_dict.items():
159
+ v_type = type(v)
160
+ if v is None:
161
+ v_type = str
162
+ elif isinstance(v, bool):
163
+ v_type = str2bool
164
+ parser.add_argument(f"--{k}", default=v, type=v_type)
165
+
166
+
167
+ def args_to_dict(args, keys):
168
+ return {k: getattr(args, k) for k in keys}
169
+
170
+
171
+ def str2bool(v):
172
+ """
173
+ https://stackoverflow.com/questions/15008758/parsing-boolean-values-with-argparse
174
+ """
175
+ if isinstance(v, bool):
176
+ return v
177
+ if v.lower() in ("yes", "true", "t", "y", "1"):
178
+ return True
179
+ elif v.lower() in ("no", "false", "f", "n", "0"):
180
+ return False
181
+ else:
182
+ raise argparse.ArgumentTypeError("boolean value expected")
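A sketch of how these helpers are usually wired together in a training or sampling script (the flow mirrors the defaults above; the exact flag values are assumptions):

    import argparse

    parser = argparse.ArgumentParser()
    add_dict_to_argparser(parser, model_and_diffusion_defaults())
    args = parser.parse_args(['--dataset', 'rplan', '--analog_bit', 'False'])

    update_arg_parser(args)   # fills in input/condition/output channel sizes for the dataset
    model, diffusion = create_model_and_diffusion(
        **args_to_dict(args, model_and_diffusion_defaults().keys())
    )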
house_diffusion/train_util.py ADDED
@@ -0,0 +1,416 @@
1
+ import copy
2
+ import functools
3
+ import os
4
+
5
+ import blobfile as bf
6
+ import torch as th
7
+ import torch.distributed as dist
8
+ from torch.nn.parallel.distributed import DistributedDataParallel as DDP
9
+ from torch.optim import AdamW
10
+
11
+ from . import dist_util, logger
12
+ from .fp16_util import MixedPrecisionTrainer
13
+ from .nn import update_ema
14
+ from .resample import LossAwareSampler, UniformSampler
15
+
16
+ # For ImageNet experiments, this was a good default value.
17
+ # We found that the lg_loss_scale quickly climbed to
18
+ # 20-21 within the first ~1K steps of training.
19
+ INITIAL_LOG_LOSS_SCALE = 20.0
20
+
21
+
22
+ class TrainLoop:
23
+ def __init__(
24
+ self,
25
+ *,
26
+ model,
27
+ diffusion,
28
+ data,
29
+ batch_size,
30
+ microbatch,
31
+ lr,
32
+ ema_rate,
33
+ log_interval,
34
+ save_interval,
35
+ resume_checkpoint,
36
+ use_fp16=False,
37
+ fp16_scale_growth=1e-3,
38
+ schedule_sampler=None,
39
+ weight_decay=0.0,
40
+ lr_anneal_steps=0,
41
+ analog_bit=None,
42
+ ):
43
+ self.analog_bit = analog_bit
44
+ self.model = model
45
+ self.diffusion = diffusion
46
+ self.data = data
47
+ self.batch_size = batch_size
48
+ self.microbatch = microbatch if microbatch > 0 else batch_size
49
+ self.lr = lr
50
+ self.ema_rate = (
51
+ [ema_rate]
52
+ if isinstance(ema_rate, float)
53
+ else [float(x) for x in ema_rate.split(",")]
54
+ )
55
+ self.log_interval = log_interval
56
+ self.save_interval = save_interval
57
+ self.resume_checkpoint = resume_checkpoint
58
+ self.use_fp16 = use_fp16
59
+ self.fp16_scale_growth = fp16_scale_growth
60
+ self.schedule_sampler = schedule_sampler or UniformSampler(diffusion)
61
+ self.weight_decay = weight_decay
62
+ self.lr_anneal_steps = lr_anneal_steps
63
+
64
+ self.step = 0
65
+ self.resume_step = 0
66
+ self.global_batch = self.batch_size * dist.get_world_size()
67
+
68
+ self.sync_cuda = th.cuda.is_available()
69
+
70
+ # TODO ------------------------------------------------------------------------
71
+ pretrained_path = "../ckpts/exp/model250000.pt"
72
+ pretrained_path = False  # set to a checkpoint path (e.g. the line above) to warm-start from pretrained weights
73
+
74
+ if pretrained_path:
75
+ self.load_pretrained(pretrained_path)
76
+ self.count_parameters_by_layer()
77
+
78
+ from .transformer_models import TransformerModels
79
+
80
+ device = th.device('cuda' if th.cuda.is_available() else 'cpu')
81
+ # self.model.to(device)
82
+ # print(th.get_default_device())
83
+ # th.set_default_device('cuda')
84
+ # print(th.get_default_device())
85
+
86
+ transformer_model = TransformerModels(self.model, device)
87
+ self.model_name = "Def"
88
+
89
+ # self.model = transformer_model.replace_InstanceNorm1d_LayerNorm()
90
+ # self.model_name = "Norm_LayerNorm"
91
+ # self.model = transformer_model.set_affine_true_for_instance_norm()
92
+ # self.model_name = "Norm_affine"
93
+ #
94
+ # self.model = transformer_model.replace_activation_function("GELU")
95
+ # self.model_name = "Activation_GELU"
96
+ # self.model = transformer_model.replace_activation_function("LeakyReLU")
97
+ # self.model_name = "Activation_LeakyRelu"
98
+ # self.model = transformer_model.replace_activation_function("ELU")
99
+ # self.model_name = "Activation_ELU"
100
+ # self.model = transformer_model.replace_activation_function("Mish")
101
+ # self.model_name = "Activation_Mish"
102
+ #
103
+ # self.model = transformer_model.add_encoder_layers(num_new_layers=2)
104
+ # self.model_name = "EncoderLayers_2"
105
+ # self.model = transformer_model.add_encoder_layers(num_new_layers=4)
106
+ # self.model_name = "EncoderLayers_4"
107
+ #
108
+ # self.model = transformer_model.dropout_value_change(val=0.01)
109
+ # self.model_name = "Dropout_01"
110
+ # self.model = transformer_model.dropout_value_change(val=0.001)
111
+ # self.model_name = "Dropout_001"
112
+ # self.model = transformer_model.dropout_value_change(val=0.9)
113
+ # self.model_name = "Dropout_9"
114
+ #
115
+ # self.model = transformer_model.change_linear_output_layers()
116
+ # self.model_name = "OutputLayer"
117
+ #
118
+ # self.model = transformer_model.add_cross_attention()
119
+ # self.model_name = "CrossAttention"
120
+ #
121
+ # self.model_name = "lr_001"
122
+ # self.model_name = "lr_00001"
123
+ #
124
+ # self.model_name = "wd_01"
125
+
126
+ self.model_name = ""
127
+
128
+ print(self.model)
129
+ self.count_parameters_by_layer()
130
+
131
+ # TODO ------------------------------------------------------------------------
132
+
133
+ self.mp_trainer = MixedPrecisionTrainer(
134
+ model=self.model,
135
+ use_fp16=self.use_fp16,
136
+ fp16_scale_growth=fp16_scale_growth,
137
+ )
138
+
139
+ self.opt = AdamW(
140
+ self.mp_trainer.master_params, lr=self.lr, weight_decay=self.weight_decay
141
+ )
142
+
143
+ if self.resume_step:
144
+ self._load_optimizer_state()
145
+ # Model was resumed, either due to a restart or a checkpoint
146
+ # being specified at the command line.
147
+ self.ema_params = [
148
+ self._load_ema_parameters(rate) for rate in self.ema_rate
149
+ ]
150
+ else:
151
+ self.ema_params = [
152
+ copy.deepcopy(self.mp_trainer.master_params)
153
+ for _ in range(len(self.ema_rate))
154
+ ]
155
+
156
+ if th.cuda.is_available():
157
+ self.use_ddp = True
158
+ self.ddp_model = DDP(
159
+ self.model,
160
+ device_ids=[dist_util.dev()],
161
+ output_device=dist_util.dev(),
162
+ broadcast_buffers=False,
163
+ bucket_cap_mb=128,
164
+ find_unused_parameters=False,
165
+ )
166
+ else:
167
+ if dist.get_world_size() > 1:
168
+ logger.warn(
169
+ "Distributed training requires CUDA. "
170
+ "Gradients will not be synchronized properly!"
171
+ )
172
+ self.use_ddp = False
173
+ self.ddp_model = self.model
174
+
175
+ # TODO----------------------------------------------------------------------------------
176
+ def count_parameters(self):
177
+ model = self.model
178
+ trainable_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
179
+ untrainable_params = sum(p.numel() for p in model.parameters() if not p.requires_grad)
180
+
181
+ print(f"Trainable parameters: {trainable_params}")
182
+ print(f"Untrainable parameters: {untrainable_params}")
183
+ return trainable_params, untrainable_params
184
+
185
+ def count_parameters_by_layer(self):
186
+ print(f"{'Layer':<55} {'Trainable Params':<20} {'Untrainable Params':<20}")
187
+ print("=" * 95)
188
+
189
+ for name, param in self.model.named_parameters():
190
+ if param.requires_grad:
191
+ trainable_params = param.numel()
192
+ untrainable_params = 0
193
+ else:
194
+ trainable_params = 0
195
+ untrainable_params = param.numel()
196
+
197
+ print(f"{name:<55} {trainable_params:<20} {untrainable_params:<20}")
198
+
199
+ print("=" * 95)
200
+ total_trainable = sum(p.numel() for p in self.model.parameters() if p.requires_grad)
201
+ total_untrainable = sum(p.numel() for p in self.model.parameters() if not p.requires_grad)
202
+
203
+ print(f"{'Total':<55} {total_trainable:<20} {total_untrainable:<20}")
204
+
205
+ def load_pretrained(self, pretrained_path):
206
+ state_dict = th.load(pretrained_path, map_location=dist_util.dev())
207
+ self.model.load_state_dict(state_dict)
208
+ print(self.model)
209
+ logger.log(f"Loaded pretrained model from {pretrained_path}")
210
+
211
+ # --------------------------------------------------------------------------------------
212
+
213
+ def _load_and_sync_parameters(self):
214
+ resume_checkpoint = find_resume_checkpoint() or self.resume_checkpoint
215
+
216
+ if resume_checkpoint:
217
+ self.resume_step = parse_resume_step_from_filename(resume_checkpoint)
218
+ # if dist.get_rank() == 0:
219
+ logger.log(f"loading model from checkpoint: {resume_checkpoint}...")
220
+ self.model.load_state_dict(
221
+ dist_util.load_state_dict(
222
+ resume_checkpoint, map_location=dist_util.dev()
223
+ )
224
+ )
225
+
226
+ dist_util.sync_params(self.model.parameters())
227
+
228
+ def _load_ema_parameters(self, rate):
229
+ ema_params = copy.deepcopy(self.mp_trainer.master_params)
230
+
231
+ main_checkpoint = find_resume_checkpoint() or self.resume_checkpoint
232
+ ema_checkpoint = find_ema_checkpoint(main_checkpoint, self.resume_step, rate)
233
+ if ema_checkpoint:
234
+ if dist.get_rank() == 0:
235
+ logger.log(f"loading EMA from checkpoint: {ema_checkpoint}...")
236
+ state_dict = dist_util.load_state_dict(
237
+ ema_checkpoint, map_location=dist_util.dev()
238
+ )
239
+ ema_params = self.mp_trainer.state_dict_to_master_params(state_dict)
240
+
241
+ dist_util.sync_params(ema_params)
242
+ return ema_params
243
+
244
+ def _load_optimizer_state(self):
245
+ main_checkpoint = find_resume_checkpoint() or self.resume_checkpoint
246
+ opt_checkpoint = bf.join(
247
+ bf.dirname(main_checkpoint), f"opt{self.resume_step:06}.pt"
248
+ )
249
+ if bf.exists(opt_checkpoint):
250
+ logger.log(f"loading optimizer state from checkpoint: {opt_checkpoint}")
251
+ state_dict = dist_util.load_state_dict(
252
+ opt_checkpoint, map_location=dist_util.dev()
253
+ )
254
+ self.opt.load_state_dict(state_dict)
255
+
256
+ def run_loop(self):
257
+ while (
258
+ not self.lr_anneal_steps
259
+ or self.step + self.resume_step < self.lr_anneal_steps
260
+ ):
261
+ batch, cond = next(self.data)
262
+ self.run_step(batch, cond)
263
+ # TODO: change 100000 for new lr
264
+ if self.step % 100000 == 0:
265
+ lr = self.lr * (0.1 ** (self.step // 100000))
266
+ logger.log(f"Step {self.step}: Updating learning rate to {lr}")
267
+ for param_group in self.opt.param_groups:
268
+ param_group["lr"] = lr
269
+ if self.step % self.log_interval == 0:
270
+ logger.dumpkvs()
271
+ if self.step % self.save_interval == 0 and self.step > 0:
272
+ self.save()
273
+ # Run for a finite amount of time in integration tests.
274
+ if os.environ.get("DIFFUSION_TRAINING_TEST", "") and self.step > 0:
275
+ return
276
+ self.step += 1
277
+ # Save the last checkpoint if it wasn't already saved.
278
+ if (self.step - 1) % self.save_interval != 0:
279
+ self.save()
280
+
281
+ def run_step(self, batch, cond):
282
+ self.forward_backward(batch, cond)
283
+ took_step = self.mp_trainer.optimize(self.opt)
284
+ if took_step:
285
+ self._update_ema()
286
+ self._anneal_lr()
287
+ self.log_step()
288
+
289
+ def forward_backward(self, batch, cond):
290
+ self.mp_trainer.zero_grad()
291
+ for i in range(0, batch.shape[0], self.microbatch):
292
+ micro = batch[i: i + self.microbatch].to(dist_util.dev())
293
+ micro_cond = {
294
+ k: v[i: i + self.microbatch].to(dist_util.dev())
295
+ for k, v in cond.items()
296
+ }
297
+ model_kwargs = micro_cond
298
+
299
+ last_batch = (i + self.microbatch) >= batch.shape[0]
300
+ t, weights = self.schedule_sampler.sample(micro.shape[0], dist_util.dev())
301
+
302
+ compute_losses = functools.partial(
303
+ self.diffusion.training_losses,
304
+ self.ddp_model,
305
+ micro,
306
+ t,
307
+ model_kwargs=model_kwargs,
308
+ analog_bit=self.analog_bit,
309
+ )
310
+
311
+ if last_batch or not self.use_ddp:
312
+ losses = compute_losses()
313
+ else:
314
+ with self.ddp_model.no_sync():
315
+ losses = compute_losses()
316
+
317
+ if isinstance(self.schedule_sampler, LossAwareSampler):
318
+ self.schedule_sampler.update_with_local_losses(
319
+ t, losses["loss"].detach()
320
+ )
321
+
322
+ loss = (losses["loss"] * weights).mean()
323
+ log_loss_dict(
324
+ self.diffusion, t, {k: v * weights for k, v in losses.items()}
325
+ )
326
+ self.mp_trainer.backward(loss)
327
+
328
+ def _update_ema(self):
329
+ for rate, params in zip(self.ema_rate, self.ema_params):
330
+ update_ema(params, self.mp_trainer.master_params, rate=rate)
331
+
332
+ def _anneal_lr(self):
333
+ if not self.lr_anneal_steps:
334
+ return
335
+ frac_done = (self.step + self.resume_step) / self.lr_anneal_steps
336
+ lr = self.lr * (1 - frac_done)
337
+ for param_group in self.opt.param_groups:
338
+ param_group["lr"] = lr
339
+
340
+ def log_step(self):
341
+ logger.logkv("step", self.step + self.resume_step)
342
+ logger.logkv("samples", (self.step + self.resume_step + 1) * self.global_batch)
343
+
344
+ def save(self):
345
+ def save_checkpoint(rate, params):
346
+ state_dict = self.mp_trainer.master_params_to_state_dict(params)
347
+ if dist.get_rank() == 0:
348
+ logger.log(f"saving model {rate}...")
349
+ if not rate:
350
+ filename = f"model{(self.step + self.resume_step):06d}.pt"
351
+ else:
352
+ filename = f"ema_{rate}_{(self.step + self.resume_step):06d}.pt"
353
+
354
+ filename = self.model_name + "_" + filename
355
+
356
+ with bf.BlobFile(bf.join(get_blob_logdir(), filename), "wb") as f:
357
+ th.save(state_dict, f)
358
+
359
+ save_checkpoint(0, self.mp_trainer.master_params)
360
+ for rate, params in zip(self.ema_rate, self.ema_params):
361
+ save_checkpoint(rate, params)
362
+
363
+ if dist.get_rank() == 0:
364
+ with bf.BlobFile(
365
+ bf.join(get_blob_logdir(), f"opt{(self.step + self.resume_step):06d}.pt"),
366
+ "wb",
367
+ ) as f:
368
+ th.save(self.opt.state_dict(), f)
369
+
370
+ dist.barrier()
371
+
372
+
373
+ def parse_resume_step_from_filename(filename):
374
+ """
375
+ Parse filenames of the form path/to/modelNNNNNN.pt, where NNNNNN is the
376
+ checkpoint's number of steps.
377
+ """
378
+ split = filename.split("model")
379
+ if len(split) < 2:
380
+ return 0
381
+ split1 = split[-1].split(".")[0]
382
+ try:
383
+ return int(split1)
384
+ except ValueError:
385
+ return 0
386
+
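For example, the parser above yields:

    parse_resume_step_from_filename("ckpt/model250000.pt")        # 250000
    parse_resume_step_from_filename("ckpt/ema_0.9999_250000.pt")  # 0 (no "model" prefix to split on)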
387
+
388
+ def get_blob_logdir():
389
+ # You can change this to be a separate path to save checkpoints to
390
+ # a blobstore or some external drive.
391
+ return logger.get_dir()
392
+
393
+
394
+ def find_resume_checkpoint():
395
+ # On your infrastructure, you may want to override this to automatically
396
+ # discover the latest checkpoint on your blob storage, etc.
397
+ return None
398
+
399
+
400
+ def find_ema_checkpoint(main_checkpoint, step, rate):
401
+ if main_checkpoint is None:
402
+ return None
403
+ filename = f"ema_{rate}_{(step):06d}.pt"
404
+ path = bf.join(bf.dirname(main_checkpoint), filename)
405
+ if bf.exists(path):
406
+ return path
407
+ return None
408
+
409
+
410
+ def log_loss_dict(diffusion, ts, losses):
411
+ for key, values in losses.items():
412
+ logger.logkv_mean(key, values.mean().item())
413
+ # Log the quantiles (four quartiles, in particular).
414
+ for sub_t, sub_loss in zip(ts.cpu().numpy(), values.detach().cpu().numpy()):
415
+ quartile = int(4 * sub_t / diffusion.num_timesteps)
416
+ logger.logkv_mean(f"{key}_q{quartile}", sub_loss)
house_diffusion/transformer.py ADDED
@@ -0,0 +1,284 @@
1
+ import math
2
+ import torch as th
3
+ import torch.nn as nn
4
+ import torch.nn.functional as F
5
+
6
+ from .nn import timestep_embedding
7
+
8
+ def dec2bin(xinp, bits):
9
+ mask = 2 ** th.arange(bits - 1, -1, -1).to(xinp.device, xinp.dtype)
10
+ return xinp.unsqueeze(-1).bitwise_and(mask).ne(0).float()
11
+
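A worked example of the bit helper above (assuming the module's import torch as th):

    vals = th.tensor([5, 255])
    dec2bin(vals, 8)
    # tensor([[0., 0., 0., 0., 0., 1., 0., 1.],
    #         [1., 1., 1., 1., 1., 1., 1., 1.]])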
12
+ class PositionalEncoding(nn.Module):
13
+
14
+ def __init__(self, d_model: int, dropout: float = 0.1, max_len: int = 5000):
15
+ super().__init__()
16
+ self.dropout = nn.Dropout(p=dropout)
17
+
18
+ position = th.arange(max_len).unsqueeze(1)
19
+ div_term = th.exp(th.arange(0, d_model, 2) * (-math.log(10000.0) / d_model))
20
+ pe = th.zeros(1, max_len, d_model)
21
+ pe[0, :, 0::2] = th.sin(position * div_term)
22
+ pe[0, :, 1::2] = th.cos(position * div_term)
23
+ self.register_buffer('pe', pe)
24
+
25
+ def forward(self, x):
26
+ """
27
+ Args:
28
+ x: Tensor, shape [batch_size, seq_len, embedding_dim]
29
+ """
30
+ x = x + self.pe[0:1, :x.size(1)]
31
+ return self.dropout(x)
32
+
33
+ class FeedForward(nn.Module):
34
+ def __init__(self, d_model, d_ff, dropout, activation):
35
+ super().__init__()
36
+ # d_ff is the hidden width of the position-wise feed-forward block (d_model*2 in EncoderLayer)
37
+ self.linear_1 = nn.Linear(d_model, d_ff)
38
+ self.dropout = nn.Dropout(dropout)
39
+ self.linear_2 = nn.Linear(d_ff, d_model)
40
+ self.activation = activation
41
+ def forward(self, x):
42
+ x = self.dropout(self.activation(self.linear_1(x)))
43
+ x = self.linear_2(x)
44
+ return x
45
+
46
+ def attention(q, k, v, d_k, mask=None, dropout=None):
47
+ scores = th.matmul(q, k.transpose(-2, -1)) / math.sqrt(d_k)
48
+ if mask is not None:
49
+ mask = mask.unsqueeze(1)
50
+ scores = scores.masked_fill(mask == 1, -1e9)
51
+ scores = F.softmax(scores, dim=-1)
52
+ if dropout is not None:
53
+ scores = dropout(scores)
54
+ output = th.matmul(scores, v)
55
+ return output
56
+
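A shape-level sketch of the attention helper above (random tensors, assuming the module's import torch as th; in the mask, 1 marks positions to block, matching masked_fill(mask == 1, -1e9)):

    bs, heads, seq, d_k = 2, 4, 100, 128
    q = th.randn(bs, heads, seq, d_k)
    k = th.randn(bs, heads, seq, d_k)
    v = th.randn(bs, heads, seq, d_k)
    mask = th.zeros(bs, seq, seq)        # 0 = attend, 1 = block; unsqueezed to the head dim inside
    out = attention(q, k, v, d_k, mask)  # -> [bs, heads, seq, d_k]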
57
+ class MultiHeadAttention(nn.Module):
58
+ def __init__(self, heads, d_model, dropout = 0.1):
59
+ super().__init__()
60
+ self.d_model = d_model
61
+ self.d_k = d_model // heads
62
+ self.h = heads
63
+ self.q_linear = nn.Linear(d_model, d_model)
64
+ self.v_linear = nn.Linear(d_model, d_model)
65
+ self.k_linear = nn.Linear(d_model, d_model)
66
+ self.dropout = nn.Dropout(dropout)
67
+ self.out = nn.Linear(d_model, d_model)
68
+
69
+ def forward(self, q, k, v, mask=None):
70
+ bs = q.size(0)
71
+ # perform linear operation and split into h heads
72
+ k = self.k_linear(k).view(bs, -1, self.h, self.d_k)
73
+ q = self.q_linear(q).view(bs, -1, self.h, self.d_k)
74
+ v = self.v_linear(v).view(bs, -1, self.h, self.d_k)
75
+ # transpose to get dimensions bs * h * sl * d_model
76
+ k = k.transpose(1,2)
77
+ q = q.transpose(1,2)
78
+ v = v.transpose(1,2)  # -> [bs, heads, seq_len, d_k]; attention is computed by the helper defined above
79
+ # keep the attention mask on the same device as q/k/v instead of hard-coding cuda:0
81
+ mask = mask.to(q.device)
81
+ scores = attention(q, k, v, self.d_k, mask, self.dropout)
82
+ # concatenate heads and put through final linear layer
83
+ concat = scores.transpose(1,2).contiguous().view(bs, -1, self.d_model)
84
+ output = self.out(concat)
85
+ return output
86
+
87
+ class EncoderLayer(nn.Module):
88
+ def __init__(self, d_model, heads, dropout, activation):
89
+ super().__init__()
90
+ self.norm_1 = nn.InstanceNorm1d(d_model)
91
+ self.norm_2 = nn.InstanceNorm1d(d_model)
92
+ self.self_attn = MultiHeadAttention(heads, d_model)
93
+ self.door_attn = MultiHeadAttention(heads, d_model)
94
+ self.gen_attn = MultiHeadAttention(heads, d_model)
95
+ self.ff = FeedForward(d_model, d_model*2, dropout, activation)
96
+ self.dropout = nn.Dropout(dropout)
97
+
98
+ def forward(self, x, door_mask, self_mask, gen_mask):
99
+ assert (gen_mask.max()==1 and gen_mask.min()==0), f"{gen_mask.max()}, {gen_mask.min()}"
100
+ x2 = self.norm_1(x)
101
+ x = x + self.dropout(self.door_attn(x2,x2,x2,door_mask)) \
102
+ + self.dropout(self.self_attn(x2, x2, x2, self_mask)) \
103
+ + self.dropout(self.gen_attn(x2, x2, x2, gen_mask))
104
+ x2 = self.norm_2(x)
105
+ x = x + self.dropout(self.ff(x2))
106
+ return x
107
+
108
+ class TransformerModel(nn.Module):
109
+ """
110
+ The full Transformer model with timestep embedding.
111
+ """
112
+
113
+ def __init__(
114
+ self,
115
+ in_channels,
116
+ condition_channels,
117
+ model_channels,
118
+ out_channels,
119
+ dataset,
120
+ use_checkpoint,
121
+ use_unet,
122
+ analog_bit,
123
+ ):
124
+ super().__init__()
125
+ self.in_channels = in_channels
126
+ self.condition_channels = condition_channels
127
+ self.model_channels = model_channels
128
+ self.out_channels = out_channels
129
+ self.time_channels = model_channels
130
+ self.use_checkpoint = use_checkpoint
131
+ self.analog_bit = analog_bit
132
+ self.use_unet = use_unet
133
+ self.num_layers = 4
134
+
135
+ # self.pos_encoder = PositionalEncoding(model_channels, 0.001)
136
+ # self.activation = nn.SiLU()
137
+ self.activation = nn.ReLU()
138
+
139
+ self.time_embed = nn.Sequential(
140
+ nn.Linear(self.model_channels, self.model_channels),
141
+ nn.SiLU(),
142
+ nn.Linear(self.model_channels, self.time_channels),
143
+ )
144
+ self.input_emb = nn.Linear(self.in_channels, self.model_channels)
145
+ self.condition_emb = nn.Linear(self.condition_channels, self.model_channels)
146
+
147
+ if use_unet:
148
+ self.unet = UNet(self.model_channels, 1)
149
+
150
+ self.transformer_layers = nn.ModuleList([EncoderLayer(self.model_channels, 4, 0.1, self.activation) for x in range(self.num_layers)])
151
+ # self.transformer_layers = nn.ModuleList([nn.TransformerEncoderLayer(self.model_channels, 4, self.model_channels*2, 0.1, self.activation, batch_first=True) for x in range(self.num_layers)])
152
+
153
+ self.output_linear1 = nn.Linear(self.model_channels, self.model_channels)
154
+ self.output_linear2 = nn.Linear(self.model_channels, self.model_channels//2)
155
+ self.output_linear3 = nn.Linear(self.model_channels//2, self.out_channels)
156
+
157
+ if not self.analog_bit:
158
+ self.output_linear_bin1 = nn.Linear(162+self.model_channels, self.model_channels)
159
+ self.output_linear_bin2 = EncoderLayer(self.model_channels, 1, 0.1, self.activation)
160
+ self.output_linear_bin3 = EncoderLayer(self.model_channels, 1, 0.1, self.activation)
161
+ self.output_linear_bin4 = nn.Linear(self.model_channels, 16)
162
+
163
+ print(f"Number of model parameters: {sum(p.numel() for p in self.parameters() if p.requires_grad)}")
164
+
165
+ def expand_points(self, points, connections):
166
+ def average_points(point1, point2):
167
+ points_new = (point1+point2)/2
168
+ return points_new
169
+ p1 = points
170
+ p1 = p1.view([p1.shape[0], p1.shape[1], 2, -1])
171
+ p5 = points[th.arange(points.shape[0])[:, None], connections[:,:,1].long()]
172
+ p5 = p5.view([p5.shape[0], p5.shape[1], 2, -1])
173
+ p3 = average_points(p1, p5)
174
+ p2 = average_points(p1, p3)
175
+ p4 = average_points(p3, p5)
176
+ p1_5 = average_points(p1, p2)
177
+ p2_5 = average_points(p2, p3)
178
+ p3_5 = average_points(p3, p4)
179
+ p4_5 = average_points(p4, p5)
180
+ points_new = th.cat((p1.view_as(points), p1_5.view_as(points), p2.view_as(points),
181
+ p2_5.view_as(points), p3.view_as(points), p3_5.view_as(points), p4.view_as(points), p4_5.view_as(points), p5.view_as(points)), 2)
182
+ return points_new.detach()
183
+
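expand_points turns every corner into 9 points (the corner, the next corner, and seven interpolated points between them), which is why input_channels is 2 + 2*8 = 18 in script_util when analog_bit is off. A shape sketch, with model standing for a hypothetical TransformerModel instance and th the module's torch import:

    points = th.randn(1, 100, 2)        # [N, S, 2] corner coordinates
    connections = th.zeros(1, 100, 2)   # [N, S, 2] (corner index, next corner index)
    model.expand_points(points, connections).shape   # torch.Size([1, 100, 18])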
184
+ def create_image(self, points, connections, room_indices, img_size=256, res=200):
185
+ img = th.zeros((points.shape[0], 1, img_size, img_size), device=points.device)
186
+ points = (points+1)*(img_size//2)
187
+ points[points>=img_size] = img_size-1
188
+ points[points<0] = 0
189
+ p1 = points
190
+ p2 = points[th.arange(points.shape[0])[:, None], connections[:,:,1].long()]
191
+
192
+ slope = (p2[:,:,1]-p1[:,:,1])/((p2[:,:,0]-p1[:,:,0]))
193
+ slope[slope.isnan()] = 0
194
+ slope[slope.isinf()] = 1
195
+
196
+ m = th.linspace(0, 1, res, device=points.device)
197
+ new_shape = [p2.shape[0], res, p2.shape[1], p2.shape[2]]
198
+
199
+ new_p2 = p2.unsqueeze(1).expand(new_shape)
200
+ new_p1 = p1.unsqueeze(1).expand(new_shape)
201
+ new_room_indices = room_indices.unsqueeze(1).expand([p2.shape[0], res, p2.shape[1], 1])
202
+
203
+ inc = new_p2 - new_p1
204
+
205
+ xs = m.view(1,-1,1) * inc[:,:,:,0]
206
+ xs = xs + new_p1[:,:,:,0]
207
+ xs = xs.long()
208
+
209
+ x_inc = th.where(inc[:,:,:,0]==0, inc[:,:,:,1], inc[:,:,:,0])
210
+ x_inc = m.view(1,-1,1) * x_inc
211
+ ys = x_inc * slope.unsqueeze(1) + new_p1[:,:,:,1]
212
+ ys = ys.long()
213
+
214
+ img[th.arange(xs.shape[0])[:, None], :, xs.view(img.shape[0], -1), ys.view(img.shape[0], -1)] = new_room_indices.reshape(img.shape[0], -1, 1).float()
215
+ return img.detach()
216
+
217
+ def forward(self, x, timesteps, xtalpha, epsalpha, is_syn=False, **kwargs):
218
+ """
219
+ Apply the model to an input batch.
220
+
221
+ :param x: an [N x S x C] Tensor of inputs.
222
+ :param timesteps: a 1-D batch of timesteps.
223
+ :param kwargs: conditioning tensors (room types, corner/room indices, attention masks).
224
+ :return: an [N x S x C] Tensor of outputs.
225
+ """
226
+ # prefix = 'syn_' if is_syn else ''
227
+ prefix = 'syn_' if is_syn else ''
228
+ x = x.permute([0, 2, 1]).float() # -> convert [N x C x S] to [N x S x C]
229
+
230
+ if not self.analog_bit:
231
+ x = self.expand_points(x, kwargs[f'{prefix}connections'])
232
+
233
+ # Different input embeddings (Input, Time, Conditions)
234
+ # move the input onto the model's device instead of hard-coding cuda:0
235
+ x = x.to(self.input_emb.weight.device)
236
+ timesteps = timesteps.to(x.device)
237
+ # print(x.device)
238
+
239
+ time_emb = self.time_embed(timestep_embedding(timesteps, self.model_channels))
240
+ time_emb = time_emb.unsqueeze(1)
241
+ input_emb = self.input_emb(x)
242
+ if self.condition_channels>0:
243
+ cond = None
244
+ for key in [f'{prefix}room_types', f'{prefix}corner_indices', f'{prefix}room_indices']:
245
+ if cond is None:
246
+ cond = kwargs[key]
247
+ else:
248
+ cond = th.cat((cond, kwargs[key]), 2)
249
+ # keep the conditioning tensor on the same device as x
250
+ cond = cond.to(x.device)
251
+ cond_emb = self.condition_emb(cond.float())
252
+
253
+ # PositionalEncoding and DM model
254
+ out = input_emb + cond_emb + time_emb.repeat((1, input_emb.shape[1], 1))
255
+ for layer in self.transformer_layers:
256
+ out = layer(out, kwargs[f'{prefix}door_mask'], kwargs[f'{prefix}self_mask'], kwargs[f'{prefix}gen_mask'])
257
+
258
+ out_dec = self.output_linear1(out)
259
+ out_dec = self.activation(out_dec)
260
+ out_dec = self.output_linear2(out_dec)
261
+ out_dec = self.output_linear3(out_dec)
262
+
263
+ if not self.analog_bit:
264
+ out_bin_start = x*xtalpha.repeat([1,1,9]) - out_dec.repeat([1,1,9]) * epsalpha.repeat([1,1,9])
265
+ out_bin = (out_bin_start/2 + 0.5) # -> [0,1]
266
+ out_bin = out_bin * 256 #-> [0, 256]
267
+ out_bin = dec2bin(out_bin.round().int(), 8)
268
+ out_bin_inp = out_bin.reshape([x.shape[0], x.shape[1], 16*9])
269
+ out_bin_inp[out_bin_inp==0] = -1
270
+
271
+ out_bin = th.cat((out_bin_start, out_bin_inp, cond_emb), 2)
272
+ out_bin = self.activation(self.output_linear_bin1(out_bin))
273
+ out_bin = self.output_linear_bin2(out_bin, kwargs[f'{prefix}door_mask'], kwargs[f'{prefix}self_mask'], kwargs[f'{prefix}gen_mask'])
274
+ out_bin = self.output_linear_bin3(out_bin, kwargs[f'{prefix}door_mask'], kwargs[f'{prefix}self_mask'], kwargs[f'{prefix}gen_mask'])
275
+ out_bin = self.output_linear_bin4(out_bin)
276
+
277
+ out_bin = out_bin.permute([0, 2, 1]) # -> convert back [N x S x C] to [N x C x S]
278
+
279
+ out_dec = out_dec.permute([0, 2, 1]) # -> convert back [N x S x C] to [N x C x S]
280
+
281
+ if not self.analog_bit:
282
+ return out_dec, out_bin
283
+ else:
284
+ return out_dec, None
house_diffusion/transformer_models.py ADDED
@@ -0,0 +1,228 @@
1
+ import torch.nn as nn
2
+ from .transformer import TransformerModel, EncoderLayer
3
+
4
+
5
+ # class TransformerModels(nn.Module):
6
+ class TransformerModels:
7
+ def __init__(self, model, device):
8
+ self.model = model
9
+ self.device = device
10
+
11
+ """ ------------------------------- 1) Normalize ------------------------------- """
12
+
13
+ def replace_InstanceNorm1d_LayerNorm(self):
14
+ self.freeze_unfreeze(True)
15
+ for name, layer in self.model.named_modules():
16
+ if isinstance(layer, nn.InstanceNorm1d):
17
+ num_features = layer.num_features
18
+ new_layer = nn.LayerNorm(normalized_shape=num_features).to(self.device)
19
+ parent_module = dict(self.model.named_modules())[name.rsplit('.', 1)[0]]
20
+ setattr(parent_module, name.split('.')[-1], new_layer)
21
+
22
+ return self.model
23
+
24
+ def set_affine_true_for_instance_norm(self):
25
+ self.freeze_unfreeze(True)
26
+ for name, layer in self.model.named_modules():
27
+ if isinstance(layer, nn.InstanceNorm1d):
28
+ new_layer = nn.InstanceNorm1d(num_features=100, affine=True).to(self.device)
29
+ parent_module = dict(self.model.named_modules())[name.rsplit('.', 1)[0]]
30
+ setattr(parent_module, name.split('.')[-1], new_layer)
31
+
32
+ return self.model
33
+
34
+ """ ---------------------------------------------------------------------------- """
35
+ """ -------------------------- 2) Activation Function -------------------------- """
36
+
37
+ def replace_activation_function(self, activation):
38
+ self.freeze_unfreeze(True)
39
+ functions = {
40
+ "GELU": nn.GELU(),
41
+ "LeakyReLU": nn.LeakyReLU(),
42
+ "ELU": nn.ELU(),
43
+ "Mish": nn.Mish(),
44
+ # "ReLU": nn.ReLU(),
45
+ }
46
+
47
+ def replace_activation_in_module(module, activation_layer):
48
+ for name, child in module.named_children():
49
+ if isinstance(child, nn.ReLU):
50
+ setattr(module, name, activation_layer)
51
+ else:
52
+ replace_activation_in_module(child, activation_layer)
53
+
54
+ new_activation_layer = functions[activation].to(self.device)
55
+ replace_activation_in_module(self.model, new_activation_layer)
56
+ return self.model
57
+
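A usage sketch for this wrapper class (base_model is a hypothetical TransformerModel instance; freeze_unfreeze is assumed to be defined elsewhere in this file):

    import torch

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    wrapper = TransformerModels(base_model.to(device), device)
    model = wrapper.replace_activation_function("GELU")   # swaps every nn.ReLU for nn.GELU in place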
58
+ """ ---------------------------------------------------------------------------- """
59
+ """ ---------------------------- 3) New Encoder Layers ------------------------- """
60
+
61
+ def add_encoder_layers(self, num_new_layers=2):
62
+ self.freeze_unfreeze(True)
63
+ new_encoder_layers = [EncoderLayer(512, 4, 0.1, nn.ReLU()).to(self.device) for _ in range(num_new_layers)]
64
+
65
+ for i, new_layer in enumerate(new_encoder_layers):
66
+ self.model.transformer_layers.insert(4 + i, new_layer.to(self.device))
67
+
68
+ return self.model
69
+
70
+ """ ---------------------------------------------------------------------------- """
71
+ """ -------------------------------- 4) Dropout -------------------------------- """
72
+
73
+ # def dropout_value_change(self, val=0.1):
74
+ # self.freeze_unfreeze(True)
75
+ # for layer in self.model.modules():
76
+ # if isinstance(layer, nn.Dropout):
77
+ # layer.p = val
78
+ #
79
+ # return self.model
80
+
81
+ def dropout_value_change(self, val=0.1):
82
+ self.freeze_unfreeze(True)
83
+
84
+ def replace_dropouts_in_module(module, rate):
85
+ for name, child in module.named_children():
86
+ if isinstance(child, nn.Dropout):
87
+ setattr(module, name, nn.Dropout(rate).to(self.device))
88
+ else:
89
+ replace_dropouts_in_module(child, rate)
90
+
91
+ replace_dropouts_in_module(self.model, val)
92
+
93
+ return self.model
94
+
95
+ """ ---------------------------------------------------------------------------- """
96
+ """ ------------------------- 5) Output linear layers -------------------------- """
97
+
98
+    def change_linear_output_layers(self):
+        output_layers_names = [
+            "output_linear1",
+            "output_linear2",
+            "output_linear3",
+            "output_linear_bin1",
+            "output_linear_bin2",
+            "output_linear_bin3",
+        ]
+        for name, param in self.model.named_parameters():
+            param.requires_grad = False
+            if name.split(".")[0] in output_layers_names:
+                param.requires_grad = True
+
+        output_linear1 = self.model.output_linear1
+        output_linear2 = self.model.output_linear2
+        output_linear3 = self.model.output_linear3
+        output_linear_bin1 = self.model.output_linear_bin1
+        output_linear_bin2 = self.model.output_linear_bin2
+        output_linear_bin3 = self.model.output_linear_bin3
+
+        output_linear11 = nn.Linear(output_linear1.out_features,
+                                    output_linear1.out_features).to(self.device)
+        output_linear21 = nn.Linear(output_linear2.out_features,
+                                    output_linear2.out_features).to(self.device)
+
+        # self.model.output_layers = nn.Sequential(
+        #     output_linear1,
+        #     output_linear11,
+        #     output_linear2,
+        #     output_linear21,
+        #     output_linear3,
+        #     output_linear_bin1,
+        #     output_linear_bin2,
+        #     output_linear_bin3,
+        # )
+        # nn.Module has no insert(); chain the new layers after the existing ones instead
+        self.model.output_linear1 = nn.Sequential(output_linear1, output_linear11)
+        self.model.output_linear2 = nn.Sequential(output_linear2, output_linear21)
+
+        return self.model
+
+    # def change_linear_output_layers(self):
+    #     output_layers_names = [
+    #         "output_linear1",
+    #         "output_linear2",
+    #         "output_linear3",
+    #         "output_linear_bin1",
+    #         "output_linear_bin2",
+    #         "output_linear_bin3",
+    #     ]
+    #     for name, param in self.model.named_parameters():
+    #         param.requires_grad = False
+    #         if name.split(".")[0] in output_layers_names:
+    #             param.requires_grad = True
+    #
+    #     output_linear1 = self.model.output_linear1
+    #     output_linear2 = self.model.output_linear2
+    #     output_linear3 = self.model.output_linear3
+    #     # output_linear_bin1 = self.model.output_linear_bin1
+    #     # output_linear_bin2 = self.model.output_linear_bin2
+    #     # output_linear_bin3 = self.model.output_linear_bin3
+    #
+    #     output_linear11 = nn.Linear(output_linear1.out_features,
+    #                                 output_linear1.out_features).to(self.device)
+    #     output_linear21 = nn.Linear(output_linear2.out_features,
+    #                                 output_linear2.out_features).to(self.device)
+    #
+    #     self.model.output_linear1.append(output_linear11.to(self.device))
+    #     self.model.output_linear2.append(output_linear21.to(self.device))
+    #
+    #     # self.model.output_layers = nn.Sequential(
+    #     #     output_linear1,
+    #     #     output_linear11,
+    #     #     output_linear2,
+    #     #     output_linear21,
+    #     #     output_linear3,
+    #     #     output_linear_bin1,
+    #     #     output_linear_bin2,
+    #     #     output_linear_bin3,
+    #     # )
+    #
+    #     return self.model
+
+ """ ---------------------------------------------------------------------------- """
182
+ """ ---------------------------- 6) Cross-Attention ---------------------------- """
183
+
184
+ def add_cross_attention(self, embed_dim=512, num_heads=8, dropout=0.1):
185
+ self.freeze_unfreeze(True)
186
+ for idx, layer in enumerate(self.model.transformer_layers):
187
+ cross_attn_layer = CrossAttentionLayer(embed_dim, num_heads, dropout).to(self.device)
188
+ layer.gen_attn = nn.Sequential(layer.gen_attn, cross_attn_layer).to(self.device)
189
+
190
+ return self.model
191
+
+    """ ---------------------------------------------------------------------------- """
+    """ -------------------------- 7) Residual Connections? ------------------------- """
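+
+    # Possible sketch for this placeholder. It assumes each entry of
+    # self.model.transformer_layers maps its input tensor to a same-shaped output,
+    # so the output can be added back onto the input (a skip connection).
+    def add_residual_connections(self):
+        self.freeze_unfreeze(True)
+
+        class ResidualWrapper(nn.Module):
+            def __init__(self, sublayer):
+                super().__init__()
+                self.sublayer = sublayer
+
+            def forward(self, x, *args, **kwargs):
+                # residual/skip connection around the wrapped sublayer
+                return x + self.sublayer(x, *args, **kwargs)
+
+        for idx, layer in enumerate(self.model.transformer_layers):
+            self.model.transformer_layers[idx] = ResidualWrapper(layer).to(self.device)
+
+        return self.model
+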
+ """ ---------------------------------------------------------------------------- """
196
+ """ ------------------------------- 8) Attention Heads? (check if works with same params) ------------------------------- """
197
+
198
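+
+    # Possible sketch for this placeholder: rebuild every nn.MultiheadAttention with a
+    # different head count. Keeping embed_dim fixed keeps the parameter count unchanged
+    # (the projection weights are embed_dim x embed_dim regardless of num_heads), but
+    # num_heads must divide embed_dim evenly and the attention weights are reinitialised.
+    def change_attention_heads(self, num_heads=8):
+        self.freeze_unfreeze(True)
+        for name, layer in self.model.named_modules():
+            if isinstance(layer, nn.MultiheadAttention):
+                new_layer = nn.MultiheadAttention(layer.embed_dim, num_heads,
+                                                  dropout=layer.dropout).to(self.device)
+                parent_module = dict(self.model.named_modules())[name.rsplit('.', 1)[0]]
+                setattr(parent_module, name.split('.')[-1], new_layer)
+        return self.model
+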
+ """ ---------------------------------------------------------------------------- """
199
+ #Add LayerNorm Before/After Attention
200
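+
+    # Sketch for the note above, following the same wrapping pattern as add_cross_attention:
+    # put a LayerNorm in front of each attention block (pre-norm). The gen_attn attribute
+    # and the 512 feature size are assumptions carried over from the methods above.
+    def add_layernorm_before_attention(self, embed_dim=512):
+        self.freeze_unfreeze(True)
+        for layer in self.model.transformer_layers:
+            layer.gen_attn = nn.Sequential(nn.LayerNorm(embed_dim).to(self.device), layer.gen_attn)
+        return self.model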
+
+    # ADAM ?
+    # weight decay ?
+    # learning rate?
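+
+    # Sketch for the notes above: an optimizer over only the currently unfrozen parameters.
+    # AdamW keeps weight decay decoupled from the Adam update; the lr and weight_decay
+    # values here are placeholders, not tuned settings.
+    def make_finetune_optimizer(self, lr=1e-4, weight_decay=1e-2):
+        from torch.optim import AdamW  # local import keeps this sketch self-contained
+        trainable_params = [p for p in self.model.parameters() if p.requires_grad]
+        return AdamW(trainable_params, lr=lr, weight_decay=weight_decay)
+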
+    def freeze_unfreeze(self, flag):
+        for param in self.model.parameters():
+            param.requires_grad = flag
+
+    def count_parameters(self):
+        model = self.model
+        trainable_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
+        untrainable_params = sum(p.numel() for p in model.parameters() if not p.requires_grad)
+
+        print(f"Trainable parameters: {trainable_params}")
+        print(f"Untrainable parameters: {untrainable_params}")
+        return trainable_params, untrainable_params
+
+
+class CrossAttentionLayer(nn.Module):
+    def __init__(self, embed_dim, num_heads, dropout=0.1):
+        super(CrossAttentionLayer, self).__init__()
+        self.cross_attn = nn.MultiheadAttention(embed_dim, num_heads, dropout=dropout)
+        self.norm = nn.LayerNorm(embed_dim)
+        self.dropout = nn.Dropout(dropout)
+
+    def forward(self, query, key_value=None, attn_mask=None):
+        # when no separate context is given (e.g. when chained inside nn.Sequential in
+        # add_cross_attention above), attend over the query sequence itself
+        if key_value is None:
+            key_value = query
+        attn_output, _ = self.cross_attn(query, key_value, key_value, attn_mask=attn_mask)
+        # residual connection followed by LayerNorm
+        return self.norm(self.dropout(attn_output) + query)
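+
+
+# Minimal usage sketch for CrossAttentionLayer. Shapes follow nn.MultiheadAttention's
+# default (seq_len, batch, embed_dim) layout; the tensors below are illustrative only.
+# import torch
+# layer = CrossAttentionLayer(embed_dim=512, num_heads=8)
+# query = torch.randn(100, 4, 512)    # sequence attending to the context
+# context = torch.randn(32, 4, 512)   # conditioning sequence
+# out = layer(query, context)         # -> (100, 4, 512)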