Delete module/core_vq.py

module/core_vq.py (+0 -383)
DELETED
@@ -1,383 +0,0 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
# This implementation is inspired from
# https://github.com/lucidrains/vector-quantize-pytorch
# which is released under MIT License. Hereafter, the original license:
# MIT License
#
# Copyright (c) 2020 Phil Wang
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.

"""Core vector quantization implementation."""
import typing as tp

from einops import rearrange, repeat
import torch
from torch import nn
import torch.nn.functional as F
from tqdm import tqdm


def default(val: tp.Any, d: tp.Any) -> tp.Any:
    return val if val is not None else d


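# In-place exponential moving average: moving_avg <- decay * moving_avg + (1 - decay) * new.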
def ema_inplace(moving_avg, new, decay: float):
    moving_avg.data.mul_(decay).add_(new, alpha=(1 - decay))


def laplace_smoothing(x, n_categories: int, epsilon: float = 1e-5):
    return (x + epsilon) / (x.sum() + n_categories * epsilon)


def uniform_init(*shape: int):
    t = torch.empty(shape)
    nn.init.kaiming_uniform_(t)
    return t


def sample_vectors(samples, num: int):
    num_samples, device = samples.shape[0], samples.device

    if num_samples >= num:
        indices = torch.randperm(num_samples, device=device)[:num]
    else:
        indices = torch.randint(0, num_samples, (num,), device=device)

    return samples[indices]


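# Plain k-means, used only to seed the codebook at initialization; clustering runs
# on at most the first 500 samples (max_kmeans_samples) to keep start-up cheap.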
def kmeans(samples, num_clusters: int, num_iters: int = 10):
    dim, dtype = samples.shape[-1], samples.dtype
    max_kmeans_samples = 500
    samples = samples[:max_kmeans_samples, :]
    means = sample_vectors(samples, num_clusters)

    print("kmeans start ... ")
    for _ in tqdm(range(num_iters)):
        diffs = rearrange(samples, "n d -> n () d") - rearrange(means, "c d -> () c d")
        dists = -(diffs**2).sum(dim=-1)

        buckets = dists.max(dim=-1).indices
        bins = torch.bincount(buckets, minlength=num_clusters)
        zero_mask = bins == 0
        bins_min_clamped = bins.masked_fill(zero_mask, 1)

        new_means = buckets.new_zeros(num_clusters, dim, dtype=dtype)
        new_means.scatter_add_(0, repeat(buckets, "n -> n d", d=dim), samples)
        new_means = new_means / bins_min_clamped[..., None]

        means = torch.where(zero_mask[..., None], means, new_means)

    return means, bins


class EuclideanCodebook(nn.Module):
    """Codebook with Euclidean distance.
    Args:
        dim (int): Dimension.
        codebook_size (int): Codebook size.
        kmeans_init (bool): Whether to use k-means to initialize the codebooks.
            If set to true, run the k-means algorithm on the first training batch and use
            the learned centroids as initialization.
        kmeans_iters (int): Number of iterations used for the k-means algorithm at initialization.
        decay (float): Decay for the exponential moving average over the codebooks.
        epsilon (float): Epsilon value for numerical stability.
        threshold_ema_dead_code (int): Threshold for dead-code expiration. Replace any codes
            that have an exponential moving average cluster size less than the specified
            threshold with a randomly selected vector from the current batch.
    """

    def __init__(
        self,
        dim: int,
        codebook_size: int,
        kmeans_init: bool = False,
        kmeans_iters: int = 10,
        decay: float = 0.99,
        epsilon: float = 1e-5,
        threshold_ema_dead_code: int = 2,
    ):
        super().__init__()
        self.decay = decay
        init_fn: tp.Union[tp.Callable[..., torch.Tensor], tp.Any] = (
            uniform_init if not kmeans_init else torch.zeros
        )
        embed = init_fn(codebook_size, dim)

        self.codebook_size = codebook_size

        self.kmeans_iters = kmeans_iters
        self.epsilon = epsilon
        self.threshold_ema_dead_code = threshold_ema_dead_code

        self.register_buffer("inited", torch.Tensor([not kmeans_init]))
        self.register_buffer("cluster_size", torch.zeros(codebook_size))
        self.register_buffer("embed", embed)
        self.register_buffer("embed_avg", embed.clone())

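    # Lazy codebook initialization: when kmeans_init=True, the first forward pass
    # runs k-means on the incoming batch and copies the centroids into the buffers.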
    @torch.jit.ignore
    def init_embed_(self, data):
        if self.inited:
            return

        embed, cluster_size = kmeans(data, self.codebook_size, self.kmeans_iters)
        self.embed.data.copy_(embed)
        self.embed_avg.data.copy_(embed.clone())
        self.cluster_size.data.copy_(cluster_size)
        self.inited.data.copy_(torch.Tensor([True]))
        # Make sure all buffers across workers are in sync after initialization
        # broadcast_tensors(self.buffers())

    def replace_(self, samples, mask):
        modified_codebook = torch.where(
            mask[..., None], sample_vectors(samples, self.codebook_size), self.embed
        )
        self.embed.data.copy_(modified_codebook)

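    # Dead-code expiry: any code whose EMA usage falls below the threshold is
    # re-seeded with a vector sampled from the current batch.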
    def expire_codes_(self, batch_samples):
        if self.threshold_ema_dead_code == 0:
            return

        expired_codes = self.cluster_size < self.threshold_ema_dead_code
        if not torch.any(expired_codes):
            return

        batch_samples = rearrange(batch_samples, "... d -> (...) d")
        self.replace_(batch_samples, mask=expired_codes)
        # broadcast_tensors(self.buffers())

    def preprocess(self, x):
        x = rearrange(x, "... d -> (...) d")
        return x

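    # Nearest-code search using the expansion ||x - e||^2 = ||x||^2 - 2 x.e + ||e||^2,
    # negated so that the argmax picks the closest codebook entry.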
    def quantize(self, x):
        embed = self.embed.t()
        dist = -(
            x.pow(2).sum(1, keepdim=True)
            - 2 * x @ embed
            + embed.pow(2).sum(0, keepdim=True)
        )
        embed_ind = dist.max(dim=-1).indices
        return embed_ind

    def postprocess_emb(self, embed_ind, shape):
        return embed_ind.view(*shape[:-1])

    def dequantize(self, embed_ind):
        quantize = F.embedding(embed_ind, self.embed)
        return quantize

    def encode(self, x):
        shape = x.shape
        # pre-process
        x = self.preprocess(x)
        # quantize
        embed_ind = self.quantize(x)
        # post-process
        embed_ind = self.postprocess_emb(embed_ind, shape)
        return embed_ind

    def decode(self, embed_ind):
        quantize = self.dequantize(embed_ind)
        return quantize

    def forward(self, x):
        shape, dtype = x.shape, x.dtype
        x = self.preprocess(x)

        self.init_embed_(x)

        embed_ind = self.quantize(x)
        embed_onehot = F.one_hot(embed_ind, self.codebook_size).type(dtype)
        embed_ind = self.postprocess_emb(embed_ind, shape)
        quantize = self.dequantize(embed_ind)

        if self.training:
            # We do the expiry of code at that point as buffers are in sync
            # and all the workers will take the same decision.
            self.expire_codes_(x)
            ema_inplace(self.cluster_size, embed_onehot.sum(0), self.decay)
            embed_sum = x.t() @ embed_onehot
            ema_inplace(self.embed_avg, embed_sum.t(), self.decay)
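            # Laplace smoothing keeps the normalized cluster sizes strictly positive,
            # avoiding division by zero for rarely used codes in the update below.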
            cluster_size = (
                laplace_smoothing(self.cluster_size, self.codebook_size, self.epsilon)
                * self.cluster_size.sum()
            )
            embed_normalized = self.embed_avg / cluster_size.unsqueeze(1)
            self.embed.data.copy_(embed_normalized)

        return quantize, embed_ind


class VectorQuantization(nn.Module):
    """Vector quantization implementation.
    Currently supports only Euclidean distance.
    Args:
        dim (int): Dimension.
        codebook_size (int): Codebook size.
        codebook_dim (int): Codebook dimension. If not defined, uses the specified dimension in dim.
        decay (float): Decay for the exponential moving average over the codebooks.
        epsilon (float): Epsilon value for numerical stability.
        kmeans_init (bool): Whether to use k-means to initialize the codebooks.
        kmeans_iters (int): Number of iterations used for k-means initialization.
        threshold_ema_dead_code (int): Threshold for dead-code expiration. Replace any codes
            that have an exponential moving average cluster size less than the specified
            threshold with a randomly selected vector from the current batch.
        commitment_weight (float): Weight for the commitment loss.
    """

    def __init__(
        self,
        dim: int,
        codebook_size: int,
        codebook_dim: tp.Optional[int] = None,
        decay: float = 0.99,
        epsilon: float = 1e-5,
        kmeans_init: bool = True,
        kmeans_iters: int = 50,
        threshold_ema_dead_code: int = 2,
        commitment_weight: float = 1.0,
    ):
        super().__init__()
        _codebook_dim: int = default(codebook_dim, dim)

        requires_projection = _codebook_dim != dim
        self.project_in = (
            nn.Linear(dim, _codebook_dim) if requires_projection else nn.Identity()
        )
        self.project_out = (
            nn.Linear(_codebook_dim, dim) if requires_projection else nn.Identity()
        )

        self.epsilon = epsilon
        self.commitment_weight = commitment_weight

        self._codebook = EuclideanCodebook(
            dim=_codebook_dim,
            codebook_size=codebook_size,
            kmeans_init=kmeans_init,
            kmeans_iters=kmeans_iters,
            decay=decay,
            epsilon=epsilon,
            threshold_ema_dead_code=threshold_ema_dead_code,
        )
        self.codebook_size = codebook_size

    @property
    def codebook(self):
        return self._codebook.embed

    def encode(self, x):
        x = rearrange(x, "b d n -> b n d")
        x = self.project_in(x)
        embed_in = self._codebook.encode(x)
        return embed_in

    def decode(self, embed_ind):
        quantize = self._codebook.decode(embed_ind)
        quantize = self.project_out(quantize)
        quantize = rearrange(quantize, "b n d -> b d n")
        return quantize

    def forward(self, x):
        device = x.device
        x = rearrange(x, "b d n -> b n d")
        x = self.project_in(x)

        quantize, embed_ind = self._codebook(x)

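        # Straight-through estimator: the forward pass uses the quantized values,
        # while gradients flow back to x as if quantization were the identity.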
        if self.training:
            quantize = x + (quantize - x).detach()

        loss = torch.tensor([0.0], device=device, requires_grad=self.training)

        if self.training:
            if self.commitment_weight > 0:
                commit_loss = F.mse_loss(quantize.detach(), x)
                loss = loss + commit_loss * self.commitment_weight

        quantize = self.project_out(quantize)
        quantize = rearrange(quantize, "b n d -> b d n")
        return quantize, embed_ind, loss


class ResidualVectorQuantization(nn.Module):
    """Residual vector quantization implementation.
    Follows Algorithm 1 in https://arxiv.org/pdf/2107.03312.pdf
    """

    def __init__(self, *, num_quantizers, **kwargs):
        super().__init__()
        self.layers = nn.ModuleList(
            [VectorQuantization(**kwargs) for _ in range(num_quantizers)]
        )

    def forward(
        self, x, n_q: tp.Optional[int] = None, layers: tp.Optional[list] = None
    ):
        quantized_out = 0.0
        residual = x

        all_losses = []
        all_indices = []
        out_quantized = []

        n_q = n_q or len(self.layers)

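        # Residual quantization: each stage codes what the previous stages left over,
        # so later codebooks capture progressively finer detail.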
        for i, layer in enumerate(self.layers[:n_q]):
            quantized, indices, loss = layer(residual)
            residual = residual - quantized
            quantized_out = quantized_out + quantized

            all_indices.append(indices)
            all_losses.append(loss)
            if layers and i in layers:
                out_quantized.append(quantized)

        out_losses, out_indices = map(torch.stack, (all_losses, all_indices))
        return quantized_out, out_indices, out_losses, out_quantized

    def encode(
        self, x: torch.Tensor, n_q: tp.Optional[int] = None, st: tp.Optional[int] = None
    ) -> torch.Tensor:
        residual = x
        all_indices = []
        n_q = n_q or len(self.layers)
        st = st or 0
        for layer in self.layers[st:n_q]:
            indices = layer.encode(residual)
            quantized = layer.decode(indices)
            residual = residual - quantized
            all_indices.append(indices)
        out_indices = torch.stack(all_indices)
        return out_indices

    def decode(self, q_indices: torch.Tensor, st: int = 0) -> torch.Tensor:
        quantized_out = torch.tensor(0.0, device=q_indices.device)
        for i, indices in enumerate(q_indices):
            layer = self.layers[st + i]
            quantized = layer.decode(indices)
            quantized_out = quantized_out + quantized
        return quantized_out
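
For context, a minimal sketch of how the deleted classes fit together. The num_quantizers/dim/codebook_size values and tensor shapes below are illustrative assumptions, not values taken from this repository:

    import torch

    # Hypothetical configuration: 8 residual stages over 128-dim features.
    rvq = ResidualVectorQuantization(num_quantizers=8, dim=128, codebook_size=1024)

    x = torch.randn(2, 128, 50)  # (batch, dim, frames): the "b d n" layout forward expects

    # The first forward pass k-means-initializes each codebook (kmeans_init=True by default).
    quantized, indices, losses, _ = rvq(x)  # indices: (n_q, batch, frames); one loss per stage

    codes = rvq.encode(x)      # stacked code indices, shape (n_q, batch, frames)
    recon = rvq.decode(codes)  # sum of per-stage decodes, same shape as x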