# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""
Base class for all quantizers.
"""
from dataclasses import dataclass, field
import typing as tp
import torch
from torch import nn
@dataclass
class QuantizedResult:
    """Container returned by a quantizer's ``forward``: the (approximately)
    quantized representation, its discrete codes, the bandwidth used, an
    optional loss penalty, and a dict of metrics for logging."""
    x: torch.Tensor  # quantized (or approximately quantized) representation
    codes: torch.Tensor  # discrete codes produced by the quantizer
    bandwidth: torch.Tensor  # bandwidth in kb/s used, per batch item.
    penalty: tp.Optional[torch.Tensor] = None  # optional extra term to add to the training loss
    metrics: dict = field(default_factory=dict)  # extra values to update logging etc.
class BaseQuantizer(nn.Module):
    """Abstract interface shared by all quantizer implementations.

    Subclasses must provide ``forward``, ``encode``, ``decode``, the two
    codebook properties and ``set_num_codebooks``.
    """

    def forward(self, x: torch.Tensor, frame_rate: int) -> QuantizedResult:
        """Quantize ``x`` and report the result.

        Returns a ``QuantizedResult`` holding the quantized (or approximately
        quantized) representation, the discrete codes, the bandwidth used,
        any penalty term for the loss, and a dict of metrics for logging.
        ``frame_rate`` is required so that the bandwidth can be computed.
        """
        raise NotImplementedError

    def encode(self, x: torch.Tensor) -> torch.Tensor:
        """Turn the given input tensor into discrete codes."""
        raise NotImplementedError

    def decode(self, codes: torch.Tensor) -> torch.Tensor:
        """Map discrete codes back to the quantized representation."""
        raise NotImplementedError

    @property
    def total_codebooks(self):
        """Total number of codebooks available."""
        raise NotImplementedError

    @property
    def num_codebooks(self):
        """Number of codebooks currently active."""
        raise NotImplementedError

    def set_num_codebooks(self, n: int):
        """Choose how many codebooks are active."""
        raise NotImplementedError
class DummyQuantizer(BaseQuantizer):
    """Pass-through quantizer that performs no quantization at all.

    The "codes" are just the input with an extra codebook axis inserted, so
    ``encode`` and ``decode`` are exact inverses of each other.
    """

    def __init__(self):
        super().__init__()

    def forward(self, x: torch.Tensor, frame_rate: int):
        codes = x.unsqueeze(1)
        # 32 bits per element (assumes float32 input — confirm), converted to
        # kb/s and divided by the batch size to get a per-item bandwidth.
        bandwidth = torch.tensor(codes.numel() * 32 * frame_rate / 1000 / len(x)).to(x)
        return QuantizedResult(x, codes, bandwidth)

    def encode(self, x: torch.Tensor) -> torch.Tensor:
        """Return the input with a singleton codebook axis added.

        For the DummyQuantizer the codes are identical to the input and to
        the resulting quantized representation, as no quantization is done.
        """
        return x[:, None]

    def decode(self, codes: torch.Tensor) -> torch.Tensor:
        """Drop the singleton codebook axis, recovering the original input.

        For the DummyQuantizer the codes are identical to the input and to
        the resulting quantized representation, as no quantization is done.
        """
        return codes.squeeze(1)

    @property
    def total_codebooks(self):
        """Total number of codebooks — always one for this quantizer."""
        return 1

    @property
    def num_codebooks(self):
        """Active codebooks — the same as the total for this quantizer."""
        return self.total_codebooks

    def set_num_codebooks(self, n: int):
        """Always refuses: the dummy quantizer has a fixed single codebook."""
        raise AttributeError("Cannot override the number of codebooks for the dummy quantizer")