Get directory that log files are being written to.
will be None if there is no output directory (i.e., if you didn't call start) | def get_dir():
"""
Get directory that log files are being written to.
will be None if there is no output directory (i.e., if you didn't call start)
"""
return get_current().get_dir() |
Usage:
@profile("my_func")
def my_func(): code | def profile(n):
"""
Usage:
@profile("my_func")
def my_func(): code
"""
def decorator_with_name(func):
def func_wrapper(*args, **kwargs):
with profile_kv(n):
return func(*args, **kwargs)
return func_wrapper
return decorator_with_name |
Copied from: https://github.com/EXP/baselines/blob/ea25b9e8b234e6ee1bca43083f8f3cf974143998/baselines/common/mpi_util.py#L110
Perform a weighted average over dicts that are each on a different node
Input: local_name2valcount: dict mapping key -> (value, count)
Returns: key -> mean | def mpi_weighted_mean(comm, local_name2valcount):
"""
Copied from: https://github.com/EXP/baselines/blob/ea25b9e8b234e6ee1bca43083f8f3cf974143998/baselines/common/mpi_util.py#L110
Perform a weighted average over dicts that are each on a different node
Input: local_name2valcount: dict mapping key -> (value, count)
Returns: key -> mean
"""
all_name2valcount = comm.gather(local_name2valcount)
if comm.rank == 0:
name2sum = defaultdict(float)
name2count = defaultdict(float)
for n2vc in all_name2valcount:
for name, (val, count) in n2vc.items():
try:
val = float(val)
except ValueError:
if comm.rank == 0:
warnings.warn(
"WARNING: tried to compute mean on non-float {}={}".format(
name, val
)
)
else:
name2sum[name] += val * count
name2count[name] += count
return {name: name2sum[name] / name2count[name] for name in name2sum}
else:
return {} |
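A minimal, MPI-free illustration of the same reduction, using two hypothetical per-rank dicts of (value, count) pairs to show how the count-weighted mean is formed:
from collections import defaultdict

rank_dicts = [
    {"loss": (2.0, 10), "acc": (0.5, 10)},  # hypothetical rank 0
    {"loss": (4.0, 30)},                    # hypothetical rank 1
]
name2sum, name2count = defaultdict(float), defaultdict(float)
for n2vc in rank_dicts:
    for name, (val, count) in n2vc.items():
        name2sum[name] += val * count
        name2count[name] += count
means = {name: name2sum[name] / name2count[name] for name in name2sum}
# means == {"loss": 3.5, "acc": 0.5}; "loss" is count-weighted: (2*10 + 4*30) / 40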
If comm is provided, average all numerical stats across that comm | def configure(dir=None, format_strs=None, comm=None, log_suffix=""):
"""
If comm is provided, average all numerical stats across that comm
"""
if dir is None:
dir = os.getenv("EXP_LOGDIR")
if dir is None:
dir = osp.join(
tempfile.gettempdir(),
datetime.datetime.now().strftime("exp-%Y-%m-%d-%H-%M-%S-%f"),
)
assert isinstance(dir, str)
dir = os.path.expanduser(dir)
os.makedirs(os.path.expanduser(dir), exist_ok=True)
rank = get_rank_without_mpi_import()
if rank > 0:
log_suffix = log_suffix + "-rank%03i" % rank
if format_strs is None:
if rank == 0:
format_strs = os.getenv("EXP_LOG_FORMAT", "stdout,log,csv").split(",")
else:
format_strs = os.getenv("EXP_LOG_FORMAT_MPI", "log").split(",")
format_strs = filter(None, format_strs)
output_formats = [make_output_format(f, dir, log_suffix) for f in format_strs]
Logger.CURRENT = Logger(dir=dir, output_formats=output_formats, comm=comm)
if output_formats:
log("Logging to %s" % dir) |
Setup a distributed process group. | def setup_dist(device=0):
"""
Setup a distributed process group.
"""
global used_device
used_device = device
if dist.is_initialized():
return |
Get the device to use for torch.distributed. | def dev():
"""
Get the device to use for torch.distributed.
"""
global used_device
if torch.cuda.is_available() and used_device >= 0:
return torch.device(f"cuda:{used_device}")
return torch.device("cpu") |
Load a PyTorch file without redundant fetches across MPI ranks. | def load_state_dict(path, **kwargs):
"""
Load a PyTorch file without redundant fetches across MPI ranks.
"""
return torch.load(path, **kwargs) |
Synchronize a sequence of Tensors across ranks from rank 0. | def sync_params(params):
"""
Synchronize a sequence of Tensors across ranks from rank 0.
"""
for p in params:
with torch.no_grad():
dist.broadcast(p, 0) |
Broadcast the tensors from the given parameters to all workers.
This can be used to ensure that all workers have the same model to start with. | def broadcast_tensors(tensors: tp.Iterable[torch.Tensor], src: int = 0):
"""Broadcast the tensors from the given parameters to all workers.
This can be used to ensure that all workers have the same model to start with.
"""
if not is_distributed():
return
tensors = [tensor for tensor in tensors if _is_complex_or_float(tensor)]
_check_number_of_params(tensors)
handles = []
for tensor in tensors:
handle = torch.distributed.broadcast(tensor.data, src=src, async_op=True)
handles.append(handle)
for handle in handles:
handle.wait() |
Crops a region from an image. | def apply_crop(
image,
ymin,
ymax,
xmin,
xmax,
):
"""Crops a region from an image."""
# NOTE: here we are expecting one of [H, W] [H, W, C] [B, H, W, C]
if len(image.shape) == 2:
return image[ymin:ymax, xmin:xmax]
elif len(image.shape) == 3:
return image[ymin:ymax, xmin:xmax, :]
elif len(image.shape) == 4:
return image[:, ymin:ymax, xmin:xmax, :]
else:
raise ValueError("provide a batch of images or a single image") |
Tile a given set of features into a convolutional map.
Args:
x: float tensor of shape [N, F]
size: int, spatial size to tile to
Returns:
a feature map [N, F, size] | def tile1d(x, size):
"""Tile a given set of features into a convolutional map.
Args:
x: float tensor of shape [N, F]
size: int, spatial size to tile to
Returns:
a feature map [N, F, size]
"""
# size = size if isinstance(size, tuple) else (size, size)
return x[:, :, np.newaxis].expand(-1, -1, size) |
Tile a given set of features into a convolutional map.
Args:
x: float tensor of shape [N, F]
size: int, spatial size to tile to
Returns:
a feature map [N, F, size, size] | def tile2d(x, size: int):
"""Tile a given set of features into a convolutional map.
Args:
x: float tensor of shape [N, F]
size: int, spatial size to tile to
Returns:
a feature map [N, F, size, size]
"""
# size = size if isinstance(size, tuple) else (size, size)
# NOTE: expecting only int here (!!!)
return x[:, :, np.newaxis, np.newaxis].expand(-1, -1, size, size) |
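A minimal sketch showing what the tiling produces: every spatial location of the output map holds the same per-sample feature vector.
import torch as th

x = th.randn(4, 16)
fmap = tile2d(x, 8)
assert fmap.shape == (4, 16, 8, 8)
assert th.equal(fmap[0, :, 0, 0], x[0])  # features are replicated spatially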
Generates numpy array filled in with Gaussian values.
The function generates Gaussian kernel (values according to the Gauss distribution)
on the grid according to the kernel size.
Args:
ksize (int): The kernel size; must be an odd number larger than 1, otherwise an exception is thrown.
std (float): The standard deviation. May be None, in which case it is calculated
according to the kernel size.
Returns:
np.array: The gaussian kernel. | def gaussian_kernel(ksize: int, std: Optional[float] = None) -> np.ndarray:
"""Generates numpy array filled in with Gaussian values.
The function generates Gaussian kernel (values according to the Gauss distribution)
on the grid according to the kernel size.
Args:
ksize (int): The kernel size; must be an odd number larger than 1, otherwise an exception is thrown.
std (float): The standard deviation. May be None, in which case it is calculated
according to the kernel size.
Returns:
np.array: The gaussian kernel.
"""
assert ksize % 2 == 1
radius = ksize // 2
if std is None:
std = np.sqrt(-(radius**2) / (2 * np.log(0.05)))
x, y = np.meshgrid(np.linspace(-radius, radius, ksize), np.linspace(-radius, radius, ksize))
xy = np.stack([x, y], axis=2)
gk = np.exp(-(xy**2).sum(-1) / (2 * std**2))
gk /= gk.sum()
return gk |
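A hedged usage sketch: the kernel sums to 1, so it can be applied directly as a blur with a plain convolution (torch's conv2d is used here purely for illustration).
import torch as th
import torch.nn.functional as F

gk = gaussian_kernel(7)
assert abs(gk.sum() - 1.0) < 1e-6
kernel = th.from_numpy(gk).float()[None, None]  # [1, 1, 7, 7]
img = th.rand(1, 1, 32, 32)
blurred = F.conv2d(img, kernel, padding=3)      # same spatial size as the input
assert blurred.shape == img.shape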
Wraps a torch.nn.Module class to support weight normalization. The wrapped class
is compatible with the fuse/unfuse syntax and is able to load state dict from previous
implementations.
Args:
cls: Type[th.nn.Module]
Class to apply the wrapper to.
new_cls_name: str
Name of the new class created by the wrapper. This should be the name
of whatever variable you assign the result of this function to. Ex:
``SomeLayerWN = weight_norm_wrapper(SomeLayer, "SomeLayerWN", ...)``
name: str
Name of the parameter to apply weight normalization to.
g_dim: int
Learnable dimension of the magnitude tensor. Set to None or -1 for single scalar magnitude.
Default values for Linear and Conv2d layers are 0s and for ConvTranspose2d layers are 1s.
v_dim: int
The dimension of the direction tensor along which the norm is calculated independently. Set to
None or -1 for calculating norm over the entire direction tensor (weight tensor). Default
values for most of the WN layers are None to preserve the existing behavior. | def weight_norm_wrapper(
cls: Type[th.nn.Module],
new_cls_name: str,
name: str = "weight",
g_dim: int = 0,
v_dim: Optional[int] = 0,
):
"""Wraps a torch.nn.Module class to support weight normalization. The wrapped class
is compatible with the fuse/unfuse syntax and is able to load state dict from previous
implementations.
Args:
cls: Type[th.nn.Module]
Class to apply the wrapper to.
new_cls_name: str
Name of the new class created by the wrapper. This should be the name
of whatever variable you assign the result of this function to. Ex:
``SomeLayerWN = weight_norm_wrapper(SomeLayer, "SomeLayerWN", ...)``
name: str
Name of the parameter to apply weight normalization to.
g_dim: int
Learnable dimension of the magnitude tensor. Set to None or -1 for single scalar magnitude.
Default values for Linear and Conv2d layers are 0s and for ConvTranspose2d layers are 1s.
v_dim: int
The dimension of the direction tensor along which the norm is calculated independently. Set to
None or -1 for calculating norm over the entire direction tensor (weight tensor). Default
values for most of the WN layers are None to preserve the existing behavior.
"""
class Wrap(cls):
def __init__(self, *args: Any, name=name, g_dim=g_dim, v_dim=v_dim, **kwargs: Any):
# Check if the extra arguments are overwriting arguments for the wrapped class
check_args_shadowing(
"weight_norm_wrapper", super().__init__, ["name", "g_dim", "v_dim"]
)
super().__init__(*args, **kwargs)
# Sanitize v_dim since we are hacking the built-in utility to support
# a non-standard WeightNorm implementation.
if v_dim is None:
v_dim = -1
self.weight_norm_args = {"name": name, "g_dim": g_dim, "v_dim": v_dim}
self.is_fused = True
self.unfuse()
# For backward compatibility.
self._register_load_state_dict_pre_hook(
TensorMappingHook(
[(name, name + "_v"), ("g", name + "_g")],
{name + "_g": getattr(self, name + "_g").shape},
)
)
def fuse(self):
if self.is_fused:
return
# Check if the module is frozen.
param_name = self.weight_norm_args["name"] + "_g"
if hasattr(self, param_name) and param_name not in self._parameters:
raise ValueError("Trying to fuse frozen module.")
remove_weight_norm(self, self.weight_norm_args["name"])
self.is_fused = True
def unfuse(self):
if not self.is_fused:
return
# Check if the module is frozen.
param_name = self.weight_norm_args["name"]
if hasattr(self, param_name) and param_name not in self._parameters:
raise ValueError("Trying to unfuse frozen module.")
wn = WeightNorm.apply(
self, self.weight_norm_args["name"], self.weight_norm_args["g_dim"]
)
# Overwrite the dim property to support mismatched norm calculate for v and g tensor.
if wn.dim != self.weight_norm_args["v_dim"]:
wn.dim = self.weight_norm_args["v_dim"]
# Adjust the norm values.
weight = getattr(self, self.weight_norm_args["name"] + "_v")
norm = getattr(self, self.weight_norm_args["name"] + "_g")
norm.data[:] = th.norm_except_dim(weight, 2, wn.dim)
self.is_fused = False
def __deepcopy__(self, memo):
# Delete derived tensor to avoid deepcopy error.
if not self.is_fused:
delattr(self, self.weight_norm_args["name"])
# Deepcopy.
cls = self.__class__
result = cls.__new__(cls)
memo[id(self)] = result
for k, v in self.__dict__.items():
setattr(result, k, copy.deepcopy(v, memo))
if not self.is_fused:
setattr(result, self.weight_norm_args["name"], None)
setattr(self, self.weight_norm_args["name"], None)
return result
# Allows for pickling of the wrapper: https://bugs.python.org/issue13520
Wrap.__qualname__ = new_cls_name
return Wrap |
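A usage sketch of the fuse/unfuse workflow, assuming the wrapper's helpers (check_args_shadowing, TensorMappingHook, WeightNorm, remove_weight_norm) are importable as in the module above:
import torch as th

LinearWN = weight_norm_wrapper(th.nn.Linear, "LinearWN", g_dim=0, v_dim=None)
layer = LinearWN(16, 32)
# Freshly constructed layers are unfused: they carry weight_v and weight_g.
assert hasattr(layer, "weight_v") and hasattr(layer, "weight_g")
y = layer(th.randn(4, 16))
# fuse() folds the magnitude/direction pair back into a single weight (e.g. for inference).
layer.fuse()
assert not hasattr(layer, "weight_v")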
Wraps a torch.nn.Module class and performs additional interpolation on the
first and only positional input of the forward method.
Args:
cls: Type[th.nn.Module]
Class to apply the wrapper to.
new_cls_name: str
Name of the new class created by the wrapper. This should be the name
of whatever variable you assign the result of this function to. Ex:
``UpConv = interpolate_wrapper(Conv, "UpConv", ...)`` | def interpolate_wrapper(cls: Type[th.nn.Module], new_cls_name: str):
"""Wraps a torch.nn.Module class and perform additional interpolation on the
first and only positional input of the forward method.
Args:
cls: Type[th.nn.Module]
Class to apply the wrapper to.
new_cls_name: str
Name of the new class created by the wrapper. This should be the name
of whatever variable you assign the result of this function to. Ex:
``UpConv = interpolate_wrapper(Conv, "UpConv", ...)``
"""
class Wrap(cls):
def __init__(
self, *args: Any, size=None, scale_factor=None, mode="bilinear", **kwargs: Any
):
check_args_shadowing(
"interpolate_wrapper", super().__init__, ["size", "scale_factor", "mode"]
)
super().__init__(*args, **kwargs)
self.register_forward_pre_hook(
InterpolateHook(size=size, scale_factor=scale_factor, mode=mode)
)
# Allows for pickling of the wrapper: https://bugs.python.org/issue13520
Wrap.__qualname__ = new_cls_name
return Wrap |
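A usage sketch, assuming InterpolateHook applies F.interpolate with the given settings before the wrapped forward runs:
import torch as th

UpConv = interpolate_wrapper(th.nn.Conv2d, "UpConv")
up = UpConv(8, 8, kernel_size=3, padding=1, scale_factor=2.0)
y = up(th.randn(1, 8, 16, 16))
assert y.shape == (1, 8, 32, 32)  # input is upsampled 2x, then convolved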
Compute a UV-space face index map identifying which mesh face contains each
texel. For texels with no assigned triangle, the index will be -1. | def make_uv_face_index(
vt: th.Tensor,
vti: th.Tensor,
uv_shape: Union[Tuple[int, int], int],
flip_uv: bool = True,
device: Optional[Union[str, th.device]] = None,
):
"""Compute a UV-space face index map identifying which mesh face contains each
texel. For texels with no assigned triangle, the index will be -1."""
if isinstance(uv_shape, int):
uv_shape = (uv_shape, uv_shape)
if device is not None:
if isinstance(device, str):
dev = th.device(device)
else:
dev = device
assert dev.type == "cuda"
else:
dev = th.device("cuda")
if flip_uv:
vt = vt.clone()
vt[:, 1] = 1 - vt[:, 1]
vt_pix = 2.0 * vt.to(dev) - 1.0
vt_pix = th.cat([vt_pix, th.ones_like(vt_pix[:, 0:1])], dim=1)
meshes = Meshes(vt_pix[np.newaxis], vti[np.newaxis].to(dev))
with th.no_grad():
face_index, _, _, _ = rasterize_meshes(
meshes, uv_shape, faces_per_pixel=1, z_clip_value=0.0, bin_size=0
)
face_index = face_index[0, ..., 0]
return face_index |
Compute a UV-space vertex index map identifying which mesh vertices
comprise the triangle containing each texel. For texels with no assigned
triangle, all indices will be -1. | def make_uv_vert_index(
vt: th.Tensor,
vi: th.Tensor,
vti: th.Tensor,
uv_shape: Union[Tuple[int, int], int],
flip_uv: bool = True,
):
"""Compute a UV-space vertex index map identifying which mesh vertices
comprise the triangle containing each texel. For texels with no assigned
triangle, all indices will be -1.
"""
face_index_map = make_uv_face_index(vt, vti, uv_shape, flip_uv).to(vi.device)
vert_index_map = vi[face_index_map.clamp(min=0)]
vert_index_map[face_index_map < 0] = -1
return vert_index_map.long() |
Computes barycentric coordinates for a set of 2D query points given
coordinates for the 3 vertices of the enclosing triangle for each point. | def bary_coords(points: th.Tensor, triangles: th.Tensor, eps: float = 1.0e-6):
"""Computes barycentric coordinates for a set of 2D query points given
coordinates for the 3 vertices of the enclosing triangle for each point."""
x = points[:, 0] - triangles[2, :, 0]
x1 = triangles[0, :, 0] - triangles[2, :, 0]
x2 = triangles[1, :, 0] - triangles[2, :, 0]
y = points[:, 1] - triangles[2, :, 1]
y1 = triangles[0, :, 1] - triangles[2, :, 1]
y2 = triangles[1, :, 1] - triangles[2, :, 1]
denom = y2 * x1 - y1 * x2
n0 = y2 * x - x2 * y
n1 = x1 * y - y1 * x
# Small epsilon to prevent divide-by-zero error.
denom = th.where(denom >= 0, denom.clamp(min=eps), denom.clamp(max=-eps))
bary_0 = n0 / denom
bary_1 = n1 / denom
bary_2 = 1.0 - bary_0 - bary_1
return th.stack((bary_0, bary_1, bary_2)) |
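A small sanity-check sketch: a query point placed exactly at vertex 0 of its triangle should receive barycentric coordinates (1, 0, 0). Note that `triangles` is laid out as [3, N, 2] (vertex index first) and `points` as [N, 2].
import torch as th

tri = th.tensor([[[0.0, 0.0]],   # vertex 0
                 [[1.0, 0.0]],   # vertex 1
                 [[0.0, 1.0]]])  # vertex 2; shape [3, N=1, 2]
pts = th.tensor([[0.0, 0.0]])    # shape [N=1, 2]
b = bary_coords(pts, tri)        # shape [3, N]
assert th.allclose(b[:, 0], th.tensor([1.0, 0.0, 0.0]))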
Compute a UV-space barycentric map where each texel contains barycentric
coordinates for that texel within its enclosing UV triangle. For texels
with no assigned triangle, all 3 barycentric coordinates will be 0. | def make_uv_barys(
vt: th.Tensor,
vti: th.Tensor,
uv_shape: Union[Tuple[int, int], int],
flip_uv: bool = True,
):
"""Compute a UV-space barycentric map where each texel contains barycentric
coordinates for that texel within its enclosing UV triangle. For texels
with no assigned triangle, all 3 barycentric coordinates will be 0.
"""
if isinstance(uv_shape, int):
uv_shape = (uv_shape, uv_shape)
if flip_uv:
# Flip here because texture coordinates in some of our topo files are
# stored in OpenGL convention with Y=0 on the bottom of the texture
# unlike numpy/torch arrays/tensors.
vt = vt.clone()
vt[:, 1] = 1 - vt[:, 1]
face_index_map = make_uv_face_index(vt, vti, uv_shape, flip_uv=False).to(vt.device)
vti_map = vti.long()[face_index_map.clamp(min=0)]
uv_tri_uvs = vt[vti_map].permute(2, 0, 1, 3)
uv_grid = th.meshgrid(
th.linspace(0.5, uv_shape[0] - 0.5, uv_shape[0]) / uv_shape[0],
th.linspace(0.5, uv_shape[1] - 0.5, uv_shape[1]) / uv_shape[1],
)
uv_grid = th.stack(uv_grid[::-1], dim=2).to(uv_tri_uvs)
bary_map = bary_coords(uv_grid.view(-1, 2), uv_tri_uvs.view(3, -1, 2))
bary_map = bary_map.permute(1, 0).view(uv_shape[0], uv_shape[1], 3)
bary_map[face_index_map < 0] = 0
return face_index_map, bary_map |
Computes tangent, bitangent, and normal vectors given a mesh.
Args:
geom: [N, n_verts, 3] th.Tensor
Vertex positions.
vt: [n_uv_coords, 2] th.Tensor
UV coordinates.
vi: [..., 3] th.Tensor
Face vertex indices.
vti: [..., 3] th.Tensor
Face UV indices.
Returns:
[..., 3] th.Tensors for T, B, N. | def compute_tbn(geom, vt, vi, vti):
"""Computes tangent, bitangent, and normal vectors given a mesh.
Args:
geom: [N, n_verts, 3] th.Tensor
Vertex positions.
vt: [n_uv_coords, 2] th.Tensor
UV coordinates.
vi: [..., 3] th.Tensor
Face vertex indices.
vti: [..., 3] th.Tensor
Face UV indices.
Returns:
[..., 3] th.Tensors for T, B, N.
"""
v0 = geom[:, vi[..., 0]]
v1 = geom[:, vi[..., 1]]
v2 = geom[:, vi[..., 2]]
vt0 = vt[vti[..., 0]]
vt1 = vt[vti[..., 1]]
vt2 = vt[vti[..., 2]]
v01 = v1 - v0
v02 = v2 - v0
vt01 = vt1 - vt0
vt02 = vt2 - vt0
f = 1.0 / (
vt01[None, ..., 0] * vt02[None, ..., 1]
- vt01[None, ..., 1] * vt02[None, ..., 0]
)
tangent = f[..., None] * th.stack(
[
v01[..., 0] * vt02[None, ..., 1] - v02[..., 0] * vt01[None, ..., 1],
v01[..., 1] * vt02[None, ..., 1] - v02[..., 1] * vt01[None, ..., 1],
v01[..., 2] * vt02[None, ..., 1] - v02[..., 2] * vt01[None, ..., 1],
],
dim=-1,
)
tangent = F.normalize(tangent, dim=-1)
normal = F.normalize(th.cross(v01, v02, dim=3), dim=-1)
bitangent = F.normalize(th.cross(tangent, normal, dim=3), dim=-1)
return tangent, bitangent, normal |
Computes mapping from vertex indices to texture indices.
Args:
vi: [F, 3], triangles
vti: [F, 3], texture triangles
n_max: int, max number of texture locations
Returns:
[n_verts, n_max], texture indices | def compute_v2uv(n_verts, vi, vti, n_max=4):
"""Computes mapping from vertex indices to texture indices.
Args:
vi: [F, 3], triangles
vti: [F, 3], texture triangles
n_max: int, max number of texture locations
Returns:
[n_verts, n_max], texture indices
"""
v2uv_dict = {}
for i_v, i_uv in zip(vi.reshape(-1), vti.reshape(-1)):
v2uv_dict.setdefault(i_v, set()).add(i_uv)
assert len(v2uv_dict) == n_verts
v2uv = np.zeros((n_verts, n_max), dtype=np.int32)
for i in range(n_verts):
vals = sorted(list(v2uv_dict[i]))
v2uv[i, :] = vals[0]
v2uv[i, : len(vals)] = np.array(vals)
return v2uv |
Computes first-ring neighbours given vertices and faces. | def compute_neighbours(n_verts, vi, n_max_values=10):
"""Computes first-ring neighbours given vertices and faces."""
n_vi = vi.shape[0]
adj = {i: set() for i in range(n_verts)}
for i in range(n_vi):
for idx in vi[i]:
adj[idx] |= set(vi[i]) - set([idx])
nbs_idxs = np.tile(np.arange(n_verts)[:, np.newaxis], (1, n_max_values))
nbs_weights = np.zeros((n_verts, n_max_values), dtype=np.float32)
for idx in range(n_verts):
n_values = min(len(adj[idx]), n_max_values)
nbs_idxs[idx, :n_values] = np.array(list(adj[idx]))[:n_values]
nbs_weights[idx, :n_values] = -1.0 / n_values
return nbs_idxs, nbs_weights |
Project a set of 3D points into multiple cameras with a pinhole model.
Args:
p: [B, N, 3], input 3D points in world coordinates
Rt: [B, NC, 3, 4], extrinsics (where NC is the number of cameras to project to)
K: [B, NC, 3, 3], intrinsics
normalize: bool, whether to normalize coordinates to [-1.0, 1.0]
Returns:
tuple:
- [B, NC, N, 2] - projected points
- [B, NC, N] - their depths | def project_points_multi(p, Rt, K, normalize=False, size=None):
"""Project a set of 3D points into multiple cameras with a pinhole model.
Args:
p: [B, N, 3], input 3D points in world coordinates
Rt: [B, NC, 3, 4], extrinsics (where NC is the number of cameras to project to)
K: [B, NC, 3, 3], intrinsics
normalize: bool, whether to normalize coordinates to [-1.0, 1.0]
Returns:
tuple:
- [B, NC, N, 2] - projected points
- [B, NC, N] - their depths
"""
B, N = p.shape[:2]
NC = Rt.shape[1]
Rt = Rt.reshape(B * NC, 3, 4)
K = K.reshape(B * NC, 3, 3)
# [B, N, 3] -> [B * NC, N, 3]
p = p[:, np.newaxis].expand(-1, NC, -1, -1).reshape(B * NC, -1, 3)
p_cam = p @ Rt[:, :3, :3].mT + Rt[:, :3, 3][:, np.newaxis]
p_pix = p_cam @ K.mT
p_depth = p_pix[:, :, 2:]
p_pix = (p_pix[..., :2] / p_depth).reshape(B, NC, N, 2)
p_depth = p_depth.reshape(B, NC, N)
if normalize:
assert size is not None
h, w = size
p_pix = (
2.0 * p_pix / th.as_tensor([w, h], dtype=th.float32, device=p.device) - 1.0
)
return p_pix, p_depth |
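A shape-check sketch with identity extrinsics/intrinsics (the values are placeholders, not a calibrated camera):
import torch as th

B, N, NC = 2, 5, 3
p = th.rand(B, N, 3) + 1.0  # keep depths positive
Rt = th.eye(3, 4)[None, None].expand(B, NC, -1, -1).contiguous()
K = th.eye(3)[None, None].expand(B, NC, -1, -1).contiguous()
pix, depth = project_points_multi(p, Rt, K, normalize=True, size=(512, 512))
assert pix.shape == (B, NC, N, 2) and depth.shape == (B, NC, N)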
Convert XYZ image to normal image
Args:
xyz: th.Tensor
[B, 3, H, W] XYZ image
Returns:
th.Tensor: [B, 3, H, W] image of normals | def xyz2normals(xyz: th.Tensor, eps: float = 1e-8) -> th.Tensor:
"""Convert XYZ image to normal image
Args:
xyz: th.Tensor
[B, 3, H, W] XYZ image
Returns:
th.Tensor: [B, 3, H, W] image of normals
"""
nrml = th.zeros_like(xyz)
xyz = th.cat((xyz[:, :, :1, :] * 0, xyz[:, :, :, :], xyz[:, :, :1, :] * 0), dim=2)
xyz = th.cat((xyz[:, :, :, :1] * 0, xyz[:, :, :, :], xyz[:, :, :, :1] * 0), dim=3)
U = (xyz[:, :, 2:, 1:-1] - xyz[:, :, :-2, 1:-1]) / -2
V = (xyz[:, :, 1:-1, 2:] - xyz[:, :, 1:-1, :-2]) / -2
nrml[:, 0, ...] = U[:, 1, ...] * V[:, 2, ...] - U[:, 2, ...] * V[:, 1, ...]
nrml[:, 1, ...] = U[:, 2, ...] * V[:, 0, ...] - U[:, 0, ...] * V[:, 2, ...]
nrml[:, 2, ...] = U[:, 0, ...] * V[:, 1, ...] - U[:, 1, ...] * V[:, 0, ...]
veclen = th.norm(nrml, dim=1, keepdim=True).clamp(min=eps)
return nrml / veclen |
Convert depth image to XYZ image using camera intrinsics
Args:
depth: th.Tensor
[B, 1, H, W] depth image
focal: th.Tensor
[B, 2, 2] camera focal lengths
princpt: th.Tensor
[B, 2] camera principal points
Returns:
th.Tensor: [B, 3, H, W] XYZ image | def depth2xyz(depth, focal, princpt) -> th.Tensor:
"""Convert depth image to XYZ image using camera intrinsics
Args:
depth: th.Tensor
[B, 1, H, W] depth image
focal: th.Tensor
[B, 2, 2] camera focal lengths
princpt: th.Tensor
[B, 2] camera principal points
Returns:
th.Tensor: [B, 3, H, W] XYZ image
"""
b, h, w = depth.shape[0], depth.shape[2], depth.shape[3]
ix = (
th.arange(w, device=depth.device).float()[None, None, :] - princpt[:, None, None, 0]
) / focal[:, None, None, 0, 0]
iy = (
th.arange(h, device=depth.device).float()[None, :, None] - princpt[:, None, None, 1]
) / focal[:, None, None, 1, 1]
xyz = th.zeros((b, 3, h, w), device=depth.device)
xyz[:, 0, ...] = depth[:, 0, :, :] * ix
xyz[:, 1, ...] = depth[:, 0, :, :] * iy
xyz[:, 2, ...] = depth[:, 0, :, :]
return xyz |
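A worked sketch of the pinhole back-projection: for a constant-depth plane, x = d*(u - cx)/fx, y = d*(v - cy)/fy and z = d, so the pixel at the principal point maps to x = y = 0.
import torch as th

B, H, W = 1, 4, 6
depth = th.full((B, 1, H, W), 2.0)
focal = th.tensor([[[100.0, 0.0], [0.0, 100.0]]])  # [B, 2, 2]
princpt = th.tensor([[W / 2.0, H / 2.0]])          # [B, 2]
xyz = depth2xyz(depth, focal, princpt)
assert xyz.shape == (B, 3, H, W)
assert th.allclose(xyz[:, 2], depth[:, 0])          # z channel equals the input depth
assert th.allclose(xyz[0, :2, 2, 3], th.zeros(2))   # principal-point pixel has x = y = 0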
Convert depth image to normal image using camera intrinsics
Args:
depth: th.Tensor
[B, 1, H, W] depth image
focal: th.Tensor
[B, 2, 2] camera focal lengths
princpt: th.Tensor
[B, 2] camera principal points
Returns:
th.Tensor: [B, 3, H, W] normal image | def depth2normals(depth, focal, princpt) -> th.Tensor:
"""Convert depth image to normal image using camera intrinsics
Args:
depth: th.Tensor
[B, 1, H, W] depth image
focal: th.Tensor
[B, 2, 2] camera focal lengths
princpt: th.Tensor
[B, 2] camera principal points
Returns:
th.Tensor: [B, 3, H, W] normal image
"""
return xyz2normals(depth2xyz(depth, focal, princpt)) |
Computes first-ring neighbours given vertices and faces. | def compute_neighbours(
n_verts: int, vi: th.Tensor, n_max_values: int = 10
) -> Tuple[th.Tensor, th.Tensor]:
"""Computes first-ring neighbours given vertices and faces."""
n_vi = vi.shape[0]
adj = {i: set() for i in range(n_verts)}
for i in range(n_vi):
for idx in vi[i]:
adj[idx] |= set(vi[i]) - {idx}
nbs_idxs = np.tile(np.arange(n_verts)[:, np.newaxis], (1, n_max_values))
nbs_weights = np.zeros((n_verts, n_max_values), dtype=np.float32)
for idx in range(n_verts):
n_values = min(len(adj[idx]), n_max_values)
nbs_idxs[idx, :n_values] = np.array(list(adj[idx]))[:n_values]
nbs_weights[idx, :n_values] = -1.0 / n_values
return nbs_idxs, nbs_weights |
Computes mapping from vertex indices to texture indices.
Args:
vi: [F, 3], triangles
vti: [F, 3], texture triangles
n_max: int, max number of texture locations
Returns:
[n_verts, n_max], texture indices | def compute_v2uv(n_verts: int, vi: th.Tensor, vti: th.Tensor, n_max: int = 4) -> th.Tensor:
"""Computes mapping from vertex indices to texture indices.
Args:
vi: [F, 3], triangles
vti: [F, 3], texture triangles
n_max: int, max number of texture locations
Returns:
[n_verts, n_max], texture indices
"""
v2uv_dict = {}
for i_v, i_uv in zip(vi.reshape(-1), vti.reshape(-1)):
v2uv_dict.setdefault(i_v, set()).add(i_uv)
assert len(v2uv_dict) == n_verts
v2uv = np.zeros((n_verts, n_max), dtype=np.int32)
for i in range(n_verts):
vals = sorted(v2uv_dict[i])
v2uv[i, :] = vals[0]
v2uv[i, : len(vals)] = np.array(vals)
return v2uv |
Compute tangents, bitangents, normals.
Args:
tri_xyz: [B,N,3,3] vertex coordinates
tri_uv: [N,2] texture coordinates
Returns:
tangents, bitangents, normals | def compute_tbn_uv(
tri_xyz: th.Tensor, tri_uv: th.Tensor, eps: float = 1e-5
) -> Tuple[th.Tensor, th.Tensor, th.Tensor]:
"""Compute tangents, bitangents, normals.
Args:
tri_xyz: [B,N,3,3] vertex coordinates
tri_uv: [N,2] texture coordinates
Returns:
tangents, bitangents, normals
"""
tri_uv = tri_uv[np.newaxis]
v01 = tri_xyz[:, :, 1] - tri_xyz[:, :, 0]
v02 = tri_xyz[:, :, 2] - tri_xyz[:, :, 0]
normals = th.cross(v01, v02, dim=-1)
normals = normals / th.norm(normals, dim=-1, keepdim=True).clamp(min=eps)
vt01 = tri_uv[:, :, 1] - tri_uv[:, :, 0]
vt02 = tri_uv[:, :, 2] - tri_uv[:, :, 0]
f = th.tensor([1.0], device=tri_xyz.device) / (
vt01[..., 0] * vt02[..., 1] - vt01[..., 1] * vt02[..., 0]
)
tangents = f[..., np.newaxis] * (
v01 * vt02[..., 1][..., np.newaxis] - v02 * vt01[..., 1][..., np.newaxis]
)
tangents = tangents / th.norm(tangents, dim=-1, keepdim=True).clamp(min=eps)
bitangents = th.cross(normals, tangents, dim=-1)
bitangents = bitangents / th.norm(bitangents, dim=-1, keepdim=True).clamp(min=eps)
return tangents, bitangents, normals |
Interpolate values on the mesh. | def interpolate_values_mesh(
src_values: th.Tensor, src_faces: th.Tensor, idxs: th.Tensor, weights: th.Tensor
) -> th.Tensor:
"""Interpolate values on the mesh."""
assert src_faces.dtype == th.long, "index should be torch.long"
assert len(src_values.shape) in [2, 3], "supporting [N, F] and [B, N, F] only"
if len(src_values.shape) == 2:
return (src_values[src_faces[idxs]] * weights[..., np.newaxis]).sum(dim=1)
else: # src.verts.shape == 3:
return (src_values[:, src_faces[idxs]] * weights[np.newaxis, ..., np.newaxis]).sum(dim=2) |
Given a 3D mesh and a set of query points, return closest point barycentrics
Args:
v: np.array (float)
[N, 3] mesh vertices
vi: np.array (int)
[N, 3] mesh triangle indices
points: np.array (float)
[M, 3] query points
Returns:
Tuple[approx, barys, interp_idxs, face_idxs]
approx: [M, 3] approximated (closest) points on the mesh
barys: [M, 3] barycentric weights that produce "approx"
interp_idxs: [M, 3] vertex indices for barycentric interpolation
face_idxs: [M] face indices for barycentric interpolation. interp_idxs = vi[face_idxs] | def closest_point_barycentrics(
v: np.ndarray, vi: np.ndarray, points: np.ndarray
) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
"""Given a 3D mesh and a set of query points, return closest point barycentrics
Args:
v: np.array (float)
[N, 3] mesh vertices
vi: np.array (int)
[N, 3] mesh triangle indices
points: np.array (float)
[M, 3] query points
Returns:
Tuple[approx, barys, interp_idxs, face_idxs]
approx: [M, 3] approximated (closest) points on the mesh
barys: [M, 3] barycentric weights that produce "approx"
interp_idxs: [M, 3] vertex indices for barycentric interpolation
face_idxs: [M] face indices for barycentric interpolation. interp_idxs = vi[face_idxs]
"""
mesh = Trimesh(vertices=v, faces=vi)
p, _, face_idxs = closest_point(mesh, points)
barys = points_to_barycentric(mesh.triangles[face_idxs], p)
b0, b1, b2 = np.split(barys, 3, axis=1)
interp_idxs = vi[face_idxs]
v0 = v[interp_idxs[:, 0]]
v1 = v[interp_idxs[:, 1]]
v2 = v[interp_idxs[:, 2]]
approx = b0 * v0 + b1 * v1 + b2 * v2
return approx, barys, interp_idxs, face_idxs |
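A hedged usage sketch (relies on trimesh's closest_point/points_to_barycentric helpers, as the function itself does) with a single triangle in the z=0 plane:
import numpy as np

v = np.array([[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0]])
vi = np.array([[0, 1, 2]])
points = np.array([[0.25, 0.25, 1.0]])  # off-surface query
approx, barys, interp_idxs, face_idxs = closest_point_barycentrics(v, vi, points)
assert np.allclose(approx[0], [0.25, 0.25, 0.0])  # z offset is dropped
assert np.allclose(barys.sum(axis=1), 1.0)        # weights sum to one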
Compute a UV-space barycentric map where each texel contains barycentric
coordinates for the closest point on a UV triangle.
Args:
vt: th.Tensor
Texture coordinates. Shape = [n_texcoords, 2]
vti: th.Tensor
Face texture coordinate indices. Shape = [n_faces, 3]
uv_shape: Tuple[int, int] or int
Shape of the texture map. (HxW)
flip_uv: bool
Whether or not to flip UV coordinates along the V axis (OpenGL -> numpy/pytorch convention).
return_approx_dist: bool
Whether or not to include the distance to the nearest point.
Returns:
th.Tensor: index_img: Face index image, shape [uv_shape[0], uv_shape[1]]
th.Tensor: Barycentric coordinate map, shape [uv_shape[0], uv_shape[1], 3] | def make_closest_uv_barys(
vt: np.ndarray,
vti: np.ndarray,
uv_shape: Union[Tuple[int, int], int],
flip_uv: bool = True,
return_approx_dist: bool = False,
) -> Union[Tuple[th.Tensor, th.Tensor], Tuple[th.Tensor, th.Tensor, th.Tensor]]:
"""Compute a UV-space barycentric map where each texel contains barycentric
coordinates for the closest point on a UV triangle.
Args:
vt: th.Tensor
Texture coordinates. Shape = [n_texcoords, 2]
vti: th.Tensor
Face texture coordinate indices. Shape = [n_faces, 3]
uv_shape: Tuple[int, int] or int
Shape of the texture map. (HxW)
flip_uv: bool
Whether or not to flip UV coordinates along the V axis (OpenGL -> numpy/pytorch convention).
return_approx_dist: bool
Whether or not to include the distance to the nearest point.
Returns:
th.Tensor: index_img: Face index image, shape [uv_shape[0], uv_shape[1]]
th.Tensor: Barycentric coordinate map, shape [uv_shape[0], uv_shape[1], 3]
"""
if isinstance(uv_shape, int):
uv_shape = (uv_shape, uv_shape)
if flip_uv:
# Flip here because texture coordinates in some of our topo files are
# stored in OpenGL convention with Y=0 on the bottom of the texture
# unlike numpy/torch arrays/tensors.
vt = vt.clone()
vt[:, 1] = 1 - vt[:, 1]
# Texel to UV mapping (as per OpenGL linear filtering)
# https://www.khronos.org/registry/OpenGL/specs/gl/glspec46.core.pdf
# Sect. 8.14, page 261
# uv=(0.5,0.5)/w is at the center of texel [0,0]
# uv=(w-0.5, w-0.5)/w is the center of texel [w-1,w-1]
# texel = floor(u*w - 0.5)
# u = (texel+0.5)/w
uv_grid = th.meshgrid(
th.linspace(0.5, uv_shape[0] - 1 + 0.5, uv_shape[0]) / uv_shape[0],
th.linspace(0.5, uv_shape[1] - 1 + 0.5, uv_shape[1]) / uv_shape[1],
) # HxW, v,u
uv_grid = th.stack(uv_grid[::-1], dim=2) # HxW, u, v
uv = uv_grid.reshape(-1, 2).data.to("cpu").numpy()
vth = np.hstack((vt, vt[:, 0:1] * 0 + 1))
uvh = np.hstack((uv, uv[:, 0:1] * 0 + 1))
approx, barys, interp_idxs, face_idxs = closest_point_barycentrics(vth, vti, uvh)
index_img = th.from_numpy(face_idxs.reshape(uv_shape[0], uv_shape[1])).long()
bary_img = th.from_numpy(barys.reshape(uv_shape[0], uv_shape[1], 3)).float()
if return_approx_dist:
dist = np.linalg.norm(approx - uvh, axis=1)
dist = th.from_numpy(dist.reshape(uv_shape[0], uv_shape[1])).float()
return index_img, bary_img, dist
else:
return index_img, bary_img |
Computes tangent, bitangent, and normal vectors given a mesh.
Args:
geom: [N, n_verts, 3] th.Tensor
Vertex positions.
vt: [n_uv_coords, 2] th.Tensor
UV coordinates.
vi: [..., 3] th.Tensor
Face vertex indices.
vti: [..., 3] th.Tensor
Face UV indices.
Returns:
[..., 3] th.Tensors for T, B, N. | def compute_tbn(
geom: th.Tensor, vt: th.Tensor, vi: th.Tensor, vti: th.Tensor
) -> Tuple[th.Tensor, th.Tensor, th.Tensor]:
"""Computes tangent, bitangent, and normal vectors given a mesh.
Args:
geom: [N, n_verts, 3] th.Tensor
Vertex positions.
vt: [n_uv_coords, 2] th.Tensor
UV coordinates.
vi: [..., 3] th.Tensor
Face vertex indices.
vti: [..., 3] th.Tensor
Face UV indices.
Returns:
[..., 3] th.Tensors for T, B, N.
"""
v0 = geom[:, vi[..., 0]]
v1 = geom[:, vi[..., 1]]
v2 = geom[:, vi[..., 2]]
vt0 = vt[vti[..., 0]]
vt1 = vt[vti[..., 1]]
vt2 = vt[vti[..., 2]]
v01 = v1 - v0
v02 = v2 - v0
vt01 = vt1 - vt0
vt02 = vt2 - vt0
f = th.tensor([1.0], device=geom.device) / (
vt01[None, ..., 0] * vt02[None, ..., 1] - vt01[None, ..., 1] * vt02[None, ..., 0]
)
tangent = f[..., None] * th.stack(
[
v01[..., 0] * vt02[None, ..., 1] - v02[..., 0] * vt01[None, ..., 1],
v01[..., 1] * vt02[None, ..., 1] - v02[..., 1] * vt01[None, ..., 1],
v01[..., 2] * vt02[None, ..., 1] - v02[..., 2] * vt01[None, ..., 1],
],
dim=-1,
)
tangent = F.normalize(tangent, dim=-1)
normal = F.normalize(th.cross(v01, v02, dim=3), dim=-1)
bitangent = F.normalize(th.cross(tangent, normal, dim=3), dim=-1)
return tangent, bitangent, normal |
Computes the Rodrigues vectors r from the rotation matrices `R` | def invRodrigues(R: th.Tensor, eps: float = 1e-8) -> th.Tensor:
"""Computes the Rodrigues vectors r from the rotation matrices `R`"""
# t = trace(R)
# theta = rotational angle
# [omega]_x = (R-R^T)/2
# r = theta/sin(theta)*omega
assert R.shape[-2:] == (3, 3)
t = R[..., 0, 0] + R[..., 1, 1] + R[..., 2, 2]
theta = acos_safe_th((t - 1) / 2)
omega = (
th.stack(
(
R[..., 2, 1] - R[..., 1, 2],
R[..., 0, 2] - R[..., 2, 0],
R[..., 1, 0] - R[..., 0, 1],
),
-1,
)
/ 2
)
# Edge Case 1: t >= 3 - eps
inv_sinc = theta / th.sin(theta)
inv_sinc_taylor_expansion = (
1
+ (1.0 / 6.0) * th.pow(theta, 2)
+ (7.0 / 360.0) * th.pow(theta, 4)
+ (31.0 / 15120.0) * th.pow(theta, 6)
)
# Edge Case 2: t <= -1 + eps
# From: https://math.stackexchange.com/questions/83874/efficient-and-accurate-numerical
# -implementation-of-the-inverse-rodrigues-rotatio
a = th.diagonal(R, 0, -2, -1).argmax(dim=-1)
b = (a + 1) % 3
c = (a + 2) % 3
s = th.sqrt(R[..., a, a] - R[..., b, b] - R[..., c, c] + 1 + 1e-4)
v = th.zeros_like(omega)
v[..., a] = s / 2
v[..., b] = (R[..., b, a] + R[..., a, b]) / (2 * s)
v[..., c] = (R[..., c, a] + R[..., a, c]) / (2 * s)
norm = th.norm(v, dim=-1, keepdim=True).to(v.dtype).clamp(min=eps)
pi_vnorm = np.pi * (v / norm)
# use taylor expansion when R is close to a identity matrix (trace(R) ~= 3)
r = th.where(
t[:, None] > (3 - 1e-3),
inv_sinc_taylor_expansion[..., None] * omega,
th.where(t[:, None] < -1 + 1e-3, pi_vnorm, inv_sinc[..., None] * omega),
)
return r |
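A numpy reference check of the non-degenerate branch (this sketch re-derives the classic formula rather than calling invRodrigues, which depends on acos_safe_th): a rotation of theta about the z axis should yield the Rodrigues vector [0, 0, theta].
import numpy as np

theta = 0.3
c, s = np.cos(theta), np.sin(theta)
R = np.array([[c, -s, 0.0], [s, c, 0.0], [0.0, 0.0, 1.0]])
angle = np.arccos((np.trace(R) - 1.0) / 2.0)
omega = 0.5 * np.array([R[2, 1] - R[1, 2], R[0, 2] - R[2, 0], R[1, 0] - R[0, 1]])
r = angle / np.sin(angle) * omega
assert np.allclose(r, [0.0, 0.0, theta])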
Given a topology along with uv correspondences for the topology (e.g. keypoint correspondences in uv space),
this function produces a tuple with the barycentric coordinates for each uv correspondence along with the vertex indices.
Parameters:
----------
topology: Input mesh that contains vertices, faces and texture coordinates info.
uv_correspondences: N X 2 uv locations that describe the uv correspondence to the topology
Returns:
-------
bary: (N X 3 float)
For each uv correspondence returns the barycentric coordinates for the uv pixel
triangles: (N X 3 int)
For each uv correspondence returns the face (i.e. the vertices of the face) for that pixel. | def get_barys_for_uvs(
topology: Dict[str, Any], uv_correspondences: np.ndarray
) -> Tuple[np.ndarray, np.ndarray]:
"""
Given a topology along with uv correspondences for the topology (e.g. keypoint correspondences in uv space),
this function produces a tuple with the barycentric coordinates for each uv correspondence along with the vertex indices.
Parameters:
----------
topology: Input mesh that contains vertices, faces and texture coordinates info.
uv_correspondences: N X 2 uv locations that describe the uv correspondence to the topology
Returns:
-------
bary: (N X 3 float)
For each uv correspondence returns the barycentric coordinates for the uv pixel
triangles: (N X 3 int)
For each uv correspondence returns the face (i.e. the vertices of the face) for that pixel.
"""
vi: np.ndarray = topology["vi"]
vt: np.ndarray = topology["vt"]
vti: np.ndarray = topology["vti"]
# # No up-down flip here
# Here we pad the texture coordinates and correspondences with a 0
vth = np.hstack((vt[:, :2], vt[:, :1] * 0))
kp_uv_h = np.hstack((uv_correspondences, uv_correspondences[:, :1] * 0))
_, kp_barys, _, face_indices = closest_point_barycentrics(vth, vti, kp_uv_h)
kp_verts = vi[face_indices]
return kp_barys, kp_verts |
Applies ad-hoc 'color correction' to a linear RGB Mugsy image along
color channel `dim` and returns the gamma-corrected result. | def linear2color_corr(
img: Union[th.Tensor, np.ndarray], dim: int = -1
) -> Union[th.Tensor, np.ndarray]:
"""Applies ad-hoc 'color correction' to a linear RGB Mugsy image along
color channel `dim` and returns the gamma-corrected result."""
if dim == -1:
dim = len(img.shape) - 1
gamma = 2.0
black = 3.0 / 255.0
color_scale = [1.4, 1.1, 1.6]
assert img.shape[dim] == 3
if dim == -1:
dim = len(img.shape) - 1
if isinstance(img, th.Tensor):
scale = th.FloatTensor(color_scale).view([3 if i == dim else 1 for i in range(img.dim())])
img = img * scale.to(img) / 1.1
return th.clamp(
(((1.0 / (1 - black)) * 0.95 * th.clamp(img - black, 0, 2)).pow(1.0 / gamma))
- 15.0 / 255.0,
0,
2,
)
else:
scale = np.array(color_scale).reshape([3 if i == dim else 1 for i in range(img.ndim)])
img = img * scale / 1.1
return np.clip(
(((1.0 / (1 - black)) * 0.95 * np.clip(img - black, 0, 2)) ** (1.0 / gamma))
- 15.0 / 255.0,
0,
2,
) |
Inverse of linear2color_corr.
Removes ad-hoc 'color correction' from a gamma-corrected RGB Mugsy image
along color channel `dim` and returns the linear RGB result. | def linear2color_corr_inv(img: th.Tensor, dim: int) -> th.Tensor:
"""Inverse of linear2color_corr.
Removes ad-hoc 'color correction' from a gamma-corrected RGB Mugsy image
along color channel `dim` and returns the linear RGB result."""
gamma = 2.0
black = 3.0 / 255.0
color_scale = [1.4, 1.1, 1.6]
assert img.shape[dim] == 3
if dim == -1:
dim = len(img.shape) - 1
scale = th.FloatTensor(color_scale).view([3 if i == dim else 1 for i in range(img.dim())])
img = (img + 15.0 / 255.0).pow(gamma) / (0.95 / (1 - black)) + black
return th.clamp(img / (scale.to(img) / 1.1), 0, 1) |
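A round-trip sketch: for values that stay away from the clamping at both ends, the inverse should recover the original linear image.
import torch as th

img = th.rand(2, 3, 8, 8) * 0.8 + 0.1  # keep values inside the unclamped range
corrected = linear2color_corr(img, dim=1)
recovered = linear2color_corr_inv(corrected, dim=1)
assert th.allclose(recovered, img, atol=1e-4)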
Maps a previously-characterized camera color space into a linear
color space. IMPORTANT: This function assumes RGB channel order,
not BGR.
The characterization is specified by `ccm`, `dc_offset`, and `gamma`.
The dimension index of the color channel is specified with `dim` (default
is -1, i.e. the last dimension.)
The function accepts both [0, 255] integer and [0, 1] float formats.
However, the return value is always floating point in [0, 1]-range.
FIXME(swirajaya) -
This is a reimplementation of `RGBMapping::map_to_lin_rgb` in
`//arvr/projects/codec_avatar/calibration/colorcal:colorspace`. To
figure out a C++ / Py binding solution that works for both DGX and
PROD, as well as `np.ndarray` and `th.Tensor`.
Args:
@param img the image in RGB, as th.Tensor or np.ndarray
@param dim dimension of color channel
@param ccm 3x3 color correction matrix
@param dc_offset camera black level/dc offset
@param gamma encoding gamma
Returns:
@return the corrected image as float th.Tensor or np.ndarray | def mapped2linear(
img: Union[th.Tensor, np.ndarray],
dim: int = -1,
ccm: Union[List[List[float]], th.Tensor, np.ndarray] = DEFAULT_CCM,
dc_offset: Union[List[float], th.Tensor, np.ndarray] = DEFAULT_DC_OFFSET,
gamma: float = DEFAULT_GAMMA,
) -> Union[th.Tensor, np.ndarray]:
"""Maps a previously-characterized camera color space into a linear
color space. IMPORTANT: This function assumes RGB channel order,
not BGR.
The characterization is specified by `ccm`, `dc_offset`, and `gamma`.
The dimension index of the color channel is specified with `dim` (default
is -1, i.e. the last dimension.)
The function accepts both [0, 255] integer and [0, 1] float formats.
However, the return value is always floating point in [0, 1]-range.
FIXME(swirajaya) -
This is a reimplementation of `RGBMapping::map_to_lin_rgb` in
`//arvr/projects/codec_avatar/calibration/colorcal:colorspace`. To
figure out a C++ / Py binding solution that works for both DGX and
PROD, as well as `np.ndarray` and `th.Tensor`.
Args:
@param img the image in RGB, as th.Tensor or np.ndarray
@param dim dimension of color channel
@param ccm 3x3 color correction matrix
@param dc_offset camera black level/dc offset
@param gamma encoding gamma
Returns:
@return the corrected image as float th.Tensor or np.ndarray
"""
assert img.shape[dim] == 3
if dim == -1:
dim = len(img.shape) - 1
ndim: int = img.dim() if th.is_tensor(img) else img.ndim
pixel_shape: List[int] = [3 if i == dim else 1 for i in range(ndim)]
# Summation indices for CCM matrix multiplication
# e.g. [sum_j] CCM_ij * Img_kljnpq -> ImgCorr_klinpq if say, dim == 2
ein_ccm: List[int] = [0, 1]
ein_inp: List[int] = [1 if i == dim else i + 2 for i in range(ndim)]
ein_out: List[int] = [0 if i == dim else i + 2 for i in range(ndim)]
EPS: float = 1e-7
if isinstance(img, th.Tensor):
if th.is_floating_point(img):
input_saturated = img > (1.0 - EPS)
imgf = img.double()
else:
input_saturated = img == 255
imgf = img.double() / 255.0
dc_offset = th.DoubleTensor(dc_offset).view(pixel_shape).to(img.device)
img_linear = th.clamp(
imgf - dc_offset,
min=EPS,
).pow(1.0 / gamma)
img_corr = th.clamp( # CCM * img_linear
th.einsum(th.DoubleTensor(ccm).to(img.device), ein_ccm, img_linear, ein_inp, ein_out),
min=0.0,
max=1.0,
)
img_corr = th.where(input_saturated, 1.0, img_corr)
else:
if np.issubdtype(img.dtype, np.floating):
input_saturated = img > (1.0 - EPS)
imgf = img.astype(float)
else:
input_saturated = img == 255
imgf = img.astype(float) / 255.0
dc_offset = np.array(dc_offset).reshape(pixel_shape)
img_linear = np.clip(imgf - dc_offset, a_min=EPS, a_max=None) ** (1.0 / gamma)
img_corr: np.ndarray = np.clip( # CCM * img_linear
np.einsum(np.array(ccm), ein_ccm, img_linear, ein_inp, ein_out),
a_min=0.0,
a_max=1.0,
)
img_corr: np.ndarray = np.where(input_saturated, 1.0, img_corr)
return img_corr |
Maps a previously-characterized camera color space into sRGB color
space (assuming mapped to Rec709). IMPORTANT: This function
assumes RGB channel order, not BGR.
The characterization is specified by `ccm`, `dc_offset`, and `gamma`.
The dimension index of the color channel is specified with `dim`
(default is -1 i.e. last dimension.) | def mapped2srgb(
img: Union[th.Tensor, np.ndarray],
dim: int = -1,
ccm: Union[List[List[float]], th.Tensor, np.ndarray] = DEFAULT_CCM,
dc_offset: Union[List[float], th.Tensor, np.ndarray] = DEFAULT_DC_OFFSET,
gamma: float = DEFAULT_GAMMA,
) -> Union[th.Tensor, np.ndarray]:
"""Maps a previously-characterized camera color space into sRGB co-
lor space (assuming mapped to Rec709). IMPORTANT: This function
assumes RGB channel order, not BGR.
The characterization is specified by `ccm`, `dc_offset`, and `gamma`.
The dimension index of the color channel is specified with `dim`
(default is -1 i.e. last dimension.)
"""
# Note: The redundant if-statement below is due to a Pyre bug.
# Currently Pyre fails to handle arguments into overloaded functions that are typed
# as a union of the overloaded method parameter types.
if isinstance(img, th.Tensor):
return linear2srgb(mapped2linear(img, dim, ccm, dc_offset, gamma), gamma=2.4)
else:
return linear2srgb(mapped2linear(img, dim, ccm, dc_offset, gamma), gamma=2.4) |
Takes a difference image and returns a new version scaled s.t. its values
are remapped from [-IMG_MAX, IMG_MAX] -> [0, IMG_MAX] where IMG_MAX is
either 1 or 255 depending on the range of the input. | def scale_diff_image(diff_img: th.Tensor) -> th.Tensor:
"""Takes a difference image returns a new version scaled s.t. its values
are remapped from [-IMG_MAX, IMG_MAX] -> [0, IMG_MAX] where IMG_MAX is
either 1 or 255 dpeending on the range of the input."""
mval = abs(diff_img).max().item()
pix_range = (0, 128 if mval > 1 else 0.5, 255 if mval > 1 else 1)
return (pix_range[1] * (diff_img / mval) + pix_range[1]).clamp(pix_range[0], pix_range[2]) |
Converts a tensor to an uint8 image Numpy array with `cv2.COLORMAP_JET` applied.
Args:
tensor: Input tensor to be converted.
x_max: The output color will be normalized as (x-x_min)/(x_max-x_min)*255.
x_max = tensor.max() if None is given.
x_min: The output color will be normalized as (x-x_min)/(x_max-x_min)*255.
x_min = tensor.min() if None is given. | def tensor2rgbjet(
tensor: th.Tensor, x_max: Optional[float] = None, x_min: Optional[float] = None
) -> np.ndarray:
"""Converts a tensor to an uint8 image Numpy array with `cv2.COLORMAP_JET` applied.
Args:
tensor: Input tensor to be converted.
x_max: The output color will be normalized as (x-x_min)/(x_max-x_min)*255.
x_max = tensor.max() if None is given.
x_min: The output color will be normalized as (x-x_min)/(x_max-x_min)*255.
x_min = tensor.min() if None is given.
"""
return cv2.applyColorMap(tensor2rgb(tensor, x_max=x_max, x_min=x_min), cv2.COLORMAP_JET) |
Converts a tensor to an uint8 image Numpy array.
Args:
tensor: Input tensor to be converted.
x_max: The output color will be normalized as (x-x_min)/(x_max-x_min)*255.
x_max = tensor.max() if None is given.
x_min: The output color will be normalized as (x-x_min)/(x_max-x_min)*255.
x_min = tensor.min() if None is given. | def tensor2rgb(
tensor: th.Tensor, x_max: Optional[float] = None, x_min: Optional[float] = None
) -> np.ndarray:
"""Converts a tensor to an uint8 image Numpy array.
Args:
tensor: Input tensor to be converted.
x_max: The output color will be normalized as (x-x_min)/(x_max-x_min)*255.
x_max = tensor.max() if None is given.
x_min: The output color will be normalized as (x-x_min)/(x_max-x_min)*255.
x_min = tensor.min() if None is given.
"""
x = tensor.data.cpu().numpy()
if x_min is None:
x_min = x.min()
if x_max is None:
x_max = x.max()
gain = 255 / np.clip(x_max - x_min, 1e-3, None)
x = (x - x_min) * gain
x = x.clip(0.0, 255.0)
x = x.astype(np.uint8)
return x |
Converts a tensor to an image.
Args:
tensor: Input tensor to be converted.
The shape of the tensor should be CxHxW or HxW. The channels are assumed to be in RGB format.
x_max: The output color will be normalized as (x-x_min)/(x_max-x_min)*255.
x_max = tensor.max() if None is explicitly given.
x_min: The output color will be normalized as (x-x_min)/(x_max-x_min)*255.
x_min = tensor.min() if None is explicitly given.
mode: Can be `rgb` or `jet`. If `jet` is given, cv2.COLORMAP_JET would be applied.
mask: Optional mask to be applied to the input tensor.
label: Optional text to be added to the output image. | def tensor2image(
tensor: th.Tensor,
x_max: Optional[float] = 1.0,
x_min: Optional[float] = 0.0,
mode: str = "rgb",
mask: Optional[th.Tensor] = None,
label: Optional[str] = None,
) -> np.ndarray:
"""Converts a tensor to an image.
Args:
tensor: Input tensor to be converted.
The shape of the tensor should be CxHxW or HxW. The channels are assumed to be in RGB format.
x_max: The output color will be normalized as (x-x_min)/(x_max-x_min)*255.
x_max = tensor.max() if None is explicitly given.
x_min: The output color will be normalized as (x-x_min)/(x_max-x_min)*255.
x_min = tensor.min() if None is explicitly given.
mode: Can be `rgb` or `jet`. If `jet` is given, cv2.COLORMAP_JET would be applied.
mask: Optional mask to be applied to the input tensor.
label: Optional text to be added to the output image.
"""
tensor = tensor.detach()
# Apply mask
if mask is not None:
tensor = tensor * mask
if len(tensor.size()) == 2:
tensor = tensor[None]
# Make three channel image
assert len(tensor.size()) == 3, tensor.size()
n_channels = tensor.shape[0]
if n_channels == 1:
tensor = tensor.repeat(3, 1, 1)
elif n_channels != 3:
raise ValueError(f"Unsupported number of channels {n_channels}.")
# Convert to display format
img = tensor.permute(1, 2, 0)
if mode == "rgb":
img = tensor2rgb(img, x_max=x_max, x_min=x_min)
elif mode == "jet":
# `cv2.applyColorMap` assumes input format in BGR
img[:, :, :3] = img[:, :, [2, 1, 0]]
img = tensor2rgbjet(img, x_max=x_max, x_min=x_min)
# convert back to rgb
img[:, :, :3] = img[:, :, [2, 1, 0]]
else:
raise ValueError(f"Unsupported mode {mode}.")
if label is not None:
img = add_label_centered(img, label)
return img |
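A usage sketch: visualize a single-channel [H, W] activation as a labeled uint8 jet heat map.
import numpy as np
import torch as th

act = th.rand(64, 64)
vis = tensor2image(act, x_max=1.0, x_min=0.0, mode="jet", label="activation")
assert vis.shape == (64, 64, 3) and vis.dtype == np.uint8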
Adds label to an image
Args:
img: Input image.
text: Text to be added on the image.
font_scale: The scale of the font.
thickness: Thickness of the lines.
alignment: Can be `top` or `bottom`. The alignment of the text.
color: The color of the text. Assumes the same color space as `img`. | def add_label_centered(
img: np.ndarray,
text: str,
font_scale: float = 1.0,
thickness: int = 2,
alignment: str = "top",
color: Tuple[int, int, int] = (0, 255, 0),
) -> np.ndarray:
"""Adds label to an image
Args:
img: Input image.
text: Text to be added on the image.
font_scale: The scale of the font.
thickness: Thickness of the lines.
alignment: Can be `top` or `bottom`. The alignment of the text.
color: The color of the text. Assumes the same color space as `img`.
"""
font = cv2.FONT_HERSHEY_SIMPLEX
textsize = cv2.getTextSize(text, font, font_scale, thickness=thickness)[0]
img = img.astype(np.uint8).copy()
if alignment == "top":
cv2.putText(
img,
text,
((img.shape[1] - textsize[0]) // 2, 50),
font,
font_scale,
color,
thickness=thickness,
lineType=cv2.LINE_AA,
)
elif alignment == "bottom":
cv2.putText(
img,
text,
((img.shape[1] - textsize[0]) // 2, img.shape[0] - textsize[1]),
font,
font_scale,
color,
thickness=thickness,
lineType=cv2.LINE_AA,
)
else:
raise ValueError("Unknown text alignment")
return img |
Return a 256 x 3 array representing a color map from OpenCV. | def get_color_map(name: str = "COLORMAP_JET") -> np.ndarray:
"""Return a 256 x 3 array representing a color map from OpenCV."""
color_map = np.arange(256, dtype=np.uint8).reshape(1, 256)
color_map = cv2.applyColorMap(color_map, getattr(cv2, name))
return color_map[0, :, ::-1].copy() |
Arranges a tensor of images (or a dict with labeled image tensors) into
a grid.
Params:
data: Either a single image tensor [N, {1, 3}, H, W] containing images to
arrange in a grid layout, or a dict with tensors of the same shape.
If a dict is given, assume each entry in the dict is a batch of
images, and form a grid where each cell contains one sample from
each entry in the dict. Images should be in the range [0, 255].
keys_to_draw: Select which keys in the dict should be included in each
grid cell. If none are given, draw all keys.
scale_factor: Optional scale factor applied to each image.
draw_labels: Whether or not to draw the keys on each image.
grid_size: Optionally specify the size of the resulting grid. | def make_image_grid(
data: Union[th.Tensor, Dict[str, th.Tensor]],
keys_to_draw: Optional[List[str]] = None,
scale_factor: Optional[float] = None,
draw_labels: bool = True,
grid_size: Optional[Tuple[int, int]] = None,
) -> np.ndarray:
"""Arranges a tensor of images (or a dict with labeled image tensors) into
a grid.
Params:
data: Either a single image tensor [N, {1, 3}, H, W] containing images to
arrange in a grid layout, or a dict with tensors of the same shape.
If a dict is given, assume each entry in the dict is a batch of
images, and form a grid where each cell contains one sample from
each entry in the dict. Images should be in the range [0, 255].
keys_to_draw: Select which keys in the dict should be included in each
grid cell. If none are given, draw all keys.
scale_factor: Optional scale factor applied to each image.
draw_labels: Whether or not to draw the keys on each image.
grid_size: Optionally specify the size of the resulting grid.
"""
if isinstance(data, th.Tensor):
data = {"": data}
keys_to_draw = [""]
if keys_to_draw is None:
keys_to_draw = list(data.keys())
n_cells = data[keys_to_draw[0]].shape[0]
img_h = data[keys_to_draw[0]].shape[2]
img_w = data[keys_to_draw[0]].shape[3]
# Resize all images to match the shape of the first image, and convert
# Greyscale -> RGB.
for key in keys_to_draw:
if data[key].shape[1] == 1:
data[key] = data[key].expand(-1, 3, -1, -1)
elif data[key].shape[1] != 3:
raise ValueError(
f"Image data must all be of shape [N, {1,3}, H, W]. Got shape {data[key].shape}."
)
data[key] = data[key].clamp(min=0, max=255)
if data[key].shape[2] != img_h or data[key].shape[3] != img_w:
data[key] = thf.interpolate(data[key], size=(img_h, img_w), mode="area")
if scale_factor is not None:
data[key] = thf.interpolate(data[key], scale_factor=scale_factor, mode="area")
# Make an image for each grid cell by labeling and concatenating a sample
# from each key in the data.
cell_imgs = []
for i in range(n_cells):
imgs = [data[key][i].byte().cpu().numpy().transpose(1, 2, 0) for key in keys_to_draw]
imgs = [np.ascontiguousarray(img) for img in imgs]
if draw_labels:
for img, label in zip(imgs, keys_to_draw):
cv2.putText(
img, label, (31, 31), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (0, 0, 0), 2, cv2.LINE_AA
)
cv2.putText(
img,
label,
(30, 30),
cv2.FONT_HERSHEY_SIMPLEX,
0.75,
(255, 255, 255),
2,
cv2.LINE_AA,
)
cell_imgs.append(np.concatenate(imgs, axis=1))
cell_h, cell_w = cell_imgs[0].shape[:2]
# Find the most-square grid layout.
if grid_size is not None:
gh, gw = grid_size
if gh * gw < n_cells:
raise ValueError(
f"Requested grid size ({gh}, {gw}) (H, W) cannot hold {n_cells} images."
)
else:
best_diff = np.inf
best_side = np.inf
best_leftover = np.inf
gw = 0
for gh_ in range(1, n_cells + 1):
for gw_ in range(1, n_cells + 1):
if gh_ * gw_ < n_cells:
continue
h = gh_ * cell_h
w = gw_ * cell_w
diff = abs(h - w)
max_side = max(gh_, gw_)
leftover = gh_ * gw_ - n_cells
if diff <= best_diff and max_side <= best_side and leftover <= best_leftover:
gh = gh_
gw = gw_
best_diff = diff
best_side = max_side
best_leftover = leftover
# Put the images into the grid.
img = np.zeros((gh * cell_h, gw * cell_w, 3), dtype=np.uint8)
for i in range(n_cells):
gr = i // gw
gc = i % gw
img[gr * cell_h : (gr + 1) * cell_h, gc * cell_w : (gc + 1) * cell_w] = cell_imgs[i]
return img |
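A usage sketch: pack a batch of 10 RGB images (values already in [0, 255]) into the most-square grid that fits them.
import numpy as np
import torch as th

imgs = th.rand(10, 3, 32, 32) * 255
grid = make_image_grid(imgs, draw_labels=False)
assert grid.dtype == np.uint8 and grid.shape[2] == 3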
A simpler version of `make_image_grid` that works for the whole batch at once.
Use case: A dict containing diagnostic output. All tensors in the dict have a shape of [N, {1, 3}, H, W]
where N coincides for all entries. The goal is to arrange the images into a grid so that each column
corresponds to a key, and each row corresponds to an index in the batch.
Example:
Data:
dict = {"A": A, "B": B, "C": C}
Grid:
| A[0] | B[0] | C[0] |
| A[1] | B[1] | C[1] |
| A[2] | B[2] | C[2] |
The grid will be arranged such that:
- Each row corresponds to an index in the batch.
- Each column corresponds to a key in the dict.
- For each row, images are resized such that the vertical edge matches the largest image.
Args:
data (Dict[str, th.Tensor]): Diagnostic data.
max_row_hight (int): The maximum allowed height of a row.
draw_labels (bool): Whether the keys should be drawn as labels
input_is_in_0_1 (bool): If true, input data is assumed to be in range 0..1 otherwise in range 0..255 | def make_image_grid_batched(
data: Dict[str, th.Tensor],
max_row_hight: Optional[int] = None,
draw_labels: bool = True,
input_is_in_0_1: bool = False,
) -> np.ndarray:
"""A simpler version of `make_image_grid` that works for the whole batch at once.
Use case: A dict containing diagnostic output. All tensors in the dict have a shape of [N, {1, 3}, H, W]
where N coincides for all entries. The goal is to arrange the images into a grid so that each column
corresponds to a key, and each row corresponds to an index in the batch.
Example:
Data:
dict = {"A": A, "B": B, "C": C}
Grid:
| A[0] | B[0] | C[0] |
| A[1] | B[1] | C[1] |
| A[2] | B[2] | C[2] |
The grid will be arranged such that:
- Each row corresponds to an index in the batch.
- Each column corresponds to a key in the dict.
- For each row, images are resized such that the vertical edge matches the largest image.
Args:
data (Dict[str, th.Tensor]): Diagnostic data.
max_row_hight (int): The maximum allowed height of a row.
draw_labels (bool): Whether the keys should be drawn as labels
input_is_in_0_1 (bool): If true, input data is assumed to be in range 0..1 otherwise in range 0..255
"""
data_list = list(data.values())
keys_to_draw = data.keys()
if not all(x.ndim == 4 and (x.shape[1] == 1 or x.shape[1] == 3) for x in data_list):
raise ValueError(
f"Image data must all be of shape [N, {1, 3}, H, W]. Got shapes {[x.shape for x in data_list]}."
)
if not all(x.shape[0] == data_list[0].shape[0] for x in data_list):
raise ValueError("Batch sizes must be the same.")
data_list = resize_to_match(data_list, edge="vertical", max_size=max_row_hight)
if not all(x.shape[2] == data_list[0].shape[2] for x in data_list):
raise ValueError("Heights must be the same.")
with th.no_grad():
# Make all images contain 3 channels
data_list = [x.expand(-1, 3, -1, -1) if x.shape[1] == 1 else x for x in data_list]
# Convert to byte
scale = 255.0 if input_is_in_0_1 else 1.0
data_list = [x.mul(scale).round().clamp(min=0, max=255).byte() for x in data_list]
# Convert to numpy and make it BHWC
data_list = [x.cpu().numpy().transpose(0, 2, 3, 1) for x in data_list]
rows = []
# Iterate by key
for j, label in zip(range(len(data_list)), keys_to_draw):
col = []
# Iterate by batch index
for i in range(data_list[0].shape[0]):
img = np.ascontiguousarray(data_list[j][i])
if draw_labels:
cv2.putText(
img, label, (31, 31), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (0, 0, 0), 2, cv2.LINE_AA
)
cv2.putText(
img,
label,
(30, 30),
cv2.FONT_HERSHEY_SIMPLEX,
0.75,
(255, 255, 255),
2,
cv2.LINE_AA,
)
col.append(img)
rows.append(np.concatenate(col, axis=0))
return np.concatenate(rows, axis=1) |
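A minimal usage sketch (not part of the original source); the tensor contents, sizes, and output path are placeholders, and the colour conversion assumes the inputs were RGB:
import torch as th
import cv2

# Three diagnostic outputs for a batch of 2: one RGB map and two single-channel maps.
diag = {
    "rgb": th.rand(2, 3, 64, 64),
    "depth": th.rand(2, 1, 64, 64),
    "mask": th.rand(2, 1, 64, 64),
}
grid = make_image_grid_batched(diag, max_row_hight=128, input_is_in_0_1=True)
# grid is an HWC uint8 array with one column per key and one row per batch index.
cv2.imwrite("diagnostics.png", cv2.cvtColor(grid, cv2.COLOR_RGB2BGR))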
Resizes a list of image tensors s.t. a chosen edge ("long", "short", "vertical", or "horizontal")
matches that edge on the largest image in the list. | def resize_to_match(
tensors: List[th.Tensor],
edge: str = "long",
mode: str = "nearest",
max_size: Optional[int] = None,
) -> List[th.Tensor]:
"""Resizes a list of image tensors s.t. a chosen edge ("long", "short", "vertical", or "horizontal")
matches that edge on the largest image in the list."""
assert edge in {"short", "long", "vertical", "horizontal"}
max_shape = [max(x) for x in zip(*[t.shape for t in tensors])]
resized_tensors = []
for tensor in tensors:
if edge == "long":
edge_idx = np.argmax(tensor.shape[-2:])
elif edge == "short":
edge_idx = np.argmin(tensor.shape[-2:])
elif edge == "vertical":
edge_idx = 0
else: # edge == "horizontal":
edge_idx = 1
target_size = max_shape[-2:][edge_idx]
if max_size is not None:
target_size = min(max_size, max_shape[-2:][edge_idx])
if tensor.shape[-2:][edge_idx] != target_size:
ratio = target_size / tensor.shape[-2:][edge_idx]
tensor = thf.interpolate(
tensor,
scale_factor=ratio,
align_corners=False if mode in ["bilinear", "bicubic"] else None,
recompute_scale_factor=True,
mode=mode,
)
resized_tensors.append(tensor)
return resized_tensors |
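A small usage sketch (hypothetical shapes) showing how the "vertical" option matches heights across a list of batches, optionally capped by max_size:
import torch as th

a = th.rand(4, 3, 120, 160)
b = th.rand(4, 3, 60, 100)
a_r, b_r = resize_to_match([a, b], edge="vertical", max_size=96)
print(a_r.shape, b_r.shape)  # heights now agree at 96: [4, 3, 96, 128] and [4, 3, 96, 160]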
Helper used by Rosetta to draw text on tensors using OpenCV. | def draw_text(
canvas: th.Tensor,
text: str,
loc: Tuple[float, float],
font: int = cv2.FONT_HERSHEY_SIMPLEX,
scale: float = 2,
color: Tuple[float, float, float] = (0, 0, 0),
thickness: float = 3,
) -> th.Tensor:
"""Helper used by Rosetta to draw text on tensors using OpenCV."""
device = canvas.device
canvas_new = canvas.cpu().numpy().transpose(0, 2, 3, 1)
for i in range(canvas_new.shape[0]):
image = canvas_new[i].copy()
if isinstance(text, list):
cv2.putText(image, text[i], loc, font, scale, color, thickness)
else:
cv2.putText(image, text, loc, font, scale, color, thickness)
canvas_new[i] = image
canvas_tensor = th.ByteTensor(canvas_new.transpose(0, 3, 1, 2)).to(device)
return canvas_tensor |
Visualizes a scalar image using specified color map. | def visualize_scalar_image(
img: np.ndarray,
min_val: float,
val_range: float,
color_map: int = cv2.COLORMAP_JET,
convert_to_rgb: bool = True,
) -> np.ndarray:
"""
Visualizes a scalar image using specified color map.
"""
scaled_img = (img.astype(np.float32) - min_val) / val_range
vis = cv2.applyColorMap((scaled_img * 255).clip(0, 255).astype(np.uint8), color_map)
if convert_to_rgb:
vis = cv2.cvtColor(vis, cv2.COLOR_BGR2RGB)
return vis |
Process the depth image within the range for visualization. | def process_depth_image(
depth_img: np.ndarray, depth_min: float, depth_max: float, depth_err_range: float
) -> Tuple[np.ndarray, np.ndarray]:
"""
Process the depth image within the range for visualization.
"""
valid_pixels = np.logical_and(depth_img > 0, depth_img <= depth_max)
new_depth_img = np.zeros_like(depth_img)
new_depth_img[valid_pixels] = depth_img[valid_pixels]
err_image = np.abs(new_depth_img - depth_img).astype(np.float32) / depth_err_range
return new_depth_img, err_image |
Draw Keypoints on given image. | def draw_keypoints(img: np.ndarray, kpt: np.ndarray, kpt_w: float) -> np.ndarray:
"""
Draw Keypoints on given image.
"""
x, y = kpt[:, 0], kpt[:, 1]
w = kpt[:, 2] * kpt_w
col = np.array([-255.0, 255.0, -255.0]) * w[:, np.newaxis]
pts = np.column_stack((x.astype(np.int32), y.astype(np.int32)))
for pt, c in zip(pts, col):
cv2.circle(img, tuple(pt), 2, tuple(c), -1)
return img |
Moves channels dimension to the end of tensor.
Makes it more suitable for visualizations. | def tensor_to_rgb_array(tensor: th.Tensor) -> np.ndarray:
"""Moves channels dimension to the end of tensor.
Makes it more suitable for visualizations.
"""
return tensor.permute(0, 2, 3, 1).detach().cpu().numpy() |
Renders keypoints onto a given image with particular color.
Supports overlaps. | def draw_keypoints_with_color(
image: np.ndarray, keypoints_uvw: np.ndarray, color: Color
) -> np.ndarray:
"""Renders keypoints onto a given image with particular color.
Supports overlaps.
"""
assert len(image.shape) == 3
assert image.shape[-1] == 3
coords = keypoints_uvw[:, :2].astype(np.int32)
tmp_img = np.zeros(image.shape, dtype=np.float32)
for uv in coords:
cv2.circle(tmp_img, tuple(uv), 2, color, -1)
return (image + tmp_img).clip(0.0, 255.0).astype(np.uint8) |
Draw Contour on given image. | def draw_contour(img: np.ndarray, contour_corrs: np.ndarray) -> np.ndarray:
"""
Draw Contour on given image.
"""
for corr in contour_corrs:
mesh_uv = corr[1:3]
seg_uv = corr[3:]
x, y = int(mesh_uv[0] + 0.5), int(mesh_uv[1] + 0.5)
cv2.circle(img, (x, y), 1, (255, 0, 0), -1)
cv2.line(
img,
(int(mesh_uv[0]), int(mesh_uv[1])),
(int(seg_uv[0]), int(seg_uv[1])),
(-255, -255, 255),
1,
)
return img |
:param param: batch_size x (7*nr_skeleton_joints) ParamTransform Outputs.
:return: batch_size x nr_skeleton_joints x 8 Skeleton States
8 stands for 3 translation + 4 rotation (quat) + 1 scale | def solve_skeleton_state(param: th.Tensor, joint_offset: th.Tensor, joint_rotation: th.Tensor, joint_parents: th.Tensor):
"""
:param param: batch_size x (7*nr_skeleton_joints) ParamTransform Outputs.
:return: batch_size x nr_skeleton_joints x 8 Skeleton States
8 stands for 3 translation + 4 rotation (quat) + 1 scale
"""
batch_size = param.shape[0]
# batch processing for parameters
jp = param.view((batch_size, -1, 7))
lt = jp[:, :, 0:3] + joint_offset.unsqueeze(0)
lr = Quaternion.batchMul(joint_rotation.unsqueeze(0), Quaternion.batchFromXYZ(jp[:, :, 3:6]))
ls = torch.pow(
torch.tensor([2.0], dtype=torch.float32, device=param.device),
jp[:, :, 6].unsqueeze(2),
)
state = []
for index, parent in enumerate(joint_parents):
if int(parent) != -1:
gr = Quaternion.batchMul(state[parent][:, :, 3:7], lr[:, index, :].unsqueeze(1))
gt = (
Quaternion.batchRot(
state[parent][:, :, 3:7],
lt[:, index, :].unsqueeze(1) * state[parent][:, :, 7].unsqueeze(2),
)
+ state[parent][:, :, 0:3]
)
gs = state[parent][:, :, 7].unsqueeze(2) * ls[:, index, :].unsqueeze(1)
state.append(torch.cat((gt, gr, gs), dim=2))
else:
state.append(
torch.cat((lt[:, index, :], lr[:, index, :], ls[:, index, :]), dim=1).view(
(batch_size, 1, 8)
)
)
return torch.cat(state, dim=1) |
Computes a normalized representation of the pose in quaternion space.
This is a delta between the per-joint local transformation and the bind state.
Returns:
[B, NJ, 4] - normalized rotations | def compute_normalized_pose_quat(lbs, local_pose, scale):
"""Computes a normalized representation of the pose in quaternion space.
This is a delta between the per-joint local transformation and the bind state.
Returns:
[B, NJ, 4] - normalized rotations
"""
B = local_pose.shape[0]
global_pose_zero = th.zeros((B, 6), dtype=th.float32, device=local_pose.device)
params = lbs.param_transform(th.cat([global_pose_zero, local_pose, scale], axis=-1))
params = params.reshape(B, -1, 7)
# applying rotation
# TODO: what is this?
rot_quat = Quaternion.batchMul(lbs.joint_rotation[np.newaxis], Quaternion.batchFromXYZ(params[:, :, 3:6]))
# removing the bind state
bind_rot_quat = Quaternion.batchInvert(lbs.bind_state[:, :, 3:7])
return Quaternion.batchMul(rot_quat, bind_rot_quat) |
Computes pose regions given a linear blend skinning function. | def compute_pose_regions_legacy(lbs_fn) -> np.ndarray:
"""Computes pose regions given a linear blend skinning function."""
weights = lbs_fn.joints_weights.cpu().numpy()
n_pos = lbs_fn.param_transform.nr_position_params
param_masks = np.zeros((n_pos, lbs_fn.joints_weights.shape[-1]))
children = {j: [] for j in range(lbs_fn.num_joints)}
parents = {j: None for j in range(lbs_fn.num_joints)}
prec = {j: [] for j in range(lbs_fn.num_joints)}
for j in range(lbs_fn.num_joints):
parent_index = int(lbs_fn.joint_parents[j, 0])
if parent_index == -1:
continue
children[parent_index].append(j)
parents[j] = parent_index
prec[j] = [parent_index, int(lbs_fn.joint_parents[parent_index, 0])]
# get parameters for each joint
j_to_p = get_influence_map(lbs_fn.param_transform.transform, n_pos)
# get all the joints
p_to_j = [[] for i in range(n_pos)]
for j, pidx in enumerate(j_to_p):
for p in pidx:
if j not in p_to_j[p]:
p_to_j[p].append(j)
for p, jidx in enumerate(p_to_j):
param_masks[p] = weights[jidx].sum(axis=0)
if not np.any(param_masks[p]):
assert len(jidx) == 1
jidx_c = children[jidx[0]][:]
for jc in jidx_c[:]:
jidx_c += children[jc]
param_masks[p] = weights[jidx_c].sum(axis=0)
return param_masks > 0.0 |
Load a module or class given the module/class name.
Example:
.. code-block:: python
eye_geo = load_class("path.to.module", "ClassName")
Args:
module_name: str
The full path of the module relative to the root directory. Ex: ``utils.module_loader``
class_name: str
The name of the class within the module to load.
silent: bool
If set to True, return None instead of raising an exception if module/class is missing
Returns:
object:
The loaded module or class object. | def load_module(
module_name: str, class_name: Optional[str] = None, silent: bool = False
):
"""
Load a module or class given the module/class name.
Example:
.. code-block:: python
eye_geo = load_class("path.to.module", "ClassName")
Args:
module_name: str
The full path of the module relative to the root directory. Ex: ``utils.module_loader``
class_name: str
The name of the class within the module to load.
silent: bool
If set to True, return None instead of raising an exception if module/class is missing
Returns:
object:
The loaded module or class object.
"""
try:
module = importlib.import_module(f"visualize.{module_name}")
if class_name:
return getattr(module, class_name)
else:
return module
except ModuleNotFoundError as e:
if silent:
return None
logger.error(f"Module not found: {module_name}", exc_info=True)
raise
except AttributeError as e:
if silent:
return None
logger.error(
f"Can not locate class: {class_name} in {module_name}.", exc_info=True
)
raise |
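A usage sketch; the module and class names below are hypothetical and only illustrate the silent fallback:
# Resolves to importlib.import_module("visualize.renderers.mesh") internally.
renderer_cls = load_module("renderers.mesh", "MeshRenderer", silent=True)
if renderer_cls is None:
    print("renderer not available, skipping")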
A shortcut for making an object given the config and arguments
Args:
mod_config: AttrDict
Config. Should contain keys: module_name, class_name, and optionally args
*args
Positional arguments.
**kwargs
Default keyword arguments. Overwritten by content from mod_config.args
Returns:
object:
The loaded module or class object. | def make_module(mod_config: AttrDict, *args: Any, **kwargs: Any) -> Any:
"""
A shortcut for making an object given the config and arguments
Args:
mod_config: AttrDict
Config. Should contain keys: module_name, class_name, and optionally args
*args
Positional arguments.
**kwargs
Default keyword arguments. Overwritten by content from mod_config.args
Returns:
object:
The loaded module or class object.
"""
mod_config_dict = dict(mod_config)
mod_args = mod_config_dict.pop("args", {})
mod_args.update({k: v for k, v in kwargs.items() if k not in mod_args.keys()})
mod_class = load_module(**mod_config_dict)
return mod_class(*args, **mod_args) |
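A usage sketch; make_module only needs mapping behaviour from its config, so a plain dict stands in for AttrDict here, and the module/class names are placeholders:
cfg = {
    "module_name": "renderers.mesh",   # hypothetical module under the visualize package
    "class_name": "MeshRenderer",
    "args": {"image_size": 512},
}
# kwargs act as defaults; entries in cfg["args"] win on conflicts.
renderer = make_module(cfg, device="cuda:0")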
Returns a name of an object in a form <module>.<parent_scope>.<name> | def get_full_name(mod: object) -> str:
"""
Returns a name of an object in a form <module>.<parent_scope>.<name>
"""
mod_class = mod.__class__
return f"{mod_class.__module__}.{mod_class.__qualname__}" |
Load a class given the full class name.
Example:
.. code-block:: python
class_instance = load_class("module.path.ClassName")
Args:
class_name: txt
The full class name including the full path of the module relative to the root directory.
Returns:
A class | def load_class(class_name: str):
"""
Load a class given the full class name.
Example:
.. code-block:: python
class_instance = load_class("module.path.ClassName")
Args:
class_name: txt
The full class name including the full path of the module relative to the root directory.
Returns:
A class
"""
# This is a false-positive, pyre doesn't understand rsplit(..., 1) can only have 1-2 elements
# pyre-fixme[6]: In call `load_module`, for 1st positional only parameter expected `bool` but got `str`.
return load_module(*class_name.rsplit(".", 1)) |
Instantiate an object given the class name and initialization arguments.
Example:
.. code-block:: python
my_model = load_object(ObjectSpec(**my_model_config), in_channels=3)
Args:
spec: ObjectSpec
An ObjectSpec object that specifies the class name and init arguments.
kwargs: dict
Additional keyword arguments for initialization.
Returns:
An object | def load_object(spec: ObjectSpec, **kwargs: Any):
"""
Instantiate an object given the class name and initialization arguments.
Example:
.. code-block:: python
my_model = load_object(ObjectSpec(**my_model_config), in_channels=3)
Args:
spec: ObjectSpec
An ObjectSpec object that specifies the class name and init arguments.
kwargs: dict
Additional keyword arguments for initialization.
Returns:
An object
"""
if spec.module_name is None:
object_class = load_class(spec.class_name)
else:
object_class = load_module(spec.module_name, spec.class_name)
# Debug message for overriding the object spec
for key in kwargs:
if key in spec.kwargs:
logger.debug(f"Overriding {key} as {kwargs[key]} in {spec}.")
return object_class(**{**spec.kwargs, **kwargs}) |
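A usage sketch, assuming ObjectSpec is a simple record with class_name, module_name, and kwargs fields as used above; the class path is a placeholder:
spec = ObjectSpec(class_name="models.decoder.Decoder", module_name=None, kwargs={"in_channels": 3})
decoder = load_object(spec, in_channels=4)  # explicit kwargs override spec.kwargs (logged at debug level)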
Instantiate an object given a config and arguments. | def load_from_config(config: AttrDict, **kwargs):
"""Instantiate an object given a config and arguments."""
assert "class_name" in config and "module_name" not in config
config = copy.deepcopy(config)
class_name = config.pop("class_name")
object_class = load_class(class_name)
return object_class(**config, **kwargs) |
Get the names arguments of the forward pass for the module.
Args:
module: a class with `forward()` method | def forward_parameter_names(module):
"""Get the names arguments of the forward pass for the module.
Args:
module: a class with `forward()` method
"""
names = []
params = list(inspect.signature(module.forward).parameters.values())[1:]
for p in params:
# Parameter names never include the asterisks, so check the parameter kind instead.
if p.kind in (p.VAR_POSITIONAL, p.VAR_KEYWORD):
raise ValueError("*args and **kwargs are not supported")
names.append(p.name)
return names |
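A quick example of what the helper returns (per the docstring, pass the class rather than an instance, so the leading self parameter is the one being skipped):
import torch.nn as nn

class Blend(nn.Module):
    def forward(self, image, mask, alpha=0.5):
        return image * alpha + mask * (1 - alpha)

print(forward_parameter_names(Blend))  # ['image', 'mask', 'alpha']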
Build an optimizer given optimizer config and a model.
Args:
config: DictConfig
model: nn.Module|Dict[str,nn.Module] | def build_optimizer(config, model):
"""Build an optimizer given optimizer config and a model.
Args:
config: DictConfig
model: nn.Module|Dict[str,nn.Module]
"""
config = copy.deepcopy(config)
if isinstance(model, nn.Module):
if "per_module" in config:
params = []
for name, value in config.per_module.items():
if not hasattr(model, name):
logger.warning(
f"model {model.__class__} does not have a submodule {name}, skipping"
)
continue
params.append(
dict(
params=getattr(model, name).parameters(),
**value,
)
)
defined_names = set(config.per_module.keys())
for name, module in model.named_children():
n_params = len(list(module.named_parameters()))
if name not in defined_names and n_params:
logger.warning(
f"not going to optimize module {name} which has {n_params} parameters"
)
config.pop("per_module")
else:
params = model.parameters()
else:
# NOTE: can we do
assert "per_module" in config
assert isinstance(model, dict)
params = []
for name, value in config.per_module.items():
if name not in model:
logger.warning(f"not aware of {name}, skipping")
continue
params.append(
dict(
params=model[name].parameters(),
**value,
)
)
return load_from_config(config, params=params) |
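A sketch of the expected config shape; the optimizer class path is a placeholder and must be resolvable by load_class in this codebase, and per_module entries override the top-level settings for the named submodules:
from omegaconf import OmegaConf

optim_cfg = OmegaConf.create({
    "class_name": "optimizers.adam.Adam",  # placeholder path
    "lr": 1e-3,
    "per_module": {
        "encoder": {"lr": 1e-4},
        "decoder": {"lr": 1e-3},
    },
})
# optimizer = build_optimizer(optim_cfg, model)  # model: nn.Module with encoder/decoder children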
Calculate RT and residual L2 loss for two pointclouds
:param src_pointcloud: x (b, v, 3)
:param dst_pointcloud: y (b, v, 3)
:return: loss, R, t s.t. ||Rx+t-y||_2^2 minimal. | def pointcloud_rigid_registration(src_pointcloud, dst_pointcloud, reduce_loss: bool = True):
"""
Calculate RT and residual L2 loss for two pointclouds
:param src_pointcloud: x (b, v, 3)
:param dst_pointcloud: y (b, v, 3)
:return: loss, R, t s.t. ||Rx+t-y||_2^2 minimal.
"""
if len(src_pointcloud.shape) == 2:
src_pointcloud = src_pointcloud.unsqueeze(0)
if len(dst_pointcloud.shape) == 2:
dst_pointcloud = dst_pointcloud.unsqueeze(0)
bn = src_pointcloud.shape[0]
assert src_pointcloud.shape == dst_pointcloud.shape
assert src_pointcloud.shape[2] == 3
X = src_pointcloud - src_pointcloud.mean(dim=1, keepdim=True)
Y = dst_pointcloud - dst_pointcloud.mean(dim=1, keepdim=True)
XYT = th.einsum("nji,njk->nik", X, Y)
muX = src_pointcloud.mean(dim=1)
muY = dst_pointcloud.mean(dim=1)
R = th.zeros((bn, 3, 3), dtype=src_pointcloud.dtype).to(src_pointcloud.device)
t = th.zeros((bn, 1, 3), dtype=src_pointcloud.dtype).to(src_pointcloud.device)
loss = th.zeros((bn,), dtype=src_pointcloud.dtype).to(src_pointcloud.device)
for i in range(bn):
u_, s_, v_ = th.svd(XYT[i, :, :])
detvut = th.det(v_.mm(u_.t()))
diag_m = th.ones_like(s_)
diag_m[-1] = detvut
r_ = v_.mm(th.diag(diag_m)).mm(u_.t())
t_ = muY[i, :] - r_.mm(muX[i, :, None])[:, 0]
R[i, :, :] = r_
t[i, 0, :] = t_
loss[i] = (th.einsum("ij,nj->ni", r_, X[i]) - Y[i]).pow(2).sum(1).mean(0)
loss = loss.mean(0) if reduce_loss else loss
return loss, R, t |
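A self-contained sanity check (not from the source): build a known rigid transform and confirm it is recovered with near-zero residual:
import math
import torch as th

c, s = math.cos(0.3), math.sin(0.3)
R_true = th.tensor([[c, -s, 0.0], [s, c, 0.0], [0.0, 0.0, 1.0]])
t_true = th.tensor([0.5, -1.0, 2.0])
src = th.randn(1, 100, 3)
dst = th.einsum("ij,bnj->bni", R_true, src) + t_true
loss, R, t = pointcloud_rigid_registration(src, dst)
# loss ≈ 0, R ≈ R_true, t ≈ t_true up to numerical precision.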
Calculate RT and residual L2 loss for two pointclouds
:param src_pointcloud: x (b, v, 3)
:param dst_pointcloud: y (b, v, 3)
:param weight: (v, ), duplication of vertices
:return: loss, R, t s.t. ||w(Rx+t-y)||_2^2 minimal. | def pointcloud_rigid_registration_balanced(src_pointcloud, dst_pointcloud, weight):
"""
Calculate RT and residual L2 loss for two pointclouds
:param src_pointcloud: x (b, v, 3)
:param dst_pointcloud: y (b, v, 3)
:param weight: (v, ), duplication of vertices
:return: loss, R, t s.t. ||w(Rx+t-y)||_2^2 minimal.
"""
if len(src_pointcloud.shape) == 2:
src_pointcloud = src_pointcloud.unsqueeze(0)
if len(dst_pointcloud.shape) == 2:
dst_pointcloud = dst_pointcloud.unsqueeze(0)
bn = src_pointcloud.shape[0]
assert src_pointcloud.shape == dst_pointcloud.shape
assert src_pointcloud.shape[2] == 3
assert src_pointcloud.shape[1] == weight.shape[0]
assert len(weight.shape) == 1
w = weight[None, :, None]
def s1(a):
return a.sum(dim=1, keepdim=True)
w2 = w.pow(2)
sw2 = s1(w2)
X = src_pointcloud
Y = dst_pointcloud
wXYT = th.einsum("nji,njk->nik", w2 * (sw2 - w2) * X, Y)
U, s, V = batch_svd(wXYT)
UT = U.permute(0, 2, 1).contiguous()
det = batch_det(V.bmm(UT))
diag = th.ones_like(s).to(s.device)
diag[:, -1] = det
R = V.bmm(batch_diag(diag)).bmm(UT)
RX = th.einsum("bij,bnj->bni", R, X)
t = th.sum(w * (Y - RX), dim=1, keepdim=True) / sw2
loss = w * (RX + t - Y)
loss = F.mse_loss(loss, th.zeros_like(loss)) * 3
return loss, R, t |
Sends a potentially nested container of Tensors to the specified
device. Non-tensors are preserved as-is.
Args:
things: Container with tensors or other containers of tensors to send
to a GPU.
device: Device to send the tensors to.
cache: Optional dictionary to use as a cache for CUDAfied tensors. If
passed, use this cache to allocate a tensor once and then resize /
refill it on future calls to to_device() instead of reallocating
it.
key: If using the cache, store the tensor in this key, only for
internal use.
verbose: Print some info when a cached tensor is resized.
max_bs: Maximum batch size allowed for tensors in cache
non_blocking: if True and this copy is between CPU and GPU, the copy
may occur asynchronously with respect to the host. For other cases,
this argument has no effect.
Returns:
collection: The input collection with all tensors transferred to the given device. | def to_device(
things: TTensorNdarrayModuleOrContainer,
device: th.device,
cache: Optional[Dict[str, th.Tensor]] = None,
key: Optional[str] = None,
verbose: bool = False,
max_bs: Optional[int] = None,
non_blocking: bool = False,
) -> TTensorNdarrayModuleOrContainer:
"""Sends a potentially nested container of Tensors to the specified
device. Non-tensors are preserved as-is.
Args:
things: Container with tensors or other containers of tensors to send
to a GPU.
device: Device to send the tensors to.
cache: Optional dictionary to use as a cache for CUDAfied tensors. If
passed, use this cache to allocate a tensor once and then resize /
refill it on future calls to to_device() instead of reallocating
it.
key: If using the cache, store the tensor in this key, only for
internal use.
verbose: Print some info when a cached tensor is resized.
max_bs: Maximum batch size allowed for tensors in cache
non_blocking: if True and this copy is between CPU and GPU, the copy
may occur asynchronously with respect to the host. For other cases,
this argument has no effect.
Returns:
collection: The input collection with all tensors transferred to the given device.
"""
device = th.device(device)
pr = print if verbose else lambda *args, **kwargs: None
if isinstance(things, th.Tensor) and things.device != device:
if cache is not None:
assert key is not None
batch_size = things.shape[0]
if key in cache:
assert things.shape[1:] == cache[key].shape[1:]
if batch_size > cache[key].shape[0]:
pr("Resized:", key, "from", cache[key].shape[0], "to", batch_size)
cache[key].resize_as_(things)
else:
buf_shape = list(things.shape)
if max_bs is not None:
assert max_bs >= batch_size
buf_shape[0] = max_bs
cache[key] = th.zeros(*buf_shape, dtype=things.dtype, device=device)
pr("Allocated:", key, buf_shape)
cache[key][:batch_size].copy_(things, non_blocking=non_blocking)
return cache[key][:batch_size]
else:
return things.to(device, non_blocking=non_blocking)
elif isinstance(things, th.nn.Module):
return things.to(device, non_blocking=non_blocking)
elif isinstance(things, dict):
key = key + "." if key is not None else ""
return {
k: to_device(v, device, cache, key + k, verbose, max_bs, non_blocking)
for k, v in things.items()
}
elif isinstance(things, Sequence) and not isinstance(things, str):
key = key if key is not None else ""
out = [
to_device(v, device, cache, key + f"_{i}", verbose, max_bs, non_blocking)
for i, v in enumerate(things)
]
if isinstance(things, tuple):
out = tuple(out)
return out
elif isinstance(things, np.ndarray):
return to_device(th.from_numpy(things), device, cache, key, verbose, max_bs, non_blocking)
else:
return things |
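A usage sketch (assumes a CUDA device is available; shapes and keys are placeholders) showing the caching path that avoids reallocating GPU buffers every step:
import numpy as np
import torch as th

batch = {
    "image": th.rand(8, 3, 64, 64),
    "kpts": np.zeros((8, 21, 3), dtype=np.float32),
    "tag": "frame_000",           # non-tensors pass through unchanged
}
cache = {}
gpu_batch = to_device(batch, th.device("cuda:0"), cache=cache, max_bs=32)
# Later calls with batch size <= 32 and the same trailing shapes reuse the cached buffers.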
Preprocess the dict of losses outputs. | def process_losses(
loss_dict: Dict[str, Any], reduce: bool = True, detach: bool = True
) -> Dict[str, th.Tensor]:
"""Preprocess the dict of losses outputs."""
result = {k.replace("loss_", ""): v for k, v in loss_dict.items() if k.startswith("loss_")}
if detach:
result = {k: v.detach() for k, v in result.items()}
if reduce:
result = {k: float(v.mean().item()) for k, v in result.items()}
return result |
Instantiate an object given a config and arguments. | def load_from_config(config: Mapping[str, Any], **kwargs):
"""Instantiate an object given a config and arguments."""
assert 'class_name' in config and 'module_name' not in config
config = copy.deepcopy(config)
ckpt = None if 'ckpt' not in config else config.pop('ckpt')
class_name = config.pop('class_name')
object_class = load_class(class_name)
instance = object_class(**config, **kwargs)
if ckpt is not None:
load_checkpoint(
ckpt_path=ckpt.path,
modules={ckpt.get('module_name', 'model'): instance},
ignore_names=ckpt.get('ignore_names', []),
strict=ckpt.get('strict', False),
)
return instance |
Saving regular summaries for monitoring purposes. | def save_file_summaries(path: str, summaries: Dict[str, Tuple[str, Any]]):
"""Saving regular summaries for monitoring purposes."""
for name, (value, ext) in summaries.items():
#save(f'{path}/{name}.{ext}', value)
raise NotImplementedError() |
Load a checkpoint.
Args:
ckpt_path: directory or the full path to the checkpoint | def load_checkpoint(
ckpt_path: str,
modules: Dict[str, Any],
iteration: Optional[int] = None,
strict: bool = False,
map_location: Optional[str] = None,
ignore_names: Optional[Dict[str, List[str]]] = None,
):
"""Load a checkpoint.
Args:
ckpt_path: directory or the full path to the checkpoint
"""
if map_location is None:
map_location = "cpu"
# adding
if os.path.isdir(ckpt_path):
if iteration is None:
# lookup latest iteration
iteration = max(
[
int(os.path.splitext(os.path.basename(p))[0])
for p in glob.glob(os.path.join(ckpt_path, "*.pt"))
]
)
ckpt_path = os.path.join(ckpt_path, f"{iteration:06d}.pt")
logger.info(f"loading checkpoint {ckpt_path}")
ckpt_dict = th.load(ckpt_path, map_location=map_location)
for name, mod in modules.items():
params = ckpt_dict[name]
if ignore_names is not None and name in ignore_names:
logger.info(f"skipping: {ignore_names[name]}")
params = filter_params(params, ignore_names[name])
mod.load_state_dict(params, strict=strict) |
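A usage sketch; the checkpoint directory is a placeholder, and the keys in modules must match the keys stored in the checkpoint dict:
import torch as th
import torch.nn as nn

model = nn.Linear(4, 2)
optim = th.optim.Adam(model.parameters())
# Passing a directory picks the latest NNNNNN.pt inside it automatically.
load_checkpoint("/checkpoints/run_01", modules={"model": model, "optimizer": optim}, strict=False)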
Enable Dora to load manifest from git clone repository. | def resolve_config_dset_paths(cfg):
"""Enable Dora to load manifest from git clone repository."""
# manifest files for the different splits
for key, value in cfg.datasource.items():
if isinstance(value, str):
cfg.datasource[key] = git_save.to_absolute_path(value) |
Given a XP, return the Solver object.
Args:
xp (XP): Dora experiment for which to retrieve the solver.
override_cfg (dict or None): If not None, should be a dict used to
override some values in the config of `xp`. This will not impact
the XP signature or folder. The format is different
than the one used in Dora grids, nested keys should actually be nested dicts,
not flattened, e.g. `{'optim': {'batch_size': 32}}`.
restore (bool): If `True` (the default), restore state from the last checkpoint.
load_best (bool): If `True` (the default), load the best state from the checkpoint.
ignore_state_keys (list[str]): List of sources to ignore when loading the state, e.g. `optimizer`.
disable_fsdp (bool): if True, disables FSDP entirely. This will
also automatically skip loading the EMA. For solver specific
state sources, like the optimizer, you might want to
use along `ignore_state_keys=['optimizer']`. Must be used with `load_best=True`. | def get_solver_from_xp(xp: XP, override_cfg: tp.Optional[tp.Union[dict, omegaconf.DictConfig]] = None,
restore: bool = True, load_best: bool = True,
ignore_state_keys: tp.List[str] = [], disable_fsdp: bool = True):
"""Given a XP, return the Solver object.
Args:
xp (XP): Dora experiment for which to retrieve the solver.
override_cfg (dict or None): If not None, should be a dict used to
override some values in the config of `xp`. This will not impact
the XP signature or folder. The format is different
than the one used in Dora grids, nested keys should actually be nested dicts,
not flattened, e.g. `{'optim': {'batch_size': 32}}`.
restore (bool): If `True` (the default), restore state from the last checkpoint.
load_best (bool): If `True` (the default), load the best state from the checkpoint.
ignore_state_keys (list[str]): List of sources to ignore when loading the state, e.g. `optimizer`.
disable_fsdp (bool): if True, disables FSDP entirely. This will
also automatically skip loading the EMA. For solver specific
state sources, like the optimizer, you might want to
use along `ignore_state_keys=['optimizer']`. Must be used with `load_best=True`.
"""
logger.info(f"Loading solver from XP {xp.sig}. "
f"Overrides used: {xp.argv}")
cfg = xp.cfg
if override_cfg is not None:
cfg = omegaconf.OmegaConf.merge(cfg, omegaconf.DictConfig(override_cfg))
if disable_fsdp and cfg.fsdp.use:
cfg.fsdp.use = False
assert load_best is True
# ignoring some keys that were FSDP sharded like model, ema, and best_state.
# fsdp_best_state will be used in that case. When using a specific solver,
# one is responsible for adding the relevant keys, e.g. 'optimizer'.
# We could make something to automatically register those inside the solver, but that
# seem overkill at this point.
ignore_state_keys = ignore_state_keys + ['model', 'ema', 'best_state']
try:
with xp.enter():
solver = get_solver(cfg)
if restore:
solver.restore(load_best=load_best, ignore_state_keys=ignore_state_keys)
return solver
finally:
hydra.core.global_hydra.GlobalHydra.instance().clear() |
Return Solver object from Dora signature, i.e. to play with it from a notebook.
See `get_solver_from_xp` for more information. | def get_solver_from_sig(sig: str, *args, **kwargs):
"""Return Solver object from Dora signature, i.e. to play with it from a notebook.
See `get_solver_from_xp` for more information.
"""
xp = main.get_xp_from_sig(sig)
return get_solver_from_xp(xp, *args, **kwargs) |
FFMPEG-based audio file reading using PyAV bindings.
Soundfile cannot read mp3 and av_read is more efficient than torchaudio.
Args:
filepath (str or Path): Path to audio file to read.
seek_time (float): Time at which to start reading in the file.
duration (float): Duration to read from the file. If set to -1, the whole file is read.
Returns:
tuple of torch.Tensor, int: Tuple containing audio data and sample rate | def _av_read(filepath: tp.Union[str, Path], seek_time: float = 0, duration: float = -1.) -> tp.Tuple[torch.Tensor, int]:
"""FFMPEG-based audio file reading using PyAV bindings.
Soundfile cannot read mp3 and av_read is more efficient than torchaudio.
Args:
filepath (str or Path): Path to audio file to read.
seek_time (float): Time at which to start reading in the file.
duration (float): Duration to read from the file. If set to -1, the whole file is read.
Returns:
tuple of torch.Tensor, int: Tuple containing audio data and sample rate
"""
_init_av()
with av.open(str(filepath)) as af:
stream = af.streams.audio[0]
sr = stream.codec_context.sample_rate
num_frames = int(sr * duration) if duration >= 0 else -1
frame_offset = int(sr * seek_time)
# we need a small negative offset otherwise we get some edge artifact
# from the mp3 decoder.
af.seek(int(max(0, (seek_time - 0.1)) / stream.time_base), stream=stream)
frames = []
length = 0
for frame in af.decode(streams=stream.index):
current_offset = int(frame.rate * frame.pts * frame.time_base)
strip = max(0, frame_offset - current_offset)
buf = torch.from_numpy(frame.to_ndarray())
if buf.shape[0] != stream.channels:
buf = buf.view(-1, stream.channels).t()
buf = buf[:, strip:]
frames.append(buf)
length += buf.shape[1]
if num_frames > 0 and length >= num_frames:
break
assert frames
# If the above assert fails, it is likely because we seeked past the end of file point,
# in which case ffmpeg returns a single frame with only zeros, and a weird timestamp.
# This will need proper debugging, in due time.
wav = torch.cat(frames, dim=1)
assert wav.shape[0] == stream.channels
if num_frames > 0:
wav = wav[:, :num_frames]
return f32_pcm(wav), sr |
Read audio by picking the most appropriate backend tool based on the audio format.
Args:
filepath (str or Path): Path to audio file to read.
seek_time (float): Time at which to start reading in the file.
duration (float): Duration to read from the file. If set to -1, the whole file is read.
pad (bool): Pad output audio if not reaching expected duration.
Returns:
tuple of torch.Tensor, int: Tuple containing audio data and sample rate. | def audio_read(filepath: tp.Union[str, Path], seek_time: float = 0.,
duration: float = -1., pad: bool = False) -> tp.Tuple[torch.Tensor, int]:
"""Read audio by picking the most appropriate backend tool based on the audio format.
Args:
filepath (str or Path): Path to audio file to read.
seek_time (float): Time at which to start reading in the file.
duration (float): Duration to read from the file. If set to -1, the whole file is read.
pad (bool): Pad output audio if not reaching expected duration.
Returns:
tuple of torch.Tensor, int: Tuple containing audio data and sample rate.
"""
fp = Path(filepath)
if fp.suffix in ['.flac', '.ogg']: # TODO: check if we can safely use av_read for .ogg
# There is some bug with ffmpeg and reading flac
info = _soundfile_info(filepath)
frames = -1 if duration <= 0 else int(duration * info.sample_rate)
frame_offset = int(seek_time * info.sample_rate)
wav, sr = soundfile.read(filepath, start=frame_offset, frames=frames, dtype=np.float32)
assert info.sample_rate == sr, f"Mismatch of sample rates {info.sample_rate} {sr}"
wav = torch.from_numpy(wav).t().contiguous()
if len(wav.shape) == 1:
wav = torch.unsqueeze(wav, 0)
else:
wav, sr = _av_read(filepath, seek_time, duration)
if pad and duration > 0:
expected_frames = int(duration * sr)
wav = F.pad(wav, (0, expected_frames - wav.shape[-1]))
return wav, sr |
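A usage sketch (the path is a placeholder): read a 5-second excerpt starting at 10 s, padded with zeros if the file ends early:
wav, sr = audio_read("samples/track.mp3", seek_time=10.0, duration=5.0, pad=True)
print(wav.shape, sr)  # [channels, int(5.0 * sr)] at the file's native sample rate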
Convenience function for saving audio to disk. Returns the filename the audio was written to.
Args:
stem_name (str or Path): Filename without extension which will be added automatically.
wav (torch.Tensor): Audio data to save.
sample_rate (int): Sample rate of audio data.
format (str): Either "wav", "mp3", "ogg", or "flac".
mp3_rate (int): kbps when using mp3s.
ogg_rate (int): kbps when using ogg/vorbis. If not provided, let ffmpeg decide for itself.
normalize (bool): if `True` (default), normalizes according to the prescribed
strategy (see after). If `False`, the strategy is only used in case clipping
would happen.
strategy (str): Can be either 'clip', 'peak', or 'rms'. Default is 'peak',
i.e. audio is normalized by its largest value. RMS normalizes by root-mean-square
with extra headroom to avoid clipping. 'clip' just clips.
peak_clip_headroom_db (float): Headroom in dB when doing 'peak' or 'clip' strategy.
rms_headroom_db (float): Headroom in dB when doing 'rms' strategy. This must be much larger
than the `peak_clip` one to avoid further clipping.
loudness_headroom_db (float): Target loudness for loudness normalization.
loudness_compressor (bool): Uses tanh for soft clipping when strategy is 'loudness'.
log_clipping (bool): If True, basic logging on stderr when clipping still
occurs despite strategy (only for 'rms').
make_parent_dir (bool): Make parent directory if it doesn't exist.
Returns:
Path: Path of the saved audio. | def audio_write(stem_name: tp.Union[str, Path],
wav: torch.Tensor, sample_rate: int,
format: str = 'wav', mp3_rate: int = 320, ogg_rate: tp.Optional[int] = None,
normalize: bool = True, strategy: str = 'peak', peak_clip_headroom_db: float = 1,
rms_headroom_db: float = 18, loudness_headroom_db: float = 14,
loudness_compressor: bool = False,
log_clipping: bool = True, make_parent_dir: bool = True,
add_suffix: bool = True) -> Path:
"""Convenience function for saving audio to disk. Returns the filename the audio was written to.
Args:
stem_name (str or Path): Filename without extension which will be added automatically.
wav (torch.Tensor): Audio data to save.
sample_rate (int): Sample rate of audio data.
format (str): Either "wav", "mp3", "ogg", or "flac".
mp3_rate (int): kbps when using mp3s.
ogg_rate (int): kbps when using ogg/vorbis. If not provided, let ffmpeg decide for itself.
normalize (bool): if `True` (default), normalizes according to the prescribed
strategy (see after). If `False`, the strategy is only used in case clipping
would happen.
strategy (str): Can be either 'clip', 'peak', or 'rms'. Default is 'peak',
i.e. audio is normalized by its largest value. RMS normalizes by root-mean-square
with extra headroom to avoid clipping. 'clip' just clips.
peak_clip_headroom_db (float): Headroom in dB when doing 'peak' or 'clip' strategy.
rms_headroom_db (float): Headroom in dB when doing 'rms' strategy. This must be much larger
than the `peak_clip` one to avoid further clipping.
loudness_headroom_db (float): Target loudness for loudness normalization.
loudness_compressor (bool): Uses tanh for soft clipping when strategy is 'loudness'.
log_clipping (bool): If True, basic logging on stderr when clipping still
occurs despite strategy (only for 'rms').
make_parent_dir (bool): Make parent directory if it doesn't exist.
Returns:
Path: Path of the saved audio.
"""
assert wav.dtype.is_floating_point, "wav is not floating point"
if wav.dim() == 1:
wav = wav[None]
elif wav.dim() > 2:
raise ValueError("Input wav should be at most 2 dimension.")
assert wav.isfinite().all()
wav = normalize_audio(wav, normalize, strategy, peak_clip_headroom_db,
rms_headroom_db, loudness_headroom_db, loudness_compressor,
log_clipping=log_clipping, sample_rate=sample_rate,
stem_name=str(stem_name))
if format == 'mp3':
suffix = '.mp3'
flags = ['-f', 'mp3', '-c:a', 'libmp3lame', '-b:a', f'{mp3_rate}k']
elif format == 'wav':
suffix = '.wav'
flags = ['-f', 'wav', '-c:a', 'pcm_s16le']
elif format == 'ogg':
suffix = '.ogg'
flags = ['-f', 'ogg', '-c:a', 'libvorbis']
if ogg_rate is not None:
flags += ['-b:a', f'{ogg_rate}k']
elif format == 'flac':
suffix = '.flac'
flags = ['-f', 'flac']
else:
raise RuntimeError(f"Invalid format {format}. Only wav or mp3 are supported.")
if not add_suffix:
suffix = ''
path = Path(str(stem_name) + suffix)
if make_parent_dir:
path.parent.mkdir(exist_ok=True, parents=True)
try:
_piping_to_ffmpeg(path, wav, sample_rate, flags)
except Exception:
if path.exists():
# we do not want to leave half written files around.
path.unlink()
raise
return path |
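A usage sketch (assumes ffmpeg is available on the path; the output stem is a placeholder): write one second of a 440 Hz tone as an mp3:
import math
import torch

sr = 32000
t = torch.arange(sr) / sr
wav = 0.1 * torch.sin(2 * math.pi * 440.0 * t)[None]   # [1, 32000], mono
out_path = audio_write("out/beep", wav, sample_rate=sr, format="mp3")
print(out_path)  # out/beep.mp3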
AudioMeta from a path to an audio file.
Args:
file_path (str): Resolved path of valid audio file.
minimal (bool): Whether to only load the minimal set of metadata (takes longer if not).
Returns:
AudioMeta: Audio file path and its metadata. | def _get_audio_meta(file_path: str, minimal: bool = True) -> AudioMeta:
"""AudioMeta from a path to an audio file.
Args:
file_path (str): Resolved path of valid audio file.
minimal (bool): Whether to only load the minimal set of metadata (takes longer if not).
Returns:
AudioMeta: Audio file path and its metadata.
"""
info = audio_info(file_path)
amplitude: tp.Optional[float] = None
if not minimal:
wav, sr = audio_read(file_path)
amplitude = wav.abs().max().item()
return AudioMeta(file_path, info.duration, info.sample_rate, amplitude) |
If Dora is available as a dependency, try to resolve potential relative paths
in list of AudioMeta. This method is expected to be used when loading meta from file.
Args:
m (AudioMeta): Audio meta to resolve.
fast (bool): If True, uses a really fast check for determining if a file
is already absolute or not. Only valid on Linux/Mac.
Returns:
AudioMeta: Audio meta with resolved path. | def _resolve_audio_meta(m: AudioMeta, fast: bool = True) -> AudioMeta:
"""If Dora is available as a dependency, try to resolve potential relative paths
in list of AudioMeta. This method is expected to be used when loading meta from file.
Args:
m (AudioMeta): Audio meta to resolve.
fast (bool): If True, uses a really fast check for determining if a file
is already absolute or not. Only valid on Linux/Mac.
Returns:
AudioMeta: Audio meta with resolved path.
"""
def is_abs(m):
if fast:
return str(m)[0] == '/'
else:
return os.path.isabs(str(m))
if not dora:
return m
if not is_abs(m.path):
m.path = dora.git_save.to_absolute_path(m.path)
if m.info_path is not None and not is_abs(m.info_path.zip_path):
m.info_path.zip_path = dora.git_save.to_absolute_path(m.path)
return m |
Build a list of AudioMeta from a given path,
collecting relevant audio files and fetching meta info.
Args:
path (str or Path): Path to folder containing audio files.
exts (list of str): List of file extensions to consider for audio files.
minimal (bool): Whether to only load the minimal set of metadata (takes longer if not).
progress (bool): Whether to log progress on audio files collection.
workers (int): number of parallel workers, if 0, use only the current thread.
Returns:
list of AudioMeta: List of audio file path and its metadata. | def find_audio_files(path: tp.Union[Path, str],
exts: tp.List[str] = DEFAULT_EXTS,
resolve: bool = True,
minimal: bool = True,
progress: bool = False,
workers: int = 0) -> tp.List[AudioMeta]:
"""Build a list of AudioMeta from a given path,
collecting relevant audio files and fetching meta info.
Args:
path (str or Path): Path to folder containing audio files.
exts (list of str): List of file extensions to consider for audio files.
minimal (bool): Whether to only load the minimal set of metadata (takes longer if not).
progress (bool): Whether to log progress on audio files collection.
workers (int): number of parallel workers, if 0, use only the current thread.
Returns:
list of AudioMeta: List of audio file path and its metadata.
"""
audio_files = []
futures: tp.List[Future] = []
pool: tp.Optional[ThreadPoolExecutor] = None
with ExitStack() as stack:
if workers > 0:
pool = ThreadPoolExecutor(workers)
stack.enter_context(pool)
if progress:
print("Finding audio files...")
for root, folders, files in os.walk(path, followlinks=True):
for file in files:
full_path = Path(root) / file
if full_path.suffix.lower() in exts:
audio_files.append(full_path)
if pool is not None:
futures.append(pool.submit(_get_audio_meta, str(audio_files[-1]), minimal))
if progress:
print(format(len(audio_files), " 8d"), end='\r', file=sys.stderr)
if progress:
print("Getting audio metadata...")
meta: tp.List[AudioMeta] = []
for idx, file_path in enumerate(audio_files):
try:
if pool is None:
m = _get_audio_meta(str(file_path), minimal)
else:
m = futures[idx].result()
if resolve:
m = _resolve_audio_meta(m)
except Exception as err:
print("Error with", str(file_path), err, file=sys.stderr)
continue
meta.append(m)
if progress:
print(format((1 + idx) / len(audio_files), " 3.1%"), end='\r', file=sys.stderr)
meta.sort()
return meta |
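A usage sketch (paths are placeholders): scan a folder with a thread pool and persist the resulting metadata with save_audio_meta below; the .gz suffix triggers gzip compression:
meta = find_audio_files("datasets/music", exts=['.wav', '.mp3'], minimal=True, progress=True, workers=8)
save_audio_meta("egs/music/data.jsonl.gz", meta)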
Load list of AudioMeta from an optionally compressed json file.
Args:
path (str or Path): Path to JSON file.
resolve (bool): Whether to resolve the path from AudioMeta (default=True).
fast (bool): activates some tricks to make things faster.
Returns:
list of AudioMeta: List of audio file path and its total duration. | def load_audio_meta(path: tp.Union[str, Path],
resolve: bool = True, fast: bool = True) -> tp.List[AudioMeta]:
"""Load list of AudioMeta from an optionally compressed json file.
Args:
path (str or Path): Path to JSON file.
resolve (bool): Whether to resolve the path from AudioMeta (default=True).
fast (bool): activates some tricks to make things faster.
Returns:
list of AudioMeta: List of audio file path and its total duration.
"""
open_fn = gzip.open if str(path).lower().endswith('.gz') else open
with open_fn(path, 'rb') as fp: # type: ignore
lines = fp.readlines()
meta = []
for line in lines:
d = json.loads(line)
m = AudioMeta.from_dict(d)
if resolve:
m = _resolve_audio_meta(m, fast=fast)
meta.append(m)
return meta |
Save the audio metadata to the file pointer as json.
Args:
path (str or Path): Path to JSON file.
metadata (list of BaseAudioMeta): List of audio meta to save. | def save_audio_meta(path: tp.Union[str, Path], meta: tp.List[AudioMeta]):
"""Save the audio metadata to the file pointer as json.
Args:
path (str or Path): Path to JSON file.
metadata (list of BaseAudioMeta): List of audio meta to save.
"""
Path(path).parent.mkdir(exist_ok=True, parents=True)
open_fn = gzip.open if str(path).lower().endswith('.gz') else open
with open_fn(path, 'wb') as fp: # type: ignore
for m in meta:
json_str = json.dumps(m.to_dict()) + '\n'
json_bytes = json_str.encode('utf-8')
fp.write(json_bytes) |
Convert audio to the given number of channels.
Args:
wav (torch.Tensor): Audio wave of shape [B, C, T].
channels (int): Expected number of channels as output.
Returns:
torch.Tensor: Downmixed or unchanged audio wave [B, C, T]. | def convert_audio_channels(wav: torch.Tensor, channels: int = 2) -> torch.Tensor:
"""Convert audio to the given number of channels.
Args:
wav (torch.Tensor): Audio wave of shape [B, C, T].
channels (int): Expected number of channels as output.
Returns:
torch.Tensor: Downmixed or unchanged audio wave [B, C, T].
"""
*shape, src_channels, length = wav.shape
if src_channels == channels:
pass
elif channels == 1:
# Case 1:
# The caller asked 1-channel audio, and the stream has multiple
# channels, downmix all channels.
wav = wav.mean(dim=-2, keepdim=True)
elif src_channels == 1:
# Case 2:
# The caller asked for multiple channels, but the input file has
# a single channel, replicate the audio over all channels.
wav = wav.expand(*shape, channels, length)
elif src_channels >= channels:
# Case 3:
# The caller asked for multiple channels, and the input file has
# more channels than requested. In that case return the first channels.
wav = wav[..., :channels, :]
else:
# Case 4: What is a reasonable choice here?
raise ValueError('The audio file has less channels than requested but is not mono.')
return wav |
Convert audio to new sample rate and number of audio channels. | def convert_audio(wav: torch.Tensor, from_rate: float,
to_rate: float, to_channels: int) -> torch.Tensor:
"""Convert audio to new sample rate and number of audio channels."""
wav = julius.resample_frac(wav, int(from_rate), int(to_rate))
wav = convert_audio_channels(wav, to_channels)
return wav |
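A small usage example: downmix one second of 44.1 kHz stereo to 16 kHz mono:
import torch

stereo = torch.rand(2, 44100) * 2 - 1
mono_16k = convert_audio(stereo, from_rate=44100, to_rate=16000, to_channels=1)
print(mono_16k.shape)  # torch.Size([1, 16000])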
Normalize an input signal to a user loudness in dB LKFS.
Audio loudness is defined according to the ITU-R BS.1770-4 recommendation.
Args:
wav (torch.Tensor): Input multichannel audio data.
sample_rate (int): Sample rate.
loudness_headroom_db (float): Target loudness of the output in dB LUFS.
loudness_compressor (bool): Uses tanh for soft clipping.
energy_floor (float): anything below that RMS level will not be rescaled.
Returns:
torch.Tensor: Loudness normalized output data. | def normalize_loudness(wav: torch.Tensor, sample_rate: int, loudness_headroom_db: float = 14,
loudness_compressor: bool = False, energy_floor: float = 2e-3):
"""Normalize an input signal to a user loudness in dB LKFS.
Audio loudness is defined according to the ITU-R BS.1770-4 recommendation.
Args:
wav (torch.Tensor): Input multichannel audio data.
sample_rate (int): Sample rate.
loudness_headroom_db (float): Target loudness of the output in dB LUFS.
loudness_compressor (bool): Uses tanh for soft clipping.
energy_floor (float): anything below that RMS level will not be rescaled.
Returns:
torch.Tensor: Loudness normalized output data.
"""
energy = wav.pow(2).mean().sqrt().item()
if energy < energy_floor:
return wav
transform = torchaudio.transforms.Loudness(sample_rate)
input_loudness_db = transform(wav).item()
# calculate the gain needed to scale to the desired loudness level
delta_loudness = -loudness_headroom_db - input_loudness_db
gain = 10.0 ** (delta_loudness / 20.0)
output = gain * wav
if loudness_compressor:
output = torch.tanh(output)
assert output.isfinite().all(), (input_loudness_db, wav.pow(2).mean().sqrt())
return output |
Utility function to clip the audio with logging if specified. | def _clip_wav(wav: torch.Tensor, log_clipping: bool = False, stem_name: tp.Optional[str] = None) -> None:
"""Utility function to clip the audio with logging if specified."""
max_scale = wav.abs().max()
if log_clipping and max_scale > 1:
clamp_prob = (wav.abs() > 1).float().mean().item()
print(f"CLIPPING {stem_name or ''} happening with proba (a bit of clipping is okay):",
clamp_prob, "maximum scale: ", max_scale.item(), file=sys.stderr)
wav.clamp_(-1, 1) |
Normalize the audio according to the prescribed strategy (see after).
Args:
wav (torch.Tensor): Audio data.
normalize (bool): if `True` (default), normalizes according to the prescribed
strategy (see after). If `False`, the strategy is only used in case clipping
would happen.
strategy (str): Can be either 'clip', 'peak', or 'rms'. Default is 'peak',
i.e. audio is normalized by its largest value. RMS normalizes by root-mean-square
with extra headroom to avoid clipping. 'clip' just clips.
peak_clip_headroom_db (float): Headroom in dB when doing 'peak' or 'clip' strategy.
rms_headroom_db (float): Headroom in dB when doing 'rms' strategy. This must be much larger
than the `peak_clip` one to avoid further clipping.
loudness_headroom_db (float): Target loudness for loudness normalization.
loudness_compressor (bool): If True, uses tanh based soft clipping.
log_clipping (bool): If True, basic logging on stderr when clipping still
occurs despite strategy (only for 'rms').
sample_rate (int): Sample rate for the audio data (required for loudness).
stem_name (str, optional): Stem name for clipping logging.
Returns:
torch.Tensor: Normalized audio. | def normalize_audio(wav: torch.Tensor, normalize: bool = True,
strategy: str = 'peak', peak_clip_headroom_db: float = 1,
rms_headroom_db: float = 18, loudness_headroom_db: float = 14,
loudness_compressor: bool = False, log_clipping: bool = False,
sample_rate: tp.Optional[int] = None,
stem_name: tp.Optional[str] = None) -> torch.Tensor:
"""Normalize the audio according to the prescribed strategy (see after).
Args:
wav (torch.Tensor): Audio data.
normalize (bool): if `True` (default), normalizes according to the prescribed
strategy (see after). If `False`, the strategy is only used in case clipping
would happen.
strategy (str): Can be either 'clip', 'peak', or 'rms'. Default is 'peak',
i.e. audio is normalized by its largest value. RMS normalizes by root-mean-square
with extra headroom to avoid clipping. 'clip' just clips.
peak_clip_headroom_db (float): Headroom in dB when doing 'peak' or 'clip' strategy.
rms_headroom_db (float): Headroom in dB when doing 'rms' strategy. This must be much larger
than the `peak_clip` one to avoid further clipping.
loudness_headroom_db (float): Target loudness for loudness normalization.
loudness_compressor (bool): If True, uses tanh based soft clipping.
log_clipping (bool): If True, basic logging on stderr when clipping still
occurs despite strategy (only for 'rms').
sample_rate (int): Sample rate for the audio data (required for loudness).
stem_name (str, optional): Stem name for clipping logging.
Returns:
torch.Tensor: Normalized audio.
"""
scale_peak = 10 ** (-peak_clip_headroom_db / 20)
scale_rms = 10 ** (-rms_headroom_db / 20)
if strategy == 'peak':
rescaling = (scale_peak / wav.abs().max())
if normalize or rescaling < 1:
wav = wav * rescaling
elif strategy == 'clip':
wav = wav.clamp(-scale_peak, scale_peak)
elif strategy == 'rms':
mono = wav.mean(dim=0)
rescaling = scale_rms / mono.pow(2).mean().sqrt()
if normalize or rescaling < 1:
wav = wav * rescaling
_clip_wav(wav, log_clipping=log_clipping, stem_name=stem_name)
elif strategy == 'loudness':
assert sample_rate is not None, "Loudness normalization requires sample rate."
wav = normalize_loudness(wav, sample_rate, loudness_headroom_db, loudness_compressor)
_clip_wav(wav, log_clipping=log_clipping, stem_name=stem_name)
else:
assert wav.abs().max() < 1
assert strategy == '' or strategy == 'none', f"Unexpected strategy: '{strategy}'"
return wav |
Convert audio to float 32 bits PCM format.
| def f32_pcm(wav: torch.Tensor) -> torch.Tensor:
"""Convert audio to float 32 bits PCM format.
"""
if wav.dtype.is_floating_point:
return wav
elif wav.dtype == torch.int16:
return wav.float() / 2**15
elif wav.dtype == torch.int32:
return wav.float() / 2**31
raise ValueError(f"Unsupported wav dtype: {wav.dtype}") |
Convert audio to int 16 bits PCM format.
..Warning:: There exist many formula for doing this conversion. None are perfect
due to the asymmetry of the int16 range. One either have possible clipping, DC offset,
or inconsistencies with f32_pcm. If the given wav doesn't have enough headroom,
it is possible that `i16_pcm(f32_pcm)) != Identity`. | def i16_pcm(wav: torch.Tensor) -> torch.Tensor:
"""Convert audio to int 16 bits PCM format.
..Warning:: There exist many formula for doing this conversion. None are perfect
due to the asymmetry of the int16 range. One either have possible clipping, DC offset,
or inconsistencies with f32_pcm. If the given wav doesn't have enough headroom,
it is possible that `i16_pcm(f32_pcm)) != Identity`.
"""
if wav.dtype.is_floating_point:
assert wav.abs().max() <= 1
candidate = (wav * 2 ** 15).round()
if candidate.max() >= 2 ** 15: # clipping would occur
candidate = (wav * (2 ** 15 - 1)).round()
return candidate.short()
else:
assert wav.dtype == torch.int16
return wav |
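A round-trip sketch showing how the two PCM helpers relate; the reconstruction matches the original up to int16 quantization error:
import torch

wav_f32 = torch.rand(1, 16000) * 2 - 1
wav_i16 = i16_pcm(wav_f32)   # int16, scaled by 2**15 (or 2**15 - 1 if clipping would occur)
back = f32_pcm(wav_i16)      # float32 again, close to wav_f32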
Monkey-patch meta to match cluster specificities. | def _clusterify_meta(meta: AudioMeta) -> AudioMeta:
"""Monkey-patch meta to match cluster specificities."""
meta.path = AudioCraftEnvironment.apply_dataset_mappers(meta.path)
if meta.info_path is not None:
meta.info_path.zip_path = AudioCraftEnvironment.apply_dataset_mappers(meta.info_path.zip_path)
return meta |
Monkey-patch all meta to match cluster specificities. | def clusterify_all_meta(meta: tp.List[AudioMeta]) -> tp.List[AudioMeta]:
"""Monkey-patch all meta to match cluster specificities."""
return [_clusterify_meta(m) for m in meta] |
Preprocess a single keyword or possibly a list of keywords. | def get_keyword_or_keyword_list(value: tp.Optional[str]) -> tp.Union[tp.Optional[str], tp.Optional[tp.List[str]]]:
"""Preprocess a single keyword or possibly a list of keywords."""
if isinstance(value, list):
return get_keyword_list(value)
else:
return get_keyword(value) |
Preprocess a single string value. | def get_string(value: tp.Optional[str]) -> tp.Optional[str]:
"""Preprocess a single string value."""
if value is None or (not isinstance(value, str)) or len(value) == 0 or value == 'None':
return None
else:
return value.strip() |
Preprocess a single keyword. | def get_keyword(value: tp.Optional[str]) -> tp.Optional[str]:
"""Preprocess a single keyword."""
if value is None or (not isinstance(value, str)) or len(value) == 0 or value == 'None':
return None
else:
return value.strip().lower() |
Preprocess a list of keywords. | def get_keyword_list(values: tp.Union[str, tp.List[str]]) -> tp.Optional[tp.List[str]]:
"""Preprocess a list of keywords."""
if isinstance(values, str):
values = [v.strip() for v in re.split(r'[,\s]', values)]
elif isinstance(values, float) and math.isnan(values):
values = []
if not isinstance(values, list):
logger.debug(f"Unexpected keyword list {values}")
values = [str(values)]
kws = [get_keyword(v) for v in values]
kw_list = [k for k in kws if k is not None]
if len(kw_list) == 0:
return None
else:
return kw_list |
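A quick example of the keyword normalization (commas and whitespace both act as separators, empties are dropped, everything is lower-cased):
print(get_keyword_list("Rock, Indie  electronic"))  # ['rock', 'indie', 'electronic']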
Augment MusicInfo description with additional metadata fields and potential dropout.
Additional textual attributes are added given probability 'merge_text_p' and
the original textual description is dropped from the augmented description given probability drop_desc_p.
Args:
music_info (MusicInfo): The music metadata to augment.
merge_text_p (float): Probability of merging additional metadata to the description.
If provided value is 0, then no merging is performed.
drop_desc_p (float): Probability of dropping the original description on text merge.
if provided value is 0, then no drop out is performed.
drop_other_p (float): Probability of dropping the other fields used for text augmentation.
Returns:
MusicInfo: The MusicInfo with augmented textual description. | def augment_music_info_description(music_info: MusicInfo, merge_text_p: float = 0.,
drop_desc_p: float = 0., drop_other_p: float = 0.) -> MusicInfo:
"""Augment MusicInfo description with additional metadata fields and potential dropout.
Additional textual attributes are added given probability 'merge_text_p' and
the original textual description is dropped from the augmented description given probability drop_desc_p.
Args:
music_info (MusicInfo): The music metadata to augment.
merge_text_p (float): Probability of merging additional metadata to the description.
If provided value is 0, then no merging is performed.
drop_desc_p (float): Probability of dropping the original description on text merge.
if provided value is 0, then no drop out is performed.
drop_other_p (float): Probability of dropping the other fields used for text augmentation.
Returns:
MusicInfo: The MusicInfo with augmented textual description.
"""
def is_valid_field(field_name: str, field_value: tp.Any) -> bool:
valid_field_name = field_name in ['key', 'bpm', 'genre', 'moods', 'instrument', 'keywords']
valid_field_value = field_value is not None and isinstance(field_value, (int, float, str, list))
keep_field = random.uniform(0, 1) < drop_other_p
return valid_field_name and valid_field_value and keep_field
def process_value(v: tp.Any) -> str:
if isinstance(v, (int, float, str)):
return str(v)
if isinstance(v, list):
return ", ".join(v)
else:
raise ValueError(f"Unknown type for text value! ({type(v), v})")
description = music_info.description
metadata_text = ""
if random.uniform(0, 1) < merge_text_p:
meta_pairs = [f'{_field.name}: {process_value(getattr(music_info, _field.name))}'
for _field in fields(music_info) if is_valid_field(_field.name, getattr(music_info, _field.name))]
random.shuffle(meta_pairs)
metadata_text = ". ".join(meta_pairs)
description = description if not random.uniform(0, 1) < drop_desc_p else None
logger.debug(f"Applying text augmentation on MMI info. description: {description}, metadata: {metadata_text}")
if description is None:
description = metadata_text if len(metadata_text) > 1 else None
else:
description = ". ".join([description.rstrip('.'), metadata_text])
description = description.strip() if description else None
music_info = replace(music_info)
music_info.description = description
return music_info |
Preprocess key keywords, discarding the value if multiple keys are defined. | def get_musical_key(value: tp.Optional[str]) -> tp.Optional[str]:
"""Preprocess key keywords, discarding the value if multiple keys are defined."""
if value is None or (not isinstance(value, str)) or len(value) == 0 or value == 'None':
return None
elif ',' in value:
# For now, we discard the value when multiple keys are defined, separated with commas
return None
else:
return value.strip().lower() |
Preprocess to a float. | def get_bpm(value: tp.Optional[str]) -> tp.Optional[float]:
"""Preprocess to a float."""
if value is None:
return None
try:
return float(value)
except ValueError:
return None |