Zero out the parameters of a module and return it.
def zero_module(module):
    """
    Zero out the parameters of a module and return it.
    """
    for p in module.parameters():
        p.detach().zero_()
    return module
Scale the parameters of a module and return it.
def scale_module(module, scale):
    """
    Scale the parameters of a module and return it.
    """
    for p in module.parameters():
        p.detach().mul_(scale)
    return module
Take the mean over all non-batch dimensions.
def mean_flat(tensor):
    """
    Take the mean over all non-batch dimensions.
    """
    return tensor.mean(dim=list(range(1, len(tensor.shape))))
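A minimal usage sketch (assumes PyTorch; shapes are illustrative):

import torch

x = torch.randn(8, 3, 64, 64)    # (batch, channels, height, width)
per_sample = mean_flat(x)        # mean over all non-batch dimensions
print(per_sample.shape)          # torch.Size([8])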
Make a standard normalization layer. :param channels: number of input channels. :return: an nn.Module for normalization.
def normalization(channels):
    """
    Make a standard normalization layer.

    :param channels: number of input channels.
    :return: an nn.Module for normalization.
    """
    return GroupNorm32(32, channels)
Create a 1D, 2D, or 3D convolution module.
def conv_nd(dims, *args, **kwargs):
    """
    Create a 1D, 2D, or 3D convolution module.
    """
    if dims == 1:
        return nn.Conv1d(*args, **kwargs)
    elif dims == 2:
        return nn.Conv2d(*args, **kwargs)
    elif dims == 3:
        return nn.Conv3d(*args, **kwargs)
    raise ValueError(f"unsupported dimensions: {dims}")
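An illustrative call (assumes torch and nn are imported as in the surrounding module; argument values are made up):

import torch

conv = conv_nd(2, 16, 32, kernel_size=3, padding=1)   # equivalent to nn.Conv2d(16, 32, 3, padding=1)
y = conv(torch.randn(1, 16, 8, 8))
print(y.shape)                                         # torch.Size([1, 32, 8, 8])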
Create a linear module.
def linear(*args, **kwargs):
    """
    Create a linear module.
    """
    return nn.Linear(*args, **kwargs)
Create a 1D, 2D, or 3D average pooling module.
def avg_pool_nd(dims, *args, **kwargs):
    """
    Create a 1D, 2D, or 3D average pooling module.
    """
    if dims == 1:
        return nn.AvgPool1d(*args, **kwargs)
    elif dims == 2:
        return nn.AvgPool2d(*args, **kwargs)
    elif dims == 3:
        return nn.AvgPool3d(*args, **kwargs)
    raise ValueError(f"unsupported dimensions: {dims}")
source: https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/losses.py#L12 Compute the KL divergence between two gaussians. Shapes are automatically broadcasted, so batches can be compared to scalars, among other use cases.
def normal_kl(mean1, logvar1, mean2, logvar2):
    """
    source: https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/losses.py#L12
    Compute the KL divergence between two gaussians.

    Shapes are automatically broadcasted, so batches can be compared to
    scalars, among other use cases.
    """
    tensor = None
    for obj in (mean1, logvar1, mean2, logvar2):
        if isinstance(obj, torch.Tensor):
            tensor = obj
            break
    assert tensor is not None, "at least one argument must be a Tensor"

    # Force variances to be Tensors. Broadcasting helps convert scalars to
    # Tensors, but it does not work for torch.exp().
    logvar1, logvar2 = [
        x if isinstance(x, torch.Tensor) else torch.tensor(x).to(tensor)
        for x in (logvar1, logvar2)
    ]

    return 0.5 * (
        -1.0
        + logvar2
        - logvar1
        + torch.exp(logvar1 - logvar2)
        + ((mean1 - mean2) ** 2) * torch.exp(-logvar2)
    )
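A quick sanity check, not taken from the source: the KL between identical Gaussians is zero, and broadcasting lets a batch be compared against scalar parameters:

import torch

mean = torch.zeros(4)
logvar = torch.zeros(4)
print(normal_kl(mean, logvar, mean, logvar))   # tensor([0., 0., 0., 0.]) -- identical Gaussians
# batch vs. scalar reference N(0, 1); matches the closed form 0.5 * (mu^2 + sigma^2 - log(sigma^2) - 1)
print(normal_kl(torch.tensor([1.0]), torch.tensor([0.0]), 0.0, 0.0))   # tensor([0.5000])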
Overwrite model.train with this function to make sure train/eval mode does not change anymore.
def disabled_train(self, mode=True):
    """Overwrite model.train with this function to make sure train/eval mode
    does not change anymore."""
    return self
enumerate available model architectures based on config files
def list_models():
    """enumerate available model architectures based on config files"""
    return list(_MODEL_CONFIGS.keys())
add model config path or file and update registry
def add_model_config(path):
    """add model config path or file and update registry"""
    if not isinstance(path, Path):
        path = Path(path)
    _MODEL_CONFIG_PATHS.append(path)
    _rescan_model_configs()
Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). This is the same as the DropConnect impl I created for EfficientNet, etc networks, however, the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper... See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for changing the layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use 'survival rate' as the argument.
def drop_path(x, drop_prob: float = 0., training: bool = False):
    """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).

    This is the same as the DropConnect impl I created for EfficientNet, etc networks, however,
    the original name is misleading as 'Drop Connect' is a different form of dropout in a separate
    paper... See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956
    ... I've opted for changing the layer and argument names to 'drop path' rather than mix
    DropConnect as a layer name and use 'survival rate' as the argument.
    """
    if drop_prob == 0. or not training:
        return x
    keep_prob = 1 - drop_prob
    shape = (x.shape[0],) + (1,) * (x.ndim - 1)  # work with diff dim tensors, not just 2D ConvNets
    random_tensor = keep_prob + torch.rand(shape, dtype=x.dtype, device=x.device)
    random_tensor.floor_()  # binarize
    output = x.div(keep_prob) * random_tensor
    return output
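A sketch of the typical call site, using a hypothetical residual block (not from the source):

import torch
import torch.nn as nn

class ResidualMLP(nn.Module):
    """Hypothetical block: x + drop_path(f(x)), so whole residual branches are dropped per sample."""
    def __init__(self, dim, drop_prob=0.1):
        super().__init__()
        self.fc = nn.Linear(dim, dim)
        self.drop_prob = drop_prob

    def forward(self, x):
        return x + drop_path(self.fc(x), self.drop_prob, self.training)

block = ResidualMLP(16)
print(block(torch.randn(4, 16)).shape)   # torch.Size([4, 16])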
Fills the input Tensor with values drawn from a truncated normal distribution. The values are effectively drawn from the normal distribution :math:`\mathcal{N}(\text{mean}, \text{std}^2)` with values outside :math:`[a, b]` redrawn until they are within the bounds. The method used for generating the random values works best when :math:`a \leq \text{mean} \leq b`. Args: tensor: an n-dimensional `torch.Tensor` mean: the mean of the normal distribution std: the standard deviation of the normal distribution a: the minimum cutoff value b: the maximum cutoff value Examples: >>> w = torch.empty(3, 5) >>> nn.init.trunc_normal_(w)
def trunc_normal_(tensor, mean=0., std=1., a=-2., b=2.):
    # type: (Tensor, float, float, float, float) -> Tensor
    r"""Fills the input Tensor with values drawn from a truncated
    normal distribution. The values are effectively drawn from the
    normal distribution :math:`\mathcal{N}(\text{mean}, \text{std}^2)`
    with values outside :math:`[a, b]` redrawn until they are within
    the bounds. The method used for generating the random values works
    best when :math:`a \leq \text{mean} \leq b`.

    Args:
        tensor: an n-dimensional `torch.Tensor`
        mean: the mean of the normal distribution
        std: the standard deviation of the normal distribution
        a: the minimum cutoff value
        b: the maximum cutoff value

    Examples:
        >>> w = torch.empty(3, 5)
        >>> nn.init.trunc_normal_(w)
    """
    return _no_grad_trunc_normal_(tensor, mean, std, a, b)
Args: x: (B, H, W, C) window_size (int): window size Returns: windows: (num_windows*B, window_size, window_size, C)
def window_partition(x, window_size):
    """
    Args:
        x: (B, H, W, C)
        window_size (int): window size

    Returns:
        windows: (num_windows*B, window_size, window_size, C)
    """
    B, H, W, C = x.shape
    x = x.view(B, H // window_size, window_size, W // window_size, window_size, C)
    windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C)
    return windows
Args: windows: (num_windows*B, window_size, window_size, C) window_size (int): Window size H (int): Height of image W (int): Width of image Returns: x: (B, H, W, C)
def window_reverse(windows, window_size, H, W):
    """
    Args:
        windows: (num_windows*B, window_size, window_size, C)
        window_size (int): Window size
        H (int): Height of image
        W (int): Width of image

    Returns:
        x: (B, H, W, C)
    """
    B = int(windows.shape[0] / (H * W / window_size / window_size))
    x = windows.view(B, H // window_size, W // window_size, window_size, window_size, -1)
    x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, H, W, -1)
    return x
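window_partition and window_reverse are inverses whenever H and W are divisible by window_size; a round-trip sketch (shapes illustrative):

import torch

x = torch.randn(2, 8, 8, 3)                    # (B, H, W, C), H and W divisible by window_size
windows = window_partition(x, window_size=4)   # (2 * 2 * 2, 4, 4, 3) = (8, 4, 4, 3)
x_back = window_reverse(windows, window_size=4, H=8, W=8)
print(torch.allclose(x, x_back))               # True -- the two functions undo each other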
Convert applicable model parameters to fp16
def convert_weights_to_fp16(model: nn.Module):
    """Convert applicable model parameters to fp16"""

    def _convert_weights_to_fp16(l):
        if isinstance(l, (nn.Conv1d, nn.Conv2d, nn.Linear)):
            l.weight.data = l.weight.data.half()
            if l.bias is not None:
                l.bias.data = l.bias.data.half()

        if isinstance(l, nn.MultiheadAttention):
            for attr in [
                *[f"{s}_proj_weight" for s in ["in", "q", "k", "v"]],
                "in_proj_bias",
                "bias_k",
                "bias_v",
            ]:
                tensor = getattr(l, attr)
                if tensor is not None:
                    tensor.data = tensor.data.half()

        for name in ["text_projection", "proj"]:
            if hasattr(l, name):
                attr = getattr(l, name)
                if attr is not None:
                    attr.data = attr.data.half()

    model.apply(_convert_weights_to_fp16)
Returns the names of available CLIP models
def list_openai_models() -> List[str]:
    """Returns the names of available CLIP models"""
    return list_pretrained_tag_models('openai')
Load a CLIP model, preserve its text pretrained part, and set in the CLAP model Parameters ---------- name : str A model name listed by `clip.available_models()`, or the path to a model checkpoint containing the state_dict device : Union[str, torch.device] The device to put the loaded model jit : bool Whether to load the optimized JIT model (default) or more hackable non-JIT model. Returns ------- model : torch.nn.Module The CLAP model preprocess : Callable[[PIL.Image], torch.Tensor] A torchvision transform that converts a PIL image into a tensor that the returned model can take as its input
def load_openai_model( name: str, model_cfg, device: Union[str, torch.device] = "cuda" if torch.cuda.is_available() else "cpu", jit=True, cache_dir=os.path.expanduser("~/.cache/clip"), enable_fusion: bool = False, fusion_type: str = 'None' ): """Load a CLIP model, preserve its text pretrained part, and set in the CLAP model Parameters ---------- name : str A model name listed by `clip.available_models()`, or the path to a model checkpoint containing the state_dict device : Union[str, torch.device] The device to put the loaded model jit : bool Whether to load the optimized JIT model (default) or more hackable non-JIT model. Returns ------- model : torch.nn.Module The CLAP model preprocess : Callable[[PIL.Image], torch.Tensor] A torchvision transform that converts a PIL image into a tensor that the returned model can take as its input """ if get_pretrained_url(name, 'openai'): model_path = download_pretrained(get_pretrained_url(name, 'openai'), root=cache_dir) elif os.path.isfile(name): model_path = name else: raise RuntimeError(f"Model {name} not found; available models = {list_openai_models()}") try: # loading JIT archive model = torch.jit.load(model_path, map_location=device if jit else "cpu").eval() state_dict = None except RuntimeError: # loading saved state dict if jit: warnings.warn(f"File {model_path} is not a JIT archive. Loading as a state dict instead") jit = False state_dict = torch.load(model_path, map_location="cpu") if not jit: try: model = build_model_from_openai_state_dict(state_dict or model.state_dict(), model_cfg, enable_fusion, fusion_type).to(device) except KeyError: sd = {k[7:]: v for k, v in state_dict["state_dict"].items()} model = build_model_from_openai_state_dict(sd, model_cfg, enable_fusion, fusion_type).to(device) if str(device) == "cpu": model.float() return model # patch the device names device_holder = torch.jit.trace(lambda: torch.ones([]).to(torch.device(device)), example_inputs=[]) device_node = [n for n in device_holder.graph.findAllNodes("prim::Constant") if "Device" in repr(n)][-1] def patch_device(module): try: graphs = [module.graph] if hasattr(module, "graph") else [] except RuntimeError: graphs = [] if hasattr(module, "forward1"): graphs.append(module.forward1.graph) for graph in graphs: for node in graph.findAllNodes("prim::Constant"): if "value" in node.attributeNames() and str(node["value"]).startswith("cuda"): node.copyAttributes(device_node) model.apply(patch_device) patch_device(model.encode_audio) patch_device(model.encode_text) # patch dtype to float32 on CPU if str(device) == "cpu": float_holder = torch.jit.trace(lambda: torch.ones([]).float(), example_inputs=[]) float_input = list(float_holder.graph.findNode("aten::to").inputs())[1] float_node = float_input.node() def patch_float(module): try: graphs = [module.graph] if hasattr(module, "graph") else [] except RuntimeError: graphs = [] if hasattr(module, "forward1"): graphs.append(module.forward1.graph) for graph in graphs: for node in graph.findAllNodes("aten::to"): inputs = list(node.inputs()) for i in [1, 2]: # dtype can be the second or third argument to aten::to() if inputs[i].node()["value"] == 5: inputs[i].node().copyAttributes(float_node) model.apply(patch_float) patch_float(model.encode_audio) patch_float(model.encode_text) model.float() model.audio_branch.audio_length = model.audio_cfg.audio_length return model
Initialize a Linear or Convolutional layer.
def init_layer(layer):
    """Initialize a Linear or Convolutional layer."""
    nn.init.xavier_uniform_(layer.weight)

    if hasattr(layer, 'bias'):
        if layer.bias is not None:
            layer.bias.data.fill_(0.)
Initialize a Batchnorm layer.
def init_bn(bn):
    """Initialize a Batchnorm layer."""
    bn.bias.data.fill_(0.)
    bn.weight.data.fill_(1.)
returns list of pretrained models Returns a tuple (model_name, pretrain_tag) by default or 'name:tag' if as_str == True
def list_pretrained(as_str: bool = False):
    """ returns list of pretrained models
    Returns a tuple (model_name, pretrain_tag) by default or 'name:tag' if as_str == True
    """
    return [':'.join([k, t]) if as_str else (k, t)
            for k in _PRETRAINED.keys()
            for t in _PRETRAINED[k].keys()]
return all models having the specified pretrain tag
def list_pretrained_tag_models(tag: str):
    """ return all models having the specified pretrain tag """
    models = []
    for k in _PRETRAINED.keys():
        if tag in _PRETRAINED[k]:
            models.append(k)
    return models
return all pretrain tags for the specified model architecture
def list_pretrained_model_tags(model: str):
    """ return all pretrain tags for the specified model architecture """
    tags = []
    if model in _PRETRAINED:
        tags.extend(_PRETRAINED[model].keys())
    return tags
Returns list of utf-8 byte and a corresponding list of unicode strings. The reversible bpe codes work on unicode strings. This means you need a large # of unicode characters in your vocab if you want to avoid UNKs. When you're at something like a 10B token dataset you end up needing around 5K for decent coverage. This is a significant percentage of your normal, say, 32K bpe vocab. To avoid that, we want lookup tables between utf-8 bytes and unicode strings. And avoids mapping to whitespace/control characters the bpe code barfs on.
def bytes_to_unicode():
    """
    Returns list of utf-8 byte and a corresponding list of unicode strings.
    The reversible bpe codes work on unicode strings.
    This means you need a large # of unicode characters in your vocab if you want to avoid UNKs.
    When you're at something like a 10B token dataset you end up needing around 5K for decent coverage.
    This is a significant percentage of your normal, say, 32K bpe vocab.
    To avoid that, we want lookup tables between utf-8 bytes and unicode strings.
    And avoids mapping to whitespace/control characters the bpe code barfs on.
    """
    bs = list(range(ord("!"), ord("~")+1)) + list(range(ord("¡"), ord("¬")+1)) + list(range(ord("®"), ord("ÿ")+1))
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))
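A small check of the resulting table (illustrative only):

mapping = bytes_to_unicode()
print(len(mapping))                        # 256 -- every byte value gets a character
print(mapping[ord('A')])                   # 'A' (printable bytes map to themselves)
assert len(set(mapping.values())) == 256   # the mapping is a bijection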
Return set of symbol pairs in a word. Word is represented as tuple of symbols (symbols being variable-length strings).
def get_pairs(word):
    """Return set of symbol pairs in a word.
    Word is represented as tuple of symbols (symbols being variable-length strings).
    """
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
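For example (illustrative word; the result is a set, so print order may vary):

print(get_pairs(('l', 'o', 'w', 'e', 'r')))
# {('l', 'o'), ('o', 'w'), ('w', 'e'), ('e', 'r')}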
Returns the tokenized representation of given input string(s) Parameters ---------- texts : Union[str, List[str]] An input string or a list of input strings to tokenize context_length : int The context length to use; all CLIP models use 77 as the context length Returns ------- A two-dimensional tensor containing the resulting tokens, shape = [number of input strings, context_length]
def tokenize(texts: Union[str, List[str]], context_length: int = 77) -> torch.LongTensor:
    """
    Returns the tokenized representation of given input string(s)

    Parameters
    ----------
    texts : Union[str, List[str]]
        An input string or a list of input strings to tokenize
    context_length : int
        The context length to use; all CLIP models use 77 as the context length

    Returns
    -------
    A two-dimensional tensor containing the resulting tokens, shape = [number of input strings, context_length]
    """
    if isinstance(texts, str):
        texts = [texts]

    sot_token = _tokenizer.encoder["<start_of_text>"]
    eot_token = _tokenizer.encoder["<end_of_text>"]
    all_tokens = [[sot_token] + _tokenizer.encode(text) + [eot_token] for text in texts]
    result = torch.zeros(len(all_tokens), context_length, dtype=torch.long)

    for i, tokens in enumerate(all_tokens):
        if len(tokens) > context_length:
            tokens = tokens[:context_length]  # Truncate
        result[i, :len(tokens)] = torch.tensor(tokens)

    return result
Converts all `BatchNorm2d` and `SyncBatchNorm` layers of provided module into `FrozenBatchNorm2d`. If `module` is itself an instance of either `BatchNorm2d` or `SyncBatchNorm`, it is converted into `FrozenBatchNorm2d` and returned. Otherwise, the module is walked recursively and submodules are converted in place. Args: module (torch.nn.Module): Any PyTorch module. module_match (dict): Dictionary of full module names to freeze (all if empty) name (str): Full module name (prefix) Returns: torch.nn.Module: Resulting module Inspired by https://github.com/pytorch/pytorch/blob/a5895f85be0f10212791145bfedc0261d364f103/torch/nn/modules/batchnorm.py#L762
def freeze_batch_norm_2d(module, module_match={}, name=""): """ Converts all `BatchNorm2d` and `SyncBatchNorm` layers of provided module into `FrozenBatchNorm2d`. If `module` is itself an instance of either `BatchNorm2d` or `SyncBatchNorm`, it is converted into `FrozenBatchNorm2d` and returned. Otherwise, the module is walked recursively and submodules are converted in place. Args: module (torch.nn.Module): Any PyTorch module. module_match (dict): Dictionary of full module names to freeze (all if empty) name (str): Full module name (prefix) Returns: torch.nn.Module: Resulting module Inspired by https://github.com/pytorch/pytorch/blob/a5895f85be0f10212791145bfedc0261d364f103/torch/nn/modules/batchnorm.py#L762 """ res = module is_match = True if module_match: is_match = name in module_match if is_match and isinstance( module, (nn.modules.batchnorm.BatchNorm2d, nn.modules.batchnorm.SyncBatchNorm) ): res = FrozenBatchNorm2d(module.num_features) res.num_features = module.num_features res.affine = module.affine if module.affine: res.weight.data = module.weight.data.clone().detach() res.bias.data = module.bias.data.clone().detach() res.running_mean.data = module.running_mean.data res.running_var.data = module.running_var.data res.eps = module.eps else: for child_name, child in module.named_children(): full_child_name = ".".join([name, child_name]) if name else child_name new_child = freeze_batch_norm_2d(child, module_match, full_child_name) if new_child is not child: res.add_module(child_name, new_child) return res
Check if dataset exists
def exist(dataset_name, dataset_type):
    """
    Check if dataset exists
    """
    if dataset_type in dataset_split[dataset_name]:
        return True
    else:
        return False
Get tar path from dataset name and type
def get_tar_path_from_dataset_name( dataset_names, dataset_types, islocal, dataset_path, proportion=1, full_dataset=None ): """ Get tar path from dataset name and type """ output = [] for n in dataset_names: if full_dataset is not None and n in full_dataset: current_dataset_types = dataset_split[n] else: current_dataset_types = dataset_types for s in current_dataset_types: tmp = [] if islocal: sizefilepath_ = f"{dataset_path}/{n}/{s}/sizes.json" if not os.path.exists(sizefilepath_): sizefilepath_ = f"./json_files/{n}/{s}/sizes.json" else: sizefilepath_ = f"./json_files/{n}/{s}/sizes.json" if not os.path.exists(sizefilepath_): continue sizes = json.load(open(sizefilepath_, "r")) for k in sizes.keys(): if islocal: tmp.append(f"{dataset_path}/{n}/{s}/{k}") else: tmp.append( f"pipe:aws s3 --cli-connect-timeout 0 cp s3://s-laion-audio/webdataset_tar/{n}/{s}/{k} -" ) if proportion != 1: tmp = random.sample(tmp, int(proportion * len(tmp))) output.append(tmp) return sum(output, [])
Get tar path from txt path
def get_tar_path_from_txts(txt_path, islocal, proportion=1): """ Get tar path from txt path """ if isinstance(txt_path, (list, tuple)): return sum( [ get_tar_path_from_txts( txt_path[i], islocal=islocal, proportion=proportion ) for i in range(len(txt_path)) ], [], ) if isinstance(txt_path, str): with open(txt_path) as f: lines = f.readlines() if islocal: lines = [ lines[i] .split("\n")[0] .replace("pipe:aws s3 cp s3://s-laion-audio/", "/mnt/audio_clip/") for i in range(len(lines)) ] else: lines = [ lines[i].split("\n")[0].replace(".tar", ".tar -") for i in range(len(lines)) ] if proportion != 1: print("Sampling tars with proportion of {}".format(proportion)) lines = random.sample(lines, int(proportion * len(lines))) return lines
Args: x: (batch_size , ...) mixup_lambda: (batch_size,) Returns: out: (batch_size, ...)
def do_mixup(x, mixup_lambda):
    """
    Args:
      x: (batch_size, ...)
      mixup_lambda: (batch_size,)

    Returns:
      out: (batch_size, ...)
    """
    out = (
        x.transpose(0, -1) * mixup_lambda
        + torch.flip(x, dims=[0]).transpose(0, -1) * (1 - mixup_lambda)
    ).transpose(0, -1)
    return out
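A small sketch: with lambda = 1 a sample keeps its own content, with lambda = 0 it takes the batch-reversed sample:

import torch

x = torch.arange(6, dtype=torch.float32).reshape(2, 3)   # two samples in the batch
lam = torch.tensor([1.0, 0.0])
print(do_mixup(x, lam))
# row 0: 1.0 * x[0] + 0.0 * x[1] -> [0., 1., 2.]
# row 1: 0.0 * x[1] + 1.0 * x[0] -> [0., 1., 2.]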
Interpolate data in time domain. This is used to compensate the resolution reduction in downsampling of a CNN. Args: x: (batch_size, time_steps, classes_num) ratio: int, ratio to interpolate Returns: upsampled: (batch_size, time_steps * ratio, classes_num)
def interpolate(x, ratio):
    """Interpolate data in time domain. This is used to compensate the
    resolution reduction in downsampling of a CNN.

    Args:
      x: (batch_size, time_steps, classes_num)
      ratio: int, ratio to interpolate

    Returns:
      upsampled: (batch_size, time_steps * ratio, classes_num)
    """
    (batch_size, time_steps, classes_num) = x.shape
    upsampled = x[:, :, None, :].repeat(1, 1, ratio, 1)
    upsampled = upsampled.reshape(batch_size, time_steps * ratio, classes_num)
    return upsampled
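A shape-only sketch (sizes are illustrative):

import torch

x = torch.randn(4, 10, 527)   # (batch, time_steps, classes_num)
up = interpolate(x, ratio=8)
print(up.shape)               # torch.Size([4, 80, 527])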
Pad framewise_output to the same length as input frames. The pad value is the same as the value of the last frame. Args: framewise_output: (batch_size, frames_num, classes_num) frames_num: int, number of frames to pad Outputs: output: (batch_size, frames_num, classes_num)
def pad_framewise_output(framewise_output, frames_num):
    """Pad framewise_output to the same length as input frames. The pad value
    is the same as the value of the last frame.

    Args:
      framewise_output: (batch_size, frames_num, classes_num)
      frames_num: int, number of frames to pad

    Outputs:
      output: (batch_size, frames_num, classes_num)
    """
    pad = framewise_output[:, -1:, :].repeat(
        1, frames_num - framewise_output.shape[1], 1
    )  # tensor for padding
    output = torch.cat((framewise_output, pad), dim=1)  # (batch_size, frames_num, classes_num)
    return output
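A usage sketch (frame counts are illustrative):

import torch

framewise = torch.randn(2, 100, 527)                       # (batch, frames, classes)
padded = pad_framewise_output(framewise, frames_num=128)   # last frame repeated 28 times
print(padded.shape)                                        # torch.Size([2, 128, 527])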
Output dictionary from out.txt log file
def get_data_from_log(txt_path): """ Output dictionary from out.txt log file """ with open(txt_path) as f: lines = f.readlines() val_data = {} train_data = {} train_losses = [] train_losses_epoch = [] for i in range(len(lines)): if "| INFO |" in lines[i]: if "Eval Epoch" in lines[i]: if "val_loss" in lines[i]: # float(regex.sub("", lines[310].split(" ")[-1]).replace(" ", "")) line = lines[i].split("Eval Epoch: ")[-1] num_epoch = int(line.split(" ")[0].split(" ")[0]) d = { line.split(" ")[0] .split(" ")[1] .replace(":", ""): float(line.split(" ")[0].split(" ")[-1]) } for i in range(1, len(line.split(" "))): d = save_to_dict(line.split(" ")[i], d) val_data[num_epoch] = d elif "Train Epoch" in lines[i]: num_epoch = int(lines[i].split("Train Epoch: ")[1][0]) loss = float(lines[i].split("Loss: ")[-1].split(" (")[0]) train_losses.append(loss) train_losses_epoch.append(num_epoch) for i in range(len(train_losses)): train_data[i] = { "num_epoch": train_losses_epoch[i], "train_loss": train_losses[i], } return train_data, val_data
Args: img: numpy image, WxH or WxHxC sf: scale factor Return: cropped image
def modcrop_np(img, sf):
    '''
    Args:
        img: numpy image, WxH or WxHxC
        sf: scale factor

    Return:
        cropped image
    '''
    w, h = img.shape[:2]
    im = np.copy(img)
    return im[:w - w % sf, :h - h % sf, ...]
Calculate the X4 kernel from the X2 kernel (for proof see appendix in paper)
def analytic_kernel(k):
    """Calculate the X4 kernel from the X2 kernel (for proof see appendix in paper)"""
    k_size = k.shape[0]
    # Calculate the big kernels size
    big_k = np.zeros((3 * k_size - 2, 3 * k_size - 2))
    # Loop over the small kernel to fill the big one
    for r in range(k_size):
        for c in range(k_size):
            big_k[2 * r:2 * r + k_size, 2 * c:2 * c + k_size] += k[r, c] * k
    # Crop the edges of the big kernel to ignore very small values and increase run time of SR
    crop = k_size // 2
    cropped_big_k = big_k[crop:-crop, crop:-crop]
    # Normalize to 1
    return cropped_big_k / cropped_big_k.sum()
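A quick numerical check with a toy kernel (a sketch, not from the source):

import numpy as np

k2 = np.ones((5, 5)) / 25.0         # toy X2 kernel, only for shape/normalization checks
k4 = analytic_kernel(k2)
print(k4.shape)                     # (9, 9): a (3*5-2) kernel cropped by 5//2 on each side
print(np.isclose(k4.sum(), 1.0))    # True -- the output is re-normalized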
generate an anisotropic Gaussian kernel Args: ksize : e.g., 15, kernel size theta : [0, pi], rotation angle range l1 : [0.1,50], scaling of eigenvalues l2 : [0.1,l1], scaling of eigenvalues If l1 = l2, will get an isotropic Gaussian kernel. Returns: k : kernel
def anisotropic_Gaussian(ksize=15, theta=np.pi, l1=6, l2=6):
    """ generate an anisotropic Gaussian kernel
    Args:
        ksize : e.g., 15, kernel size
        theta : [0, pi], rotation angle range
        l1    : [0.1, 50], scaling of eigenvalues
        l2    : [0.1, l1], scaling of eigenvalues
        If l1 = l2, will get an isotropic Gaussian kernel.

    Returns:
        k     : kernel
    """
    v = np.dot(np.array([[np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)]]), np.array([1., 0.]))
    V = np.array([[v[0], v[1]], [v[1], -v[0]]])
    D = np.array([[l1, 0], [0, l2]])
    Sigma = np.dot(np.dot(V, D), np.linalg.inv(V))
    k = gm_blur_kernel(mean=[0, 0], cov=Sigma, size=ksize)
    return k
shift pixel for super-resolution with different scale factors Args: x: WxHxC or WxH sf: scale factor upper_left: shift direction
def shift_pixel(x, sf, upper_left=True):
    """shift pixel for super-resolution with different scale factors
    Args:
        x: WxHxC or WxH
        sf: scale factor
        upper_left: shift direction
    """
    h, w = x.shape[:2]
    shift = (sf - 1) * 0.5

    xv, yv = np.arange(0, w, 1.0), np.arange(0, h, 1.0)
    if upper_left:
        x1 = xv + shift
        y1 = yv + shift
    else:
        x1 = xv - shift
        y1 = yv - shift

    x1 = np.clip(x1, 0, w - 1)
    y1 = np.clip(y1, 0, h - 1)

    if x.ndim == 2:
        x = interp2d(xv, yv, x)(x1, y1)
    if x.ndim == 3:
        for i in range(x.shape[-1]):
            x[:, :, i] = interp2d(xv, yv, x[:, :, i])(x1, y1)

    return x
x: image, NxcxHxW k: kernel, Nx1xhxw
def blur(x, k):
    '''
    x: image, NxcxHxW
    k: kernel, Nx1xhxw
    '''
    n, c = x.shape[:2]
    p1, p2 = (k.shape[-2] - 1) // 2, (k.shape[-1] - 1) // 2
    x = torch.nn.functional.pad(x, pad=(p1, p2, p1, p2), mode='replicate')
    k = k.repeat(1, c, 1, 1)
    k = k.view(-1, 1, k.shape[2], k.shape[3])
    x = x.view(1, -1, x.shape[2], x.shape[3])
    x = torch.nn.functional.conv2d(x, k, bias=None, stride=1, padding=0, groups=n * c)
    x = x.view(n, c, x.shape[2], x.shape[3])
    return x
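A shape sketch of the per-sample grouped-convolution blur (sizes illustrative):

import torch

x = torch.randn(2, 3, 32, 32)        # N x C x H x W
k = torch.ones(2, 1, 5, 5) / 25.0    # N x 1 x h x w, one kernel per sample
y = blur(x, k)
print(y.shape)                       # torch.Size([2, 3, 32, 32]) -- spatial size preserved by replicate padding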
" # modified version of https://github.com/assafshocher/BlindSR_dataset_generator # Kai Zhang # min_var = 0.175 * sf # variance of the gaussian kernel will be sampled between min_var and max_var # max_var = 2.5 * sf
def gen_kernel(k_size=np.array([15, 15]), scale_factor=np.array([4, 4]), min_var=0.6, max_var=10., noise_level=0): """" # modified version of https://github.com/assafshocher/BlindSR_dataset_generator # Kai Zhang # min_var = 0.175 * sf # variance of the gaussian kernel will be sampled between min_var and max_var # max_var = 2.5 * sf """ # Set random eigen-vals (lambdas) and angle (theta) for COV matrix lambda_1 = min_var + np.random.rand() * (max_var - min_var) lambda_2 = min_var + np.random.rand() * (max_var - min_var) theta = np.random.rand() * np.pi # random theta noise = -noise_level + np.random.rand(*k_size) * noise_level * 2 # Set COV matrix using Lambdas and Theta LAMBDA = np.diag([lambda_1, lambda_2]) Q = np.array([[np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)]]) SIGMA = Q @ LAMBDA @ Q.T INV_SIGMA = np.linalg.inv(SIGMA)[None, None, :, :] # Set expectation position (shifting kernel for aligned image) MU = k_size // 2 - 0.5 * (scale_factor - 1) # - 0.5 * (scale_factor - k_size % 2) MU = MU[None, None, :, None] # Create meshgrid for Gaussian [X, Y] = np.meshgrid(range(k_size[0]), range(k_size[1])) Z = np.stack([X, Y], 2)[:, :, :, None] # Calcualte Gaussian for every pixel of the kernel ZZ = Z - MU ZZ_t = ZZ.transpose(0, 1, 3, 2) raw_kernel = np.exp(-0.5 * np.squeeze(ZZ_t @ INV_SIGMA @ ZZ)) * (1 + noise) # shift the kernel so it will be centered # raw_kernel_centered = kernel_shift(raw_kernel, scale_factor) # Normalize the kernel and return # kernel = raw_kernel_centered / np.sum(raw_kernel_centered) kernel = raw_kernel / np.sum(raw_kernel) return kernel
python code from: https://github.com/ronaldosena/imagens-medicas-2/blob/40171a6c259edec7827a6693a93955de2bd39e76/Aulas/aula_2_-_uniform_filter/matlab_fspecial.py
def fspecial(filter_type, *args, **kwargs):
    '''
    python code from:
    https://github.com/ronaldosena/imagens-medicas-2/blob/40171a6c259edec7827a6693a93955de2bd39e76/Aulas/aula_2_-_uniform_filter/matlab_fspecial.py
    '''
    if filter_type == 'gaussian':
        return fspecial_gaussian(*args, **kwargs)
    if filter_type == 'laplacian':
        return fspecial_laplacian(*args, **kwargs)
Args: x: HxWxC image, [0, 1] sf: down-scale factor Return: bicubicly downsampled LR image
def bicubic_degradation(x, sf=3):
    '''
    Args:
        x: HxWxC image, [0, 1]
        sf: down-scale factor

    Return:
        bicubicly downsampled LR image
    '''
    x = util.imresize_np(x, scale=1 / sf)
    return x
blur + bicubic downsampling Args: x: HxWxC image, [0, 1] k: hxw, double sf: down-scale factor Return: downsampled LR image Reference: @inproceedings{zhang2018learning, title={Learning a single convolutional super-resolution network for multiple degradations}, author={Zhang, Kai and Zuo, Wangmeng and Zhang, Lei}, booktitle={IEEE Conference on Computer Vision and Pattern Recognition}, pages={3262--3271}, year={2018} }
def srmd_degradation(x, k, sf=3):
    ''' blur + bicubic downsampling
    Args:
        x: HxWxC image, [0, 1]
        k: hxw, double
        sf: down-scale factor

    Return:
        downsampled LR image

    Reference:
        @inproceedings{zhang2018learning,
          title={Learning a single convolutional super-resolution network for multiple degradations},
          author={Zhang, Kai and Zuo, Wangmeng and Zhang, Lei},
          booktitle={IEEE Conference on Computer Vision and Pattern Recognition},
          pages={3262--3271},
          year={2018}
        }
    '''
    x = ndimage.filters.convolve(x, np.expand_dims(k, axis=2), mode='wrap')  # 'nearest' | 'mirror'
    x = bicubic_degradation(x, sf=sf)
    return x
bicubic downsampling + blur Args: x: HxWxC image, [0, 1] k: hxw, double sf: down-scale factor Return: downsampled LR image Reference: @inproceedings{zhang2019deep, title={Deep Plug-and-Play Super-Resolution for Arbitrary Blur Kernels}, author={Zhang, Kai and Zuo, Wangmeng and Zhang, Lei}, booktitle={IEEE Conference on Computer Vision and Pattern Recognition}, pages={1671--1681}, year={2019} }
def dpsr_degradation(x, k, sf=3):
    ''' bicubic downsampling + blur
    Args:
        x: HxWxC image, [0, 1]
        k: hxw, double
        sf: down-scale factor

    Return:
        downsampled LR image

    Reference:
        @inproceedings{zhang2019deep,
          title={Deep Plug-and-Play Super-Resolution for Arbitrary Blur Kernels},
          author={Zhang, Kai and Zuo, Wangmeng and Zhang, Lei},
          booktitle={IEEE Conference on Computer Vision and Pattern Recognition},
          pages={1671--1681},
          year={2019}
        }
    '''
    x = bicubic_degradation(x, sf=sf)
    x = ndimage.filters.convolve(x, np.expand_dims(k, axis=2), mode='wrap')
    return x
blur + downsampling Args: x: HxWxC image, [0, 1]/[0, 255] k: hxw, double sf: down-scale factor Return: downsampled LR image
def classical_degradation(x, k, sf=3):
    ''' blur + downsampling
    Args:
        x: HxWxC image, [0, 1]/[0, 255]
        k: hxw, double
        sf: down-scale factor

    Return:
        downsampled LR image
    '''
    x = ndimage.filters.convolve(x, np.expand_dims(k, axis=2), mode='wrap')
    # x = filters.correlate(x, np.expand_dims(np.flip(k), axis=2))
    st = 0
    return x[st::sf, st::sf, ...]
USM sharpening. borrowed from real-ESRGAN Input image: I; Blurry image: B. 1. K = I + weight * (I - B) 2. Mask = 1 if abs(I - B) > threshold, else: 0 3. Blur mask: 4. Out = Mask * K + (1 - Mask) * I Args: img (Numpy array): Input image, HWC, BGR; float32, [0, 1]. weight (float): Sharp weight. Default: 1. radius (float): Kernel size of Gaussian blur. Default: 50. threshold (int):
def add_sharpening(img, weight=0.5, radius=50, threshold=10):
    """USM sharpening. borrowed from real-ESRGAN
    Input image: I; Blurry image: B.
    1. K = I + weight * (I - B)
    2. Mask = 1 if abs(I - B) > threshold, else: 0
    3. Blur mask:
    4. Out = Mask * K + (1 - Mask) * I

    Args:
        img (Numpy array): Input image, HWC, BGR; float32, [0, 1].
        weight (float): Sharp weight. Default: 1.
        radius (float): Kernel size of Gaussian blur. Default: 50.
        threshold (int):
    """
    if radius % 2 == 0:
        radius += 1
    blur = cv2.GaussianBlur(img, (radius, radius), 0)
    residual = img - blur
    mask = np.abs(residual) * 255 > threshold
    mask = mask.astype('float32')
    soft_mask = cv2.GaussianBlur(mask, (radius, radius), 0)

    K = img + weight * residual
    K = np.clip(K, 0, 1)
    return soft_mask * K + (1 - soft_mask) * img
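A minimal sketch, assuming OpenCV (cv2) and NumPy are available as in the surrounding module:

import numpy as np

img = np.random.rand(64, 64, 3).astype(np.float32)   # HWC, float32, [0, 1]
sharp = add_sharpening(img)                           # defaults: weight=0.5, radius=50, threshold=10
print(sharp.shape, sharp.dtype)                       # (64, 64, 3) float32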
This is the degradation model of BSRGAN from the paper "Designing a Practical Degradation Model for Deep Blind Image Super-Resolution" ---------- img: HXWXC, [0, 1], its size should be large than (lq_patchsizexsf)x(lq_patchsizexsf) sf: scale factor isp_model: camera ISP model Returns ------- img: low-quality patch, size: lq_patchsizeXlq_patchsizeXC, range: [0, 1] hq: corresponding high-quality patch, size: (lq_patchsizexsf)X(lq_patchsizexsf)XC, range: [0, 1]
def degradation_bsrgan(img, sf=4, lq_patchsize=72, isp_model=None): """ This is the degradation model of BSRGAN from the paper "Designing a Practical Degradation Model for Deep Blind Image Super-Resolution" ---------- img: HXWXC, [0, 1], its size should be large than (lq_patchsizexsf)x(lq_patchsizexsf) sf: scale factor isp_model: camera ISP model Returns ------- img: low-quality patch, size: lq_patchsizeXlq_patchsizeXC, range: [0, 1] hq: corresponding high-quality patch, size: (lq_patchsizexsf)X(lq_patchsizexsf)XC, range: [0, 1] """ isp_prob, jpeg_prob, scale2_prob = 0.25, 0.9, 0.25 sf_ori = sf h1, w1 = img.shape[:2] img = img.copy()[:w1 - w1 % sf, :h1 - h1 % sf, ...] # mod crop h, w = img.shape[:2] if h < lq_patchsize * sf or w < lq_patchsize * sf: raise ValueError(f'img size ({h1}X{w1}) is too small!') hq = img.copy() if sf == 4 and random.random() < scale2_prob: # downsample1 if np.random.rand() < 0.5: img = cv2.resize(img, (int(1 / 2 * img.shape[1]), int(1 / 2 * img.shape[0])), interpolation=random.choice([1, 2, 3])) else: img = util.imresize_np(img, 1 / 2, True) img = np.clip(img, 0.0, 1.0) sf = 2 shuffle_order = random.sample(range(7), 7) idx1, idx2 = shuffle_order.index(2), shuffle_order.index(3) if idx1 > idx2: # keep downsample3 last shuffle_order[idx1], shuffle_order[idx2] = shuffle_order[idx2], shuffle_order[idx1] for i in shuffle_order: if i == 0: img = add_blur(img, sf=sf) elif i == 1: img = add_blur(img, sf=sf) elif i == 2: a, b = img.shape[1], img.shape[0] # downsample2 if random.random() < 0.75: sf1 = random.uniform(1, 2 * sf) img = cv2.resize(img, (int(1 / sf1 * img.shape[1]), int(1 / sf1 * img.shape[0])), interpolation=random.choice([1, 2, 3])) else: k = fspecial('gaussian', 25, random.uniform(0.1, 0.6 * sf)) k_shifted = shift_pixel(k, sf) k_shifted = k_shifted / k_shifted.sum() # blur with shifted kernel img = ndimage.filters.convolve(img, np.expand_dims(k_shifted, axis=2), mode='mirror') img = img[0::sf, 0::sf, ...] # nearest downsampling img = np.clip(img, 0.0, 1.0) elif i == 3: # downsample3 img = cv2.resize(img, (int(1 / sf * a), int(1 / sf * b)), interpolation=random.choice([1, 2, 3])) img = np.clip(img, 0.0, 1.0) elif i == 4: # add Gaussian noise img = add_Gaussian_noise(img, noise_level1=2, noise_level2=25) elif i == 5: # add JPEG noise if random.random() < jpeg_prob: img = add_JPEG_noise(img) elif i == 6: # add processed camera sensor noise if random.random() < isp_prob and isp_model is not None: with torch.no_grad(): img, hq = isp_model.forward(img.copy(), hq) # add final JPEG compression noise img = add_JPEG_noise(img) # random crop img, hq = random_crop(img, hq, sf_ori, lq_patchsize) return img, hq
This is the degradation model of BSRGAN from the paper "Designing a Practical Degradation Model for Deep Blind Image Super-Resolution" ---------- sf: scale factor isp_model: camera ISP model Returns ------- img: low-quality patch, size: lq_patchsizeXlq_patchsizeXC, range: [0, 1] hq: corresponding high-quality patch, size: (lq_patchsizexsf)X(lq_patchsizexsf)XC, range: [0, 1]
def degradation_bsrgan_variant(image, sf=4, isp_model=None): """ This is the degradation model of BSRGAN from the paper "Designing a Practical Degradation Model for Deep Blind Image Super-Resolution" ---------- sf: scale factor isp_model: camera ISP model Returns ------- img: low-quality patch, size: lq_patchsizeXlq_patchsizeXC, range: [0, 1] hq: corresponding high-quality patch, size: (lq_patchsizexsf)X(lq_patchsizexsf)XC, range: [0, 1] """ image = util.uint2single(image) isp_prob, jpeg_prob, scale2_prob = 0.25, 0.9, 0.25 sf_ori = sf h1, w1 = image.shape[:2] image = image.copy()[:w1 - w1 % sf, :h1 - h1 % sf, ...] # mod crop h, w = image.shape[:2] hq = image.copy() if sf == 4 and random.random() < scale2_prob: # downsample1 if np.random.rand() < 0.5: image = cv2.resize(image, (int(1 / 2 * image.shape[1]), int(1 / 2 * image.shape[0])), interpolation=random.choice([1, 2, 3])) else: image = util.imresize_np(image, 1 / 2, True) image = np.clip(image, 0.0, 1.0) sf = 2 shuffle_order = random.sample(range(7), 7) idx1, idx2 = shuffle_order.index(2), shuffle_order.index(3) if idx1 > idx2: # keep downsample3 last shuffle_order[idx1], shuffle_order[idx2] = shuffle_order[idx2], shuffle_order[idx1] for i in shuffle_order: if i == 0: image = add_blur(image, sf=sf) elif i == 1: image = add_blur(image, sf=sf) elif i == 2: a, b = image.shape[1], image.shape[0] # downsample2 if random.random() < 0.75: sf1 = random.uniform(1, 2 * sf) image = cv2.resize(image, (int(1 / sf1 * image.shape[1]), int(1 / sf1 * image.shape[0])), interpolation=random.choice([1, 2, 3])) else: k = fspecial('gaussian', 25, random.uniform(0.1, 0.6 * sf)) k_shifted = shift_pixel(k, sf) k_shifted = k_shifted / k_shifted.sum() # blur with shifted kernel image = ndimage.filters.convolve(image, np.expand_dims(k_shifted, axis=2), mode='mirror') image = image[0::sf, 0::sf, ...] # nearest downsampling image = np.clip(image, 0.0, 1.0) elif i == 3: # downsample3 image = cv2.resize(image, (int(1 / sf * a), int(1 / sf * b)), interpolation=random.choice([1, 2, 3])) image = np.clip(image, 0.0, 1.0) elif i == 4: # add Gaussian noise image = add_Gaussian_noise(image, noise_level1=2, noise_level2=25) elif i == 5: # add JPEG noise if random.random() < jpeg_prob: image = add_JPEG_noise(image) # elif i == 6: # # add processed camera sensor noise # if random.random() < isp_prob and isp_model is not None: # with torch.no_grad(): # img, hq = isp_model.forward(img.copy(), hq) # add final JPEG compression noise image = add_JPEG_noise(image) image = util.single2uint(image) example = {"image":image} return example
This is an extended degradation model by combining the degradation models of BSRGAN and Real-ESRGAN ---------- img: HXWXC, [0, 1], its size should be large than (lq_patchsizexsf)x(lq_patchsizexsf) sf: scale factor use_shuffle: the degradation shuffle use_sharp: sharpening the img Returns ------- img: low-quality patch, size: lq_patchsizeXlq_patchsizeXC, range: [0, 1] hq: corresponding high-quality patch, size: (lq_patchsizexsf)X(lq_patchsizexsf)XC, range: [0, 1]
def degradation_bsrgan_plus(img, sf=4, shuffle_prob=0.5, use_sharp=True, lq_patchsize=64, isp_model=None): """ This is an extended degradation model by combining the degradation models of BSRGAN and Real-ESRGAN ---------- img: HXWXC, [0, 1], its size should be large than (lq_patchsizexsf)x(lq_patchsizexsf) sf: scale factor use_shuffle: the degradation shuffle use_sharp: sharpening the img Returns ------- img: low-quality patch, size: lq_patchsizeXlq_patchsizeXC, range: [0, 1] hq: corresponding high-quality patch, size: (lq_patchsizexsf)X(lq_patchsizexsf)XC, range: [0, 1] """ h1, w1 = img.shape[:2] img = img.copy()[:w1 - w1 % sf, :h1 - h1 % sf, ...] # mod crop h, w = img.shape[:2] if h < lq_patchsize * sf or w < lq_patchsize * sf: raise ValueError(f'img size ({h1}X{w1}) is too small!') if use_sharp: img = add_sharpening(img) hq = img.copy() if random.random() < shuffle_prob: shuffle_order = random.sample(range(13), 13) else: shuffle_order = list(range(13)) # local shuffle for noise, JPEG is always the last one shuffle_order[2:6] = random.sample(shuffle_order[2:6], len(range(2, 6))) shuffle_order[9:13] = random.sample(shuffle_order[9:13], len(range(9, 13))) poisson_prob, speckle_prob, isp_prob = 0.1, 0.1, 0.1 for i in shuffle_order: if i == 0: img = add_blur(img, sf=sf) elif i == 1: img = add_resize(img, sf=sf) elif i == 2: img = add_Gaussian_noise(img, noise_level1=2, noise_level2=25) elif i == 3: if random.random() < poisson_prob: img = add_Poisson_noise(img) elif i == 4: if random.random() < speckle_prob: img = add_speckle_noise(img) elif i == 5: if random.random() < isp_prob and isp_model is not None: with torch.no_grad(): img, hq = isp_model.forward(img.copy(), hq) elif i == 6: img = add_JPEG_noise(img) elif i == 7: img = add_blur(img, sf=sf) elif i == 8: img = add_resize(img, sf=sf) elif i == 9: img = add_Gaussian_noise(img, noise_level1=2, noise_level2=25) elif i == 10: if random.random() < poisson_prob: img = add_Poisson_noise(img) elif i == 11: if random.random() < speckle_prob: img = add_speckle_noise(img) elif i == 12: if random.random() < isp_prob and isp_model is not None: with torch.no_grad(): img, hq = isp_model.forward(img.copy(), hq) else: print('check the shuffle!') # resize to desired size img = cv2.resize(img, (int(1 / sf * hq.shape[1]), int(1 / sf * hq.shape[0])), interpolation=random.choice([1, 2, 3])) # add final JPEG compression noise img = add_JPEG_noise(img) # random crop img, hq = random_crop(img, hq, sf, lq_patchsize) return img, hq
This is the degradation model of BSRGAN from the paper "Designing a Practical Degradation Model for Deep Blind Image Super-Resolution" ---------- img: HXWXC, [0, 1], its size should be large than (lq_patchsizexsf)x(lq_patchsizexsf) sf: scale factor isp_model: camera ISP model Returns ------- img: low-quality patch, size: lq_patchsizeXlq_patchsizeXC, range: [0, 1] hq: corresponding high-quality patch, size: (lq_patchsizexsf)X(lq_patchsizexsf)XC, range: [0, 1]
def degradation_bsrgan(img, sf=4, lq_patchsize=72, isp_model=None): """ This is the degradation model of BSRGAN from the paper "Designing a Practical Degradation Model for Deep Blind Image Super-Resolution" ---------- img: HXWXC, [0, 1], its size should be large than (lq_patchsizexsf)x(lq_patchsizexsf) sf: scale factor isp_model: camera ISP model Returns ------- img: low-quality patch, size: lq_patchsizeXlq_patchsizeXC, range: [0, 1] hq: corresponding high-quality patch, size: (lq_patchsizexsf)X(lq_patchsizexsf)XC, range: [0, 1] """ isp_prob, jpeg_prob, scale2_prob = 0.25, 0.9, 0.25 sf_ori = sf h1, w1 = img.shape[:2] img = img.copy()[:w1 - w1 % sf, :h1 - h1 % sf, ...] # mod crop h, w = img.shape[:2] if h < lq_patchsize * sf or w < lq_patchsize * sf: raise ValueError(f'img size ({h1}X{w1}) is too small!') hq = img.copy() if sf == 4 and random.random() < scale2_prob: # downsample1 if np.random.rand() < 0.5: img = cv2.resize(img, (int(1 / 2 * img.shape[1]), int(1 / 2 * img.shape[0])), interpolation=random.choice([1, 2, 3])) else: img = util.imresize_np(img, 1 / 2, True) img = np.clip(img, 0.0, 1.0) sf = 2 shuffle_order = random.sample(range(7), 7) idx1, idx2 = shuffle_order.index(2), shuffle_order.index(3) if idx1 > idx2: # keep downsample3 last shuffle_order[idx1], shuffle_order[idx2] = shuffle_order[idx2], shuffle_order[idx1] for i in shuffle_order: if i == 0: img = add_blur(img, sf=sf) elif i == 1: img = add_blur(img, sf=sf) elif i == 2: a, b = img.shape[1], img.shape[0] # downsample2 if random.random() < 0.75: sf1 = random.uniform(1, 2 * sf) img = cv2.resize(img, (int(1 / sf1 * img.shape[1]), int(1 / sf1 * img.shape[0])), interpolation=random.choice([1, 2, 3])) else: k = fspecial('gaussian', 25, random.uniform(0.1, 0.6 * sf)) k_shifted = shift_pixel(k, sf) k_shifted = k_shifted / k_shifted.sum() # blur with shifted kernel img = ndimage.filters.convolve(img, np.expand_dims(k_shifted, axis=2), mode='mirror') img = img[0::sf, 0::sf, ...] # nearest downsampling img = np.clip(img, 0.0, 1.0) elif i == 3: # downsample3 img = cv2.resize(img, (int(1 / sf * a), int(1 / sf * b)), interpolation=random.choice([1, 2, 3])) img = np.clip(img, 0.0, 1.0) elif i == 4: # add Gaussian noise img = add_Gaussian_noise(img, noise_level1=2, noise_level2=8) elif i == 5: # add JPEG noise if random.random() < jpeg_prob: img = add_JPEG_noise(img) elif i == 6: # add processed camera sensor noise if random.random() < isp_prob and isp_model is not None: with torch.no_grad(): img, hq = isp_model.forward(img.copy(), hq) # add final JPEG compression noise img = add_JPEG_noise(img) # random crop img, hq = random_crop(img, hq, sf_ori, lq_patchsize) return img, hq
This is the degradation model of BSRGAN from the paper "Designing a Practical Degradation Model for Deep Blind Image Super-Resolution" ---------- sf: scale factor isp_model: camera ISP model Returns ------- img: low-quality patch, size: lq_patchsizeXlq_patchsizeXC, range: [0, 1] hq: corresponding high-quality patch, size: (lq_patchsizexsf)X(lq_patchsizexsf)XC, range: [0, 1]
def degradation_bsrgan_variant(image, sf=4, isp_model=None): """ This is the degradation model of BSRGAN from the paper "Designing a Practical Degradation Model for Deep Blind Image Super-Resolution" ---------- sf: scale factor isp_model: camera ISP model Returns ------- img: low-quality patch, size: lq_patchsizeXlq_patchsizeXC, range: [0, 1] hq: corresponding high-quality patch, size: (lq_patchsizexsf)X(lq_patchsizexsf)XC, range: [0, 1] """ image = util.uint2single(image) isp_prob, jpeg_prob, scale2_prob = 0.25, 0.9, 0.25 sf_ori = sf h1, w1 = image.shape[:2] image = image.copy()[:w1 - w1 % sf, :h1 - h1 % sf, ...] # mod crop h, w = image.shape[:2] hq = image.copy() if sf == 4 and random.random() < scale2_prob: # downsample1 if np.random.rand() < 0.5: image = cv2.resize(image, (int(1 / 2 * image.shape[1]), int(1 / 2 * image.shape[0])), interpolation=random.choice([1, 2, 3])) else: image = util.imresize_np(image, 1 / 2, True) image = np.clip(image, 0.0, 1.0) sf = 2 shuffle_order = random.sample(range(7), 7) idx1, idx2 = shuffle_order.index(2), shuffle_order.index(3) if idx1 > idx2: # keep downsample3 last shuffle_order[idx1], shuffle_order[idx2] = shuffle_order[idx2], shuffle_order[idx1] for i in shuffle_order: if i == 0: image = add_blur(image, sf=sf) # elif i == 1: # image = add_blur(image, sf=sf) if i == 0: pass elif i == 2: a, b = image.shape[1], image.shape[0] # downsample2 if random.random() < 0.8: sf1 = random.uniform(1, 2 * sf) image = cv2.resize(image, (int(1 / sf1 * image.shape[1]), int(1 / sf1 * image.shape[0])), interpolation=random.choice([1, 2, 3])) else: k = fspecial('gaussian', 25, random.uniform(0.1, 0.6 * sf)) k_shifted = shift_pixel(k, sf) k_shifted = k_shifted / k_shifted.sum() # blur with shifted kernel image = ndimage.filters.convolve(image, np.expand_dims(k_shifted, axis=2), mode='mirror') image = image[0::sf, 0::sf, ...] # nearest downsampling image = np.clip(image, 0.0, 1.0) elif i == 3: # downsample3 image = cv2.resize(image, (int(1 / sf * a), int(1 / sf * b)), interpolation=random.choice([1, 2, 3])) image = np.clip(image, 0.0, 1.0) elif i == 4: # add Gaussian noise image = add_Gaussian_noise(image, noise_level1=1, noise_level2=2) elif i == 5: # add JPEG noise if random.random() < jpeg_prob: image = add_JPEG_noise(image) # # elif i == 6: # # add processed camera sensor noise # if random.random() < isp_prob and isp_model is not None: # with torch.no_grad(): # img, hq = isp_model.forward(img.copy(), hq) # add final JPEG compression noise image = add_JPEG_noise(image) image = util.single2uint(image) example = {"image": image} return example
imgs: list, N images of size WxHxC
def imssave(imgs, img_path):
    """
    imgs: list, N images of size WxHxC
    """
    img_name, ext = os.path.splitext(os.path.basename(img_path))
    for i, img in enumerate(imgs):
        if img.ndim == 3:
            img = img[:, :, [2, 1, 0]]  # RGB -> BGR for cv2.imwrite
        new_path = os.path.join(os.path.dirname(img_path), img_name + '_s{:04d}'.format(i) + '.png')
        cv2.imwrite(new_path, img)
Split the large images in original_dataroot into small overlapping patches of size (p_size)x(p_size) and save them into taget_dataroot; only images larger than (p_max)x(p_max) are split.
Args:
    original_dataroot: folder with the source images
    taget_dataroot: destination folder for the patches
    p_size: size of the small patches
    p_overlap: overlap between adjacent patches (the patch size used in training is a good choice)
    p_max: images smaller than (p_max)x(p_max) are kept unchanged
def split_imageset(original_dataroot, taget_dataroot, n_channels=3, p_size=800, p_overlap=96, p_max=1000):
    """
    Split the large images in original_dataroot into small overlapping patches of size
    (p_size)x(p_size) and save them into taget_dataroot; only images larger than
    (p_max)x(p_max) are split.
    Args:
        original_dataroot: folder with the source images
        taget_dataroot: destination folder for the patches
        p_size: size of the small patches
        p_overlap: overlap between adjacent patches (the patch size used in training is a good choice)
        p_max: images smaller than (p_max)x(p_max) are kept unchanged
    """
    paths = get_image_paths(original_dataroot)
    for img_path in paths:
        img = imread_uint(img_path, n_channels=n_channels)
        patches = patches_from_image(img, p_size, p_overlap, p_max)
        imssave(patches, os.path.join(taget_dataroot, os.path.basename(img_path)))
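A small usage sketch; both directory names are hypothetical, and the destination folder is assumed to exist before cv2.imwrite is called.

split_imageset('datasets/DIV2K/HR', 'datasets/DIV2K/HR_sub',
               n_channels=3, p_size=800, p_overlap=96, p_max=1000)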
Converts a torch Tensor into an image Numpy array of BGR channel order Input: 4D(B,(3/1),H,W), 3D(C,H,W), or 2D(H,W), any range, RGB channel order Output: 3D(H,W,C) or 2D(H,W), [0,255], np.uint8 (default)
def tensor2img(tensor, out_type=np.uint8, min_max=(0, 1)):
    '''
    Converts a torch Tensor into an image Numpy array of BGR channel order
    Input: 4D(B,(3/1),H,W), 3D(C,H,W), or 2D(H,W), any range, RGB channel order
    Output: 3D(H,W,C) or 2D(H,W), [0,255], np.uint8 (default)
    '''
    tensor = tensor.squeeze().float().cpu().clamp_(*min_max)  # squeeze first, then clamp
    tensor = (tensor - min_max[0]) / (min_max[1] - min_max[0])  # to range [0,1]
    n_dim = tensor.dim()
    if n_dim == 4:
        n_img = len(tensor)
        img_np = make_grid(tensor, nrow=int(math.sqrt(n_img)), normalize=False).numpy()
        img_np = np.transpose(img_np[[2, 1, 0], :, :], (1, 2, 0))  # HWC, BGR
    elif n_dim == 3:
        img_np = tensor.numpy()
        img_np = np.transpose(img_np[[2, 1, 0], :, :], (1, 2, 0))  # HWC, BGR
    elif n_dim == 2:
        img_np = tensor.numpy()
    else:
        raise TypeError(
            'Only support 4D, 3D and 2D tensor. But received with dimension: {:d}'.format(n_dim))
    if out_type == np.uint8:
        img_np = (img_np * 255.0).round()
        # Important. Unlike MATLAB, numpy.uint8() WILL NOT round by default.
    return img_np.astype(out_type)
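A quick usage sketch, assuming the module's own imports (torch, torchvision's make_grid, cv2) are available; 'grid.png' is a hypothetical output path.

import cv2
import torch

batch = torch.rand(4, 3, 64, 64)            # BCHW, RGB, values in [0, 1]
grid = tensor2img(batch)                     # uint8 HxWxC grid image, BGR channel order
cv2.imwrite('grid.png', grid)                # hypothetical output path
single = tensor2img(torch.rand(3, 64, 64))   # works for a single CHW image as well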
Kai Zhang (github: https://github.com/cszn)
def augment_img(img, mode=0): '''Kai Zhang (github: https://github.com/cszn) ''' if mode == 0: return img elif mode == 1: return np.flipud(np.rot90(img)) elif mode == 2: return np.flipud(img) elif mode == 3: return np.rot90(img, k=3) elif mode == 4: return np.flipud(np.rot90(img, k=2)) elif mode == 5: return np.rot90(img) elif mode == 6: return np.rot90(img, k=2) elif mode == 7: return np.flipud(np.rot90(img, k=3))
Kai Zhang (github: https://github.com/cszn)
def augment_img_tensor4(img, mode=0): '''Kai Zhang (github: https://github.com/cszn) ''' if mode == 0: return img elif mode == 1: return img.rot90(1, [2, 3]).flip([2]) elif mode == 2: return img.flip([2]) elif mode == 3: return img.rot90(3, [2, 3]) elif mode == 4: return img.rot90(2, [2, 3]).flip([2]) elif mode == 5: return img.rot90(1, [2, 3]) elif mode == 6: return img.rot90(2, [2, 3]) elif mode == 7: return img.rot90(3, [2, 3]).flip([2])
Kai Zhang (github: https://github.com/cszn)
def augment_img_tensor(img, mode=0): '''Kai Zhang (github: https://github.com/cszn) ''' img_size = img.size() img_np = img.data.cpu().numpy() if len(img_size) == 3: img_np = np.transpose(img_np, (1, 2, 0)) elif len(img_size) == 4: img_np = np.transpose(img_np, (2, 3, 1, 0)) img_np = augment_img(img_np, mode=mode) img_tensor = torch.from_numpy(np.ascontiguousarray(img_np)) if len(img_size) == 3: img_tensor = img_tensor.permute(2, 0, 1) elif len(img_size) == 4: img_tensor = img_tensor.permute(3, 2, 0, 1) return img_tensor.type_as(img)
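A small sanity check (not from the original source): augment_img_tensor routes through augment_img, so the NumPy and tensor code paths should agree for all eight modes.

import numpy as np
import torch

img_np = np.random.rand(32, 32, 3).astype(np.float32)    # HWC
img_t = torch.from_numpy(img_np).permute(2, 0, 1)         # CHW

for mode in range(8):
    a = augment_img(img_np, mode=mode)
    b = augment_img_tensor(img_t, mode=mode).permute(1, 2, 0).numpy()
    assert np.allclose(a, b)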
same as matlab rgb2ycbcr only_y: only return Y channel Input: uint8, [0, 255] float, [0, 1]
def rgb2ycbcr(img, only_y=True):
    '''same as matlab rgb2ycbcr
    only_y: only return Y channel
    Input:
        uint8, [0, 255]
        float, [0, 1]
    '''
    in_img_type = img.dtype
    img = img.astype(np.float32)  # work on a float copy (avoids modifying the caller's array in place)
    if in_img_type != np.uint8:
        img *= 255.  # bring float input from [0, 1] to [0, 255]
    # convert
    if only_y:
        rlt = np.dot(img, [65.481, 128.553, 24.966]) / 255.0 + 16.0
    else:
        rlt = np.matmul(img, [[65.481, -37.797, 112.0], [128.553, -74.203, -93.786],
                              [24.966, 112.0, -18.214]]) / 255.0 + [16, 128, 128]
    if in_img_type == np.uint8:
        rlt = rlt.round()
    else:
        rlt /= 255.
    return rlt.astype(in_img_type)
same as matlab ycbcr2rgb Input: uint8, [0, 255] float, [0, 1]
def ycbcr2rgb(img):
    '''same as matlab ycbcr2rgb
    Input:
        uint8, [0, 255]
        float, [0, 1]
    '''
    in_img_type = img.dtype
    img = img.astype(np.float32)  # work on a float copy (avoids modifying the caller's array in place)
    if in_img_type != np.uint8:
        img *= 255.  # bring float input from [0, 1] to [0, 255]
    # convert
    rlt = np.matmul(img, [[0.00456621, 0.00456621, 0.00456621], [0, -0.00153632, 0.00791071],
                          [0.00625893, -0.00318811, 0]]) * 255.0 + [-222.921, 135.576, -276.836]
    if in_img_type == np.uint8:
        rlt = rlt.round()
    else:
        rlt /= 255.
    return rlt.astype(in_img_type)
bgr version of rgb2ycbcr only_y: only return Y channel Input: uint8, [0, 255] float, [0, 1]
def bgr2ycbcr(img, only_y=True):
    '''bgr version of rgb2ycbcr
    only_y: only return Y channel
    Input:
        uint8, [0, 255]
        float, [0, 1]
    '''
    in_img_type = img.dtype
    img = img.astype(np.float32)  # work on a float copy (avoids modifying the caller's array in place)
    if in_img_type != np.uint8:
        img *= 255.  # bring float input from [0, 1] to [0, 255]
    # convert
    if only_y:
        rlt = np.dot(img, [24.966, 128.553, 65.481]) / 255.0 + 16.0
    else:
        rlt = np.matmul(img, [[24.966, 112.0, -18.214], [128.553, -74.203, -93.786],
                              [65.481, -37.797, 112.0]]) / 255.0 + [16, 128, 128]
    if in_img_type == np.uint8:
        rlt = rlt.round()
    else:
        rlt /= 255.
    return rlt.astype(in_img_type)
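A brief usage sketch of the color-space helpers above on a random uint8 image; bgr2ycbcr is the same conversion but for OpenCV-style channel order.

import numpy as np

rgb = (np.random.rand(16, 16, 3) * 255).astype(np.uint8)
y = rgb2ycbcr(rgb, only_y=True)        # uint8 Y channel (studio-swing range, roughly 16-235)
ycbcr = rgb2ycbcr(rgb, only_y=False)   # full YCbCr image, uint8
back = ycbcr2rgb(ycbcr)                # approximate RGB reconstruction, uint8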
Calculate SSIM; produces the same output as MATLAB's implementation.
img1, img2: [0, 255]
def calculate_ssim(img1, img2, border=0):
    '''Calculate SSIM; produces the same output as MATLAB's implementation.
    img1, img2: [0, 255]
    '''
    if not img1.shape == img2.shape:
        raise ValueError('Input images must have the same dimensions.')
    h, w = img1.shape[:2]
    img1 = img1[border:h-border, border:w-border]
    img2 = img2[border:h-border, border:w-border]

    if img1.ndim == 2:
        return ssim(img1, img2)
    elif img1.ndim == 3:
        if img1.shape[2] == 3:
            ssims = []
            for i in range(3):
                ssims.append(ssim(img1[:, :, i], img2[:, :, i]))
            return np.array(ssims).mean()
        elif img1.shape[2] == 1:
            return ssim(np.squeeze(img1), np.squeeze(img2))
    else:
        raise ValueError('Wrong input image dimensions.')
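A minimal usage sketch on synthetic data, assuming the single-channel ssim helper used internally is defined elsewhere in this module.

import numpy as np

ref = (np.random.rand(64, 64) * 255).round()
noisy = np.clip(ref + np.random.randn(64, 64) * 5, 0, 255)
score = calculate_ssim(ref, noisy, border=4)   # crop a 4-pixel border before comparing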
Adapted from https://github.com/hche11/VGGSound/blob/master/utils.py
Calculate statistics including mAP, AUC, and d-prime.
    Args:
        targets: 1d tensor, (dataset_size, ) - integer class labels
        outputs: 2d tensor, (dataset_size, classes_num) - pre-softmax logits
        topk: tuple
    Returns:
        metrics_dict: a dict of metrics
def metrics(targets, outputs, topk=(1, 5)):
    """
    Adapted from https://github.com/hche11/VGGSound/blob/master/utils.py

    Calculate statistics including mAP, AUC, and d-prime.
        Args:
            targets: 1d tensor, (dataset_size, ) - integer class labels
            outputs: 2d tensor, (dataset_size, classes_num) - pre-softmax logits
            topk: tuple
        Returns:
            metrics_dict: a dict of metrics
    """
    metrics_dict = dict()

    num_cls = outputs.shape[-1]

    # accuracy@k
    _, preds = torch.topk(outputs, k=max(topk), dim=1)
    correct_for_maxtopk = preds == targets.view(-1, 1).expand_as(preds)
    for k in topk:
        metrics_dict[f'accuracy_{k}'] = float(correct_for_maxtopk[:, :k].sum() / correct_for_maxtopk.shape[0])

    # avg precision, average roc_auc, and dprime
    targets = torch.nn.functional.one_hot(targets, num_classes=num_cls)

    # predicted class probabilities (softmax preserves the argmax ordering)
    targets_pred = torch.softmax(outputs, dim=1)

    targets = targets.numpy()
    targets_pred = targets_pred.numpy()

    # one-vs-rest
    avg_p = [average_precision_score(targets[:, c], targets_pred[:, c], average=None) for c in range(num_cls)]
    try:
        roc_aucs = [roc_auc_score(targets[:, c], targets_pred[:, c], average=None) for c in range(num_cls)]
    except ValueError:
        logger.warning('Weird... Some classes never occurred in targets. Do not trust the metrics.')
        roc_aucs = np.array([0.5])
        avg_p = np.array([0])

    metrics_dict['mAP'] = np.mean(avg_p)
    metrics_dict['mROCAUC'] = np.mean(roc_aucs)
    # Percent point function (ppf), the inverse of the CDF (percentiles).
    metrics_dict['dprime'] = scipy.stats.norm().ppf(metrics_dict['mROCAUC']) * np.sqrt(2)

    return metrics_dict
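A quick sketch on random logits, assuming the module already imports torch, numpy, scipy, sklearn's average_precision_score/roc_auc_score and a logger.

import torch

outputs = torch.randn(200, 10)              # pre-softmax logits, (dataset_size, classes_num)
targets = torch.randint(0, 10, (200,))      # integer class labels, (dataset_size,)
m = metrics(targets, outputs, topk=(1, 5))
print(m['accuracy_1'], m['accuracy_5'], m['mAP'], m['mROCAUC'], m['dprime'])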
Returns the number of steps trained from a checkpoint based on the filename. Filename format assumed to be something like "/path/to/semantic.transformer.20000.pt" which is for 20k train steps. Returns 20000 in that case.
def checkpoint_num_steps(checkpoint_path): """Returns the number of steps trained from a checkpoint based on the filename. Filename format assumed to be something like "/path/to/semantic.transformer.20000.pt" which is for 20k train steps. Returns 20000 in that case. """ results = re.findall(r'\d+', str(checkpoint_path)) if len(results) == 0: return 0 return int(results[-1])
Mixes in a background sound into the audio @param audio: the path to the audio or a variable of type np.ndarray that will be augmented @param sample_rate: the audio sample rate of the inputted audio @param background_audio: the path to the background audio or a variable of type np.ndarray containing the background audio. If set to `None`, the background audio will be white noise @param snr_level_db: signal-to-noise ratio in dB @param seed: a NumPy random generator (or seed) such that the results remain reproducible @param output_path: the path in which the resulting audio will be stored. If None, the resulting np.ndarray will still be returned @param metadata: if set to be a list, metadata about the function execution including its name, the source & dest duration, sample rates, etc. will be appended to the inputted list. If set to None, no metadata will be appended @returns: the augmented audio array and sample rate
def add_background_noise( audio: Union[str, np.ndarray], sample_rate: int = DEFAULT_SAMPLE_RATE, background_audio: Optional[Union[str, np.ndarray]] = None, snr_level_db: float = 10.0, seed: Optional[audutils.RNGSeed] = None, output_path: Optional[str] = None, metadata: Optional[List[Dict[str, Any]]] = None, ) -> Tuple[np.ndarray, int]: """ Mixes in a background sound into the audio @param audio: the path to the audio or a variable of type np.ndarray that will be augmented @param sample_rate: the audio sample rate of the inputted audio @param background_audio: the path to the background audio or a variable of type np.ndarray containing the background audio. If set to `None`, the background audio will be white noise @param snr_level_db: signal-to-noise ratio in dB @param seed: a NumPy random generator (or seed) such that the results remain reproducible @param output_path: the path in which the resulting audio will be stored. If None, the resulting np.ndarray will still be returned @param metadata: if set to be a list, metadata about the function execution including its name, the source & dest duration, sample rates, etc. will be appended to the inputted list. If set to None, no metadata will be appended @returns: the augmented audio array and sample rate """ assert isinstance( snr_level_db, (int, float) ), "Expected 'snr_level_db' to be a number" audio, sample_rate = audutils.validate_and_load_audio(audio, sample_rate) if metadata is not None: func_kwargs = deepcopy(locals()) func_kwargs.pop("metadata") func_kwargs.pop("seed") random_generator = audutils.check_random_state(seed) if background_audio is None: background_audio = random_generator.standard_normal(audio.shape) else: background_audio, background_sr = audutils.validate_and_load_audio( background_audio, sample_rate ) if background_sr != sample_rate: background_audio = resample( torch.tensor(background_audio), background_sr, sample_rate ).numpy() if metadata is not None: func_kwargs["background_duration"] = background_audio.shape[-1] / sample_rate audio_rms = np.sqrt(np.mean(np.square(audio), axis=-1)) bg_rms = np.sqrt(np.mean(np.square(background_audio), axis=-1)) desired_bg_rms = audio_rms / (10 ** (snr_level_db / 20)) if isinstance(bg_rms, np.number) and isinstance(desired_bg_rms, np.ndarray): desired_bg_rms = desired_bg_rms.mean() elif isinstance(bg_rms, np.ndarray) and isinstance(desired_bg_rms, np.number): bg_rms = bg_rms.mean() elif isinstance(bg_rms, np.ndarray) and isinstance(desired_bg_rms, np.ndarray): bg_rms = bg_rms.reshape((bg_rms.shape[0], 1)) desired_bg_rms = desired_bg_rms.reshape((desired_bg_rms.shape[0], 1)) assert bg_rms.shape == desired_bg_rms.shape, ( "Handling stereo audio and stereo background audio with different " "amounts of channels is currently unsupported" ) background_audio *= desired_bg_rms / bg_rms while background_audio.shape[-1] < audio.shape[-1]: axis = 0 if background_audio.ndim == 1 else 1 background_audio = np.concatenate( (background_audio, background_audio), axis=axis ) background_audio = ( background_audio[: audio.shape[-1]] if background_audio.ndim == 1 else background_audio[:, : audio.shape[-1]] ) aug_audio = audio + background_audio if metadata is not None: audutils.get_metadata( metadata=metadata, function_name="add_background_noise", dst_audio=aug_audio, dst_sample_rate=sample_rate, # pyre-fixme[61]: `func_kwargs` may not be initialized here. **func_kwargs, ) return audutils.ret_and_save_audio(aug_audio, output_path, sample_rate)
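A hedged usage sketch: a random signal stands in for real speech, and leaving background_audio as None mixes in white noise scaled to the requested SNR.

import numpy as np

rng = np.random.default_rng(0)
speech = rng.standard_normal(16000).astype(np.float32)   # stand-in for 1 s of mono audio
noisy, sr = add_background_noise(speech, sample_rate=16000, snr_level_db=10.0, seed=0)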
Apply a user-defined lambda to the audio @param audio: the path to the audio or a variable of type np.ndarray that will be augmented @param sample_rate: the audio sample rate of the inputted audio @param aug_function: the augmentation function to be applied onto the audio (should expect the audio np.ndarray & sample rate int as input, and return the transformed audio & sample rate) @param output_path: the path in which the resulting audio will be stored. If None, the resulting np.ndarray will still be returned @param metadata: if set to be a list, metadata about the function execution including its name, the source & dest duration, sample rates, etc. will be appended to the inputted list. If set to None, no metadata will be appended @param **kwargs: the input attributes to be passed into `aug_function` @returns: the augmented audio array and sample rate
def apply_lambda( audio: Union[str, np.ndarray], sample_rate: int = DEFAULT_SAMPLE_RATE, aug_function: Callable[..., Tuple[np.ndarray, int]] = lambda x, y: (x, y), output_path: Optional[str] = None, metadata: Optional[List[Dict[str, Any]]] = None, **kwargs, ) -> Tuple[np.ndarray, int]: """ Apply a user-defined lambda to the audio @param audio: the path to the audio or a variable of type np.ndarray that will be augmented @param sample_rate: the audio sample rate of the inputted audio @param aug_function: the augmentation function to be applied onto the audio (should expect the audio np.ndarray & sample rate int as input, and return the transformed audio & sample rate) @param output_path: the path in which the resulting audio will be stored. If None, the resulting np.ndarray will still be returned @param metadata: if set to be a list, metadata about the function execution including its name, the source & dest duration, sample rates, etc. will be appended to the inputted list. If set to None, no metadata will be appended @param **kwargs: the input attributes to be passed into `aug_function` @returns: the augmented audio array and sample rate """ assert callable(aug_function), ( repr(type(aug_function).__name__) + " object is not callable" ) audio, sample_rate = audutils.validate_and_load_audio(audio, sample_rate) aug_audio, out_sample_rate = aug_function(audio, sample_rate, **kwargs) audutils.get_metadata( metadata=metadata, function_name="apply_lambda", audio=audio, sample_rate=sample_rate, dst_audio=aug_audio, dst_sample_rate=out_sample_rate, aug_function=aug_function.__name__, output_path=output_path, ) return audutils.ret_and_save_audio(aug_audio, output_path, out_sample_rate)
Changes the volume of the audio @param audio: the path to the audio or a variable of type np.ndarray that will be augmented @param sample_rate: the audio sample rate of the inputted audio @param volume_db: the decibel amount by which to either increase (positive value) or decrease (negative value) the volume of the audio @param output_path: the path in which the resulting audio will be stored. If None, the resulting np.ndarray will still be returned @param metadata: if set to be a list, metadata about the function execution including its name, the source & dest duration, sample rates, etc. will be appended to the inputted list. If set to None, no metadata will be appended @returns: the augmented audio array and sample rate
def change_volume( audio: Union[str, np.ndarray], sample_rate: int = DEFAULT_SAMPLE_RATE, volume_db: float = 0.0, output_path: Optional[str] = None, metadata: Optional[List[Dict[str, Any]]] = None, ) -> Tuple[np.ndarray, int]: """ Changes the volume of the audio @param audio: the path to the audio or a variable of type np.ndarray that will be augmented @param sample_rate: the audio sample rate of the inputted audio @param volume_db: the decibel amount by which to either increase (positive value) or decrease (negative value) the volume of the audio @param output_path: the path in which the resulting audio will be stored. If None, the resulting np.ndarray will still be returned @param metadata: if set to be a list, metadata about the function execution including its name, the source & dest duration, sample rates, etc. will be appended to the inputted list. If set to None, no metadata will be appended @returns: the augmented audio array and sample rate """ assert isinstance(volume_db, (int, float)), "Expected 'volume_db' to be a number" audio, sample_rate = audutils.validate_and_load_audio(audio, sample_rate) num_channels = 1 if audio.ndim == 1 else audio.shape[0] aug_audio = audio.reshape((num_channels, -1)) aug_audio, out_sample_rate = sox_effects.apply_effects_tensor( torch.Tensor(aug_audio), sample_rate, [["vol", str(volume_db), "dB"]] ) aug_audio = aug_audio.numpy() if num_channels == 1: aug_audio = aug_audio.reshape((aug_audio.shape[-1],)) audutils.get_metadata( metadata=metadata, function_name="change_volume", audio=audio, sample_rate=sample_rate, dst_audio=aug_audio, dst_sample_rate=sample_rate, volume_db=volume_db, output_path=output_path, ) return audutils.ret_and_save_audio(aug_audio, output_path, sample_rate)
Adds clicks to the audio at a given regular interval @param audio: the path to the audio or a variable of type np.ndarray that will be augmented @param sample_rate: the audio sample rate of the inputted audio @param seconds_between_clicks: the amount of time between each click that will be added to the audio, in seconds @param snr_level_db: signal-to-noise ratio in dB @param output_path: the path in which the resulting audio will be stored. If None, the resulting np.ndarray will still be returned @param metadata: if set to be a list, metadata about the function execution including its name, the source & dest duration, sample rates, etc. will be appended to the inputted list. If set to None, no metadata will be appended @returns: the augmented audio array and sample rate
def clicks( audio: Union[str, np.ndarray], sample_rate: int = DEFAULT_SAMPLE_RATE, seconds_between_clicks: float = 0.5, snr_level_db: float = 1.0, output_path: Optional[str] = None, metadata: Optional[List[Dict[str, Any]]] = None, ) -> Tuple[np.ndarray, int]: """ Adds clicks to the audio at a given regular interval @param audio: the path to the audio or a variable of type np.ndarray that will be augmented @param sample_rate: the audio sample rate of the inputted audio @param seconds_between_clicks: the amount of time between each click that will be added to the audio, in seconds @param snr_level_db: signal-to-noise ratio in dB @param output_path: the path in which the resulting audio will be stored. If None, the resulting np.ndarray will still be returned @param metadata: if set to be a list, metadata about the function execution including its name, the source & dest duration, sample rates, etc. will be appended to the inputted list. If set to None, no metadata will be appended @returns: the augmented audio array and sample rate """ assert isinstance( seconds_between_clicks, (int, float) ), "Expected 'seconds_between_clicks' to be a number" audio, sample_rate = audutils.validate_and_load_audio(audio, sample_rate) num_samples = audio.shape[-1] seconds_in_audio = num_samples / sample_rate times = np.arange(0, seconds_in_audio, seconds_between_clicks) clicks_audio = librosa.clicks(times=times, sr=sample_rate) aug_audio, out_sample_rate = add_background_noise( audio, sample_rate=sample_rate, background_audio=clicks_audio, snr_level_db=snr_level_db, ) audutils.get_metadata( metadata=metadata, function_name="clicks", audio=audio, sample_rate=sample_rate, dst_audio=aug_audio, dst_sample_rate=out_sample_rate, seconds_between_clicks=seconds_between_clicks, output_path=output_path, clicks_duration=clicks_audio.shape[-1] / sample_rate, ) return audutils.ret_and_save_audio(aug_audio, output_path, out_sample_rate)
Clips the audio using the specified offset and duration factors @param audio: the path to the audio or a variable of type np.ndarray that will be augmented @param sample_rate: the audio sample rate of the inputted audio @param offset_factor: start point of the crop relative to the audio duration (this parameter is multiplied by the audio duration) @param duration_factor: the length of the crop relative to the audio duration (this parameter is multiplied by the audio duration) @param output_path: the path in which the resulting audio will be stored. If None, the resulting np.ndarray will still be returned @param metadata: if set to be a list, metadata about the function execution including its name, the source & dest duration, sample rates, etc. will be appended to the inputted list. If set to None, no metadata will be appended @returns: the augmented audio array and sample rate
def clip(
    audio: Union[str, np.ndarray],
    sample_rate: int = DEFAULT_SAMPLE_RATE,
    offset_factor: float = 0.0,
    duration_factor: float = 1.0,
    output_path: Optional[str] = None,
    metadata: Optional[List[Dict[str, Any]]] = None,
) -> Tuple[np.ndarray, int]:
    """
    Clips the audio using the specified offset and duration factors

    @param audio: the path to the audio or a variable of type np.ndarray that
        will be augmented

    @param sample_rate: the audio sample rate of the inputted audio

    @param offset_factor: start point of the crop relative to the audio duration
        (this parameter is multiplied by the audio duration)

    @param duration_factor: the length of the crop relative to the audio duration
        (this parameter is multiplied by the audio duration)

    @param output_path: the path in which the resulting audio will be stored. If
        None, the resulting np.ndarray will still be returned

    @param metadata: if set to be a list, metadata about the function execution
        including its name, the source & dest duration, sample rates, etc. will be
        appended to the inputted list. If set to None, no metadata will be appended

    @returns: the augmented audio array and sample rate
    """
    assert (
        0.0 <= (offset_factor + duration_factor) <= 1.0
    ), "Combination of offset and duration factors exceeds audio length"

    audio, sample_rate = audutils.validate_and_load_audio(audio, sample_rate)
    if metadata is not None:
        func_kwargs = deepcopy(locals())
        func_kwargs.pop("metadata")

    num_samples = audio.shape[-1]
    start = int(offset_factor * num_samples)
    end = int((offset_factor + duration_factor) * num_samples)
    aug_audio = audio[..., start:end]

    if metadata is not None:
        audutils.get_metadata(
            metadata=metadata,
            function_name="clip",
            dst_audio=aug_audio,
            dst_sample_rate=sample_rate,
            start_sample=start,
            end_sample=end,
            # pyre-fixme[61]: `func_kwargs` may not be initialized here.
            **func_kwargs,
        )

    return audutils.ret_and_save_audio(aug_audio, output_path, sample_rate)
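A short usage sketch on synthetic mono audio, keeping the middle half of the signal.

import numpy as np

audio = np.random.randn(16000).astype(np.float32)        # stand-in for 1 s of mono audio
middle, sr = clip(audio, sample_rate=16000, offset_factor=0.25, duration_factor=0.5)
# keeps samples [4000, 12000), i.e. the middle half second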
Extracts the harmonic part of the audio @param audio: the path to the audio or a variable of type np.ndarray that will be augmented @param sample_rate: the audio sample rate of the inputted audio @param kernel_size: kernel size for the median filters @param power: exponent for the Wiener filter when constructing soft mask matrices @param margin: margin size for the masks @param output_path: the path in which the resulting audio will be stored. If None, the resulting np.ndarray will still be returned @param metadata: if set to be a list, metadata about the function execution including its name, the source & dest duration, sample rates, etc. will be appended to the inputted list. If set to None, no metadata will be appended @returns: the augmented audio array and sample rate
def harmonic( audio: Union[str, np.ndarray], sample_rate: int = DEFAULT_SAMPLE_RATE, kernel_size: int = 31, power: float = 2.0, margin: float = 1.0, output_path: Optional[str] = None, metadata: Optional[List[Dict[str, Any]]] = None, ) -> Tuple[np.ndarray, int]: """ Extracts the harmonic part of the audio @param audio: the path to the audio or a variable of type np.ndarray that will be augmented @param sample_rate: the audio sample rate of the inputted audio @param kernel_size: kernel size for the median filters @param power: exponent for the Wiener filter when constructing soft mask matrices @param margin: margin size for the masks @param output_path: the path in which the resulting audio will be stored. If None, the resulting np.ndarray will still be returned @param metadata: if set to be a list, metadata about the function execution including its name, the source & dest duration, sample rates, etc. will be appended to the inputted list. If set to None, no metadata will be appended @returns: the augmented audio array and sample rate """ assert isinstance(kernel_size, int), "Expected 'kernel_size' to be an int" assert isinstance(power, (int, float)), "Expected 'power' to be a number" assert isinstance(margin, (int, float)), "Expected 'margin' to be a number" audio, sample_rate = audutils.validate_and_load_audio(audio, sample_rate) if metadata is not None: func_kwargs = deepcopy(locals()) func_kwargs.pop("metadata") num_channels = 1 if audio.ndim == 1 else audio.shape[0] if num_channels == 1: aug_audio = librosa.effects.harmonic( audio, kernel_size=kernel_size, power=power, margin=margin ) else: aug_audio = np.vstack( [ librosa.effects.harmonic( np.asfortranarray(audio[c]), kernel_size=kernel_size, power=power, margin=margin, ) for c in range(num_channels) ] ) if metadata is not None: audutils.get_metadata( metadata=metadata, function_name="harmonic", dst_audio=aug_audio, dst_sample_rate=sample_rate, # pyre-fixme[61]: `func_kwargs` may not be initialized here. **func_kwargs, ) return audutils.ret_and_save_audio(aug_audio, output_path, sample_rate)
Allows audio signals with a frequency higher than the given cutoff to pass through and attenuates signals with frequencies lower than the cutoff frequency @param audio: the path to the audio or a variable of type np.ndarray that will be augmented @param sample_rate: the audio sample rate of the inputted audio @param cutoff_hz: frequency (in Hz) where signals with lower frequencies will begin to be reduced by 6dB per octave (doubling in frequency) below this point @param output_path: the path in which the resulting audio will be stored. If None, the resulting np.ndarray will still be returned @param metadata: if set to be a list, metadata about the function execution including its name, the source & dest duration, sample rates, etc. will be appended to the inputted list. If set to None, no metadata will be appended @returns: the augmented audio array and sample rate
def high_pass_filter( audio: Union[str, np.ndarray], sample_rate: int = DEFAULT_SAMPLE_RATE, cutoff_hz: float = 3000.0, output_path: Optional[str] = None, metadata: Optional[List[Dict[str, Any]]] = None, ) -> Tuple[np.ndarray, int]: """ Allows audio signals with a frequency higher than the given cutoff to pass through and attenuates signals with frequencies lower than the cutoff frequency @param audio: the path to the audio or a variable of type np.ndarray that will be augmented @param sample_rate: the audio sample rate of the inputted audio @param cutoff_hz: frequency (in Hz) where signals with lower frequencies will begin to be reduced by 6dB per octave (doubling in frequency) below this point @param output_path: the path in which the resulting audio will be stored. If None, the resulting np.ndarray will still be returned @param metadata: if set to be a list, metadata about the function execution including its name, the source & dest duration, sample rates, etc. will be appended to the inputted list. If set to None, no metadata will be appended @returns: the augmented audio array and sample rate """ assert isinstance(cutoff_hz, (int, float)), "Expected 'cutoff_hz' to be a number" audio, sample_rate = audutils.validate_and_load_audio(audio, sample_rate) if metadata is not None: func_kwargs = deepcopy(locals()) func_kwargs.pop("metadata") num_channels = 1 if audio.ndim == 1 else audio.shape[0] audio = audio.reshape((num_channels, -1)) aug_audio, out_sample_rate = sox_effects.apply_effects_tensor( torch.Tensor(audio), sample_rate, [["highpass", str(cutoff_hz)]] ) high_pass_array = aug_audio.numpy() if metadata is not None: audutils.get_metadata( metadata=metadata, function_name="high_pass_filter", dst_audio=high_pass_array, dst_sample_rate=out_sample_rate, # pyre-fixme[61]: `func_kwargs` may not be initialized here. **func_kwargs, ) return audutils.ret_and_save_audio(high_pass_array, output_path, out_sample_rate)
Inserts audio into a background clip in a non-overlapping manner. @param audio: the path to the audio or a variable of type np.ndarray that will be augmented @param sample_rate: the audio sample rate of the inputted audio @param offset_factor: insert point relative to the background duration (this parameter is multiplied by the background duration) @param background_audio: the path to the background audio or a variable of type np.ndarray containing the background audio. If set to `None`, the background audio will be white noise, with the same duration as the audio. @param seed: a NumPy random generator (or seed) such that the results remain reproducible @param output_path: the path in which the resulting audio will be stored. If None, the resulting np.ndarray will still be returned @param metadata: if set to be a list, metadata about the function execution including its name, the source & dest duration, sample rates, etc. will be appended to the inputted list. If set to None, no metadata will be appended @returns: the augmented audio array and sample rate
def insert_in_background( audio: Union[str, np.ndarray], sample_rate: int = DEFAULT_SAMPLE_RATE, offset_factor: float = 0.0, background_audio: Optional[Union[str, np.ndarray]] = None, seed: Optional[audutils.RNGSeed] = None, output_path: Optional[str] = None, metadata: Optional[List[Dict[str, Any]]] = None, ) -> Tuple[np.ndarray, int]: """ Inserts audio into a background clip in a non-overlapping manner. @param audio: the path to the audio or a variable of type np.ndarray that will be augmented @param sample_rate: the audio sample rate of the inputted audio @param offset_factor: insert point relative to the background duration (this parameter is multiplied by the background duration) @param background_audio: the path to the background audio or a variable of type np.ndarray containing the background audio. If set to `None`, the background audio will be white noise, with the same duration as the audio. @param seed: a NumPy random generator (or seed) such that the results remain reproducible @param output_path: the path in which the resulting audio will be stored. If None, the resulting np.ndarray will still be returned @param metadata: if set to be a list, metadata about the function execution including its name, the source & dest duration, sample rates, etc. will be appended to the inputted list. If set to None, no metadata will be appended @returns: the augmented audio array and sample rate """ assert ( 0.0 <= offset_factor <= 1.0 ), "Expected 'offset_factor' to be a number in the range [0, 1]" audio, sample_rate = audutils.validate_and_load_audio(audio, sample_rate) if metadata is not None: func_kwargs = deepcopy(locals()) func_kwargs.pop("metadata") func_kwargs.pop("seed") random_generator = audutils.check_random_state(seed) if background_audio is None: background_audio = random_generator.standard_normal(audio.shape) else: background_audio, _ = audutils.validate_and_load_audio( background_audio, sample_rate ) num_channels = 1 if audio.ndim == 1 else audio.shape[0] bg_num_channels = 1 if background_audio.ndim == 1 else background_audio.shape[0] if bg_num_channels != num_channels: background_audio, _background_sr = to_mono(background_audio) if num_channels > 1: background_audio = np.tile(background_audio, (num_channels, 1)) num_samples_bg = background_audio.shape[-1] offset = int(offset_factor * num_samples_bg) aug_audio = np.hstack( [background_audio[..., :offset], audio, background_audio[..., offset:]] ) if metadata is not None: audutils.get_metadata( metadata=metadata, function_name="insert_in_background", dst_audio=aug_audio, dst_sample_rate=sample_rate, background_duration=background_audio.shape[-1] / sample_rate, offset=offset, # pyre-fixme[61]: `func_kwargs` may not be initialized here. **func_kwargs, ) return audutils.ret_and_save_audio(aug_audio, output_path, sample_rate)
Inverts channels of the audio. If the audio has only one channel, no change is applied. Otherwise, it inverts the order of the channels, eg for 4 channels, it returns channels in order [3, 2, 1, 0]. @param audio: the path to the audio or a variable of type np.ndarray that will be augmented @param sample_rate: the audio sample rate of the inputted audio @param output_path: the path in which the resulting audio will be stored. If None, the resulting np.ndarray will still be returned @param metadata: if set to be a list, metadata about the function execution including its name, the source & dest duration, sample rates, etc. will be appended to the inputted list. If set to None, no metadata will be appended @returns: the augmented audio array and sample rate
def invert_channels( audio: Union[str, np.ndarray], sample_rate: int = DEFAULT_SAMPLE_RATE, output_path: Optional[str] = None, metadata: Optional[List[Dict[str, Any]]] = None, ) -> Tuple[np.ndarray, int]: """ Inverts channels of the audio. If the audio has only one channel, no change is applied. Otherwise, it inverts the order of the channels, eg for 4 channels, it returns channels in order [3, 2, 1, 0]. @param audio: the path to the audio or a variable of type np.ndarray that will be augmented @param sample_rate: the audio sample rate of the inputted audio @param output_path: the path in which the resulting audio will be stored. If None, the resulting np.ndarray will still be returned @param metadata: if set to be a list, metadata about the function execution including its name, the source & dest duration, sample rates, etc. will be appended to the inputted list. If set to None, no metadata will be appended @returns: the augmented audio array and sample rate """ audio, sample_rate = audutils.validate_and_load_audio(audio, sample_rate) aug_audio = audio if audio.ndim > 1: num_channels = audio.shape[0] inverted_channels = list(range(num_channels))[::-1] aug_audio = audio[inverted_channels, :] audutils.get_metadata( metadata=metadata, function_name="invert_channels", audio=audio, sample_rate=sample_rate, dst_audio=aug_audio, dst_sample_rate=sample_rate, output_path=output_path, ) return audutils.ret_and_save_audio(aug_audio, output_path, sample_rate)
Loops the audio 'n' times @param audio: the path to the audio or a variable of type np.ndarray that will be augmented @param sample_rate: the audio sample rate of the inputted audio @param n: the number of times the audio will be looped @param output_path: the path in which the resulting audio will be stored. If None, the resulting np.ndarray will still be returned @param metadata: if set to be a list, metadata about the function execution including its name, the source & dest duration, sample rates, etc. will be appended to the inputted list. If set to None, no metadata will be appended @returns: the augmented audio array and sample rate
def loop( audio: Union[str, np.ndarray], sample_rate: int = DEFAULT_SAMPLE_RATE, n: int = 1, output_path: Optional[str] = None, metadata: Optional[List[Dict[str, Any]]] = None, ) -> Tuple[np.ndarray, int]: """ Loops the audio 'n' times @param audio: the path to the audio or a variable of type np.ndarray that will be augmented @param sample_rate: the audio sample rate of the inputted audio @param n: the number of times the audio will be looped @param output_path: the path in which the resulting audio will be stored. If None, the resulting np.ndarray will still be returned @param metadata: if set to be a list, metadata about the function execution including its name, the source & dest duration, sample rates, etc. will be appended to the inputted list. If set to None, no metadata will be appended @returns: the augmented audio array and sample rate """ assert isinstance(n, int) and n >= 0, "Expected 'n' to be a nonnegative integer" audio, sample_rate = audutils.validate_and_load_audio(audio, sample_rate) aug_audio = audio for _ in range(n): aug_audio = np.append(aug_audio, audio, axis=(0 if audio.ndim == 1 else 1)) audutils.get_metadata( metadata=metadata, function_name="loop", audio=audio, sample_rate=sample_rate, dst_audio=aug_audio, dst_sample_rate=sample_rate, output_path=output_path, n=n, ) return audutils.ret_and_save_audio(aug_audio, output_path, sample_rate)
Allows audio signals with a frequency lower than the given cutoff to pass through and attenuates signals with frequencies higher than the cutoff frequency @param audio: the path to the audio or a variable of type np.ndarray that will be augmented @param sample_rate: the audio sample rate of the inputted audio @param cutoff_hz: frequency (in Hz) where signals with higher frequencies will begin to be reduced by 6dB per octave (doubling in frequency) above this point @param output_path: the path in which the resulting audio will be stored. If None, the resulting np.ndarray will still be returned @param metadata: if set to be a list, metadata about the function execution including its name, the source & dest duration, sample rates, etc. will be appended to the inputted list. If set to None, no metadata will be appended @returns: the augmented audio array and sample rate
def low_pass_filter( audio: Union[str, np.ndarray], sample_rate: int = DEFAULT_SAMPLE_RATE, cutoff_hz: float = 500.0, output_path: Optional[str] = None, metadata: Optional[List[Dict[str, Any]]] = None, ) -> Tuple[np.ndarray, int]: """ Allows audio signals with a frequency lower than the given cutoff to pass through and attenuates signals with frequencies higher than the cutoff frequency @param audio: the path to the audio or a variable of type np.ndarray that will be augmented @param sample_rate: the audio sample rate of the inputted audio @param cutoff_hz: frequency (in Hz) where signals with higher frequencies will begin to be reduced by 6dB per octave (doubling in frequency) above this point @param output_path: the path in which the resulting audio will be stored. If None, the resulting np.ndarray will still be returned @param metadata: if set to be a list, metadata about the function execution including its name, the source & dest duration, sample rates, etc. will be appended to the inputted list. If set to None, no metadata will be appended @returns: the augmented audio array and sample rate """ assert isinstance(cutoff_hz, (int, float)), "Expected 'cutoff_hz' to be a number" audio, sample_rate = audutils.validate_and_load_audio(audio, sample_rate) if metadata is not None: func_kwargs = deepcopy(locals()) func_kwargs.pop("metadata") num_channels = 1 if audio.ndim == 1 else audio.shape[0] audio = audio.reshape((num_channels, -1)) aug_audio, out_sample_rate = sox_effects.apply_effects_tensor( torch.Tensor(audio), sample_rate, [["lowpass", str(cutoff_hz)]] ) low_pass_array = aug_audio.numpy() if metadata is not None: audutils.get_metadata( metadata=metadata, function_name="low_pass_filter", dst_audio=low_pass_array, dst_sample_rate=out_sample_rate, # pyre-fixme[61]: `func_kwargs` may not be initialized here. **func_kwargs, ) return audutils.ret_and_save_audio(low_pass_array, output_path, out_sample_rate)
Normalizes the audio array along the chosen axis (norm(audio, axis=axis) == 1) @param audio: the path to the audio or a variable of type np.ndarray that will be augmented @param sample_rate: the audio sample rate of the inputted audio @param norm: the type of norm to compute: - np.inf: maximum absolute value - -np.inf: minimum absolute value - 0: number of non-zeros (the support) - float: corresponding l_p norm - None: no normalization is performed @param axis: axis along which to compute the norm @param threshold: if provided, only the columns (or rows) with norm of at least `threshold` are normalized @param fill: if None, then columns (or rows) with norm below `threshold` are left as is. If False, then columns (rows) with norm below `threshold` are set to 0. If True, then columns (rows) with norm below `threshold` are filled uniformly such that the corresponding norm is 1 @param output_path: the path in which the resulting audio will be stored. If None, the resulting np.ndarray will still be returned @param metadata: if set to be a list, metadata about the function execution including its name, the source & dest duration, sample rates, etc. will be appended to the inputted list. If set to None, no metadata will be appended @returns: the augmented audio array and sample rate
def normalize(
    audio: Union[str, np.ndarray],
    sample_rate: int = DEFAULT_SAMPLE_RATE,
    norm: Optional[float] = np.inf,
    axis: int = 0,
    threshold: Optional[float] = None,
    fill: Optional[bool] = None,
    output_path: Optional[str] = None,
    metadata: Optional[List[Dict[str, Any]]] = None,
) -> Tuple[np.ndarray, int]:
    """
    Normalizes the audio array along the chosen axis (norm(audio, axis=axis) == 1)

    @param audio: the path to the audio or a variable of type np.ndarray that
        will be augmented

    @param sample_rate: the audio sample rate of the inputted audio

    @param norm: the type of norm to compute:
        - np.inf: maximum absolute value
        - -np.inf: minimum absolute value
        - 0: number of non-zeros (the support)
        - float: corresponding l_p norm
        - None: no normalization is performed

    @param axis: axis along which to compute the norm

    @param threshold: if provided, only the columns (or rows) with norm of at
        least `threshold` are normalized

    @param fill: if None, then columns (or rows) with norm below `threshold` are left
        as is. If False, then columns (rows) with norm below `threshold` are set to 0.
        If True, then columns (rows) with norm below `threshold` are filled uniformly
        such that the corresponding norm is 1

    @param output_path: the path in which the resulting audio will be stored. If
        None, the resulting np.ndarray will still be returned

    @param metadata: if set to be a list, metadata about the function execution
        including its name, the source & dest duration, sample rates, etc. will be
        appended to the inputted list. If set to None, no metadata will be appended

    @returns: the augmented audio array and sample rate
    """
    assert (
        isinstance(axis, int) and axis >= 0
    ), "Expected 'axis' to be a nonnegative number"
    assert threshold is None or isinstance(
        threshold, (int, float)
    ), "Expected 'threshold' to be a number or None"
    assert fill is None or isinstance(
        fill, bool
    ), "Expected 'fill' to be a boolean or None"

    audio, sample_rate = audutils.validate_and_load_audio(audio, sample_rate)
    if metadata is not None:
        func_kwargs = deepcopy(locals())
        func_kwargs["norm"] = str(func_kwargs["norm"])
        func_kwargs.pop("metadata")

    aug_audio = librosa.util.normalize(
        audio, norm=norm, axis=axis, threshold=threshold, fill=fill
    )

    if metadata is not None:
        audutils.get_metadata(
            metadata=metadata,
            function_name="normalize",
            dst_audio=aug_audio,
            dst_sample_rate=sample_rate,
            # pyre-fixme[61]: `func_kwargs` may not be initialized here.
            **func_kwargs,
        )

    return audutils.ret_and_save_audio(aug_audio, output_path, sample_rate)
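A small usage sketch with the default max-absolute-value norm on a quiet synthetic signal.

import numpy as np

quiet = (np.random.randn(16000) * 0.05).astype(np.float32)
peak_normalized, sr = normalize(quiet, sample_rate=16000, norm=np.inf)
# with the default max-abs norm, the loudest sample now sits at magnitude 1.0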
Applies a two-pole peaking equalization filter. The signal-level at and around `center_hz` can be increased or decreased, while all other frequencies are unchanged @param audio: the path to the audio or a variable of type np.ndarray that will be augmented @param sample_rate: the audio sample rate of the inputted audio @param center_hz: point in the frequency spectrum at which EQ is applied @param q: ratio of center frequency to bandwidth; bandwidth is inversely proportional to Q, meaning that as you raise Q, you narrow the bandwidth @param gain_db: amount of gain (boost) or reduction (cut) that is applied at a given frequency. Beware of clipping when using positive gain @param output_path: the path in which the resulting audio will be stored. If None, the resulting np.ndarray will still be returned @param metadata: if set to be a list, metadata about the function execution including its name, the source & dest duration, sample rates, etc. will be appended to the inputted list. If set to None, no metadata will be appended @returns: the augmented audio array and sample rate
def peaking_equalizer( audio: Union[str, np.ndarray], sample_rate: int = DEFAULT_SAMPLE_RATE, center_hz: float = 500.0, q: float = 1.0, gain_db: float = -3.0, output_path: Optional[str] = None, metadata: Optional[List[Dict[str, Any]]] = None, ) -> Tuple[np.ndarray, int]: """ Applies a two-pole peaking equalization filter. The signal-level at and around `center_hz` can be increased or decreased, while all other frequencies are unchanged @param audio: the path to the audio or a variable of type np.ndarray that will be augmented @param sample_rate: the audio sample rate of the inputted audio @param center_hz: point in the frequency spectrum at which EQ is applied @param q: ratio of center frequency to bandwidth; bandwidth is inversely proportional to Q, meaning that as you raise Q, you narrow the bandwidth @param gain_db: amount of gain (boost) or reduction (cut) that is applied at a given frequency. Beware of clipping when using positive gain @param output_path: the path in which the resulting audio will be stored. If None, the resulting np.ndarray will still be returned @param metadata: if set to be a list, metadata about the function execution including its name, the source & dest duration, sample rates, etc. will be appended to the inputted list. If set to None, no metadata will be appended @returns: the augmented audio array and sample rate """ assert isinstance(center_hz, (int, float)), "Expected 'center_hz' to be a number" assert isinstance(q, (int, float)) and q > 0, "Expected 'q' to be a positive number" assert isinstance(gain_db, (int, float)), "Expected 'gain_db' to be a number" audio, sample_rate = audutils.validate_and_load_audio(audio, sample_rate) if metadata is not None: func_kwargs = deepcopy(locals()) func_kwargs.pop("metadata") num_channels = 1 if audio.ndim == 1 else audio.shape[0] aug_audio = audio.reshape((num_channels, -1)) aug_audio, out_sample_rate = sox_effects.apply_effects_tensor( torch.Tensor(aug_audio), sample_rate, [["equalizer", str(center_hz), f"{q}q", str(gain_db)]], ) aug_audio = aug_audio.numpy() if num_channels == 1: aug_audio = aug_audio.reshape((aug_audio.shape[-1],)) if metadata is not None: audutils.get_metadata( metadata=metadata, function_name="peaking_equalizer", dst_audio=aug_audio, dst_sample_rate=out_sample_rate, # pyre-fixme[61]: `func_kwargs` may not be initialized here. **func_kwargs, ) return audutils.ret_and_save_audio(aug_audio, output_path, out_sample_rate)
Extracts the percussive part of the audio @param audio: the path to the audio or a variable of type np.ndarray that will be augmented @param sample_rate: the audio sample rate of the inputted audio @param kernel_size: kernel size for the median filters @param power: exponent for the Wiener filter when constructing soft mask matrices @param margin: margin size for the masks @param output_path: the path in which the resulting audio will be stored. If None, the resulting np.ndarray will still be returned @param metadata: if set to be a list, metadata about the function execution including its name, the source & dest duration, sample rates, etc. will be appended to the inputted list. If set to None, no metadata will be appended @returns: the augmented audio array and sample rate
def percussive( audio: Union[str, np.ndarray], sample_rate: int = DEFAULT_SAMPLE_RATE, kernel_size: int = 31, power: float = 2.0, margin: float = 1.0, output_path: Optional[str] = None, metadata: Optional[List[Dict[str, Any]]] = None, ) -> Tuple[np.ndarray, int]: """ Extracts the percussive part of the audio @param audio: the path to the audio or a variable of type np.ndarray that will be augmented @param sample_rate: the audio sample rate of the inputted audio @param kernel_size: kernel size for the median filters @param power: exponent for the Wiener filter when constructing soft mask matrices @param margin: margin size for the masks @param output_path: the path in which the resulting audio will be stored. If None, the resulting np.ndarray will still be returned @param metadata: if set to be a list, metadata about the function execution including its name, the source & dest duration, sample rates, etc. will be appended to the inputted list. If set to None, no metadata will be appended @returns: the augmented audio array and sample rate """ assert isinstance(kernel_size, int), "Expected 'kernel_size' to be an int" assert isinstance(power, (int, float)), "Expected 'power' to be a number" assert isinstance(margin, (int, float)), "Expected 'margin' to be a number" audio, sample_rate = audutils.validate_and_load_audio(audio, sample_rate) if metadata is not None: func_kwargs = deepcopy(locals()) func_kwargs.pop("metadata") num_channels = 1 if audio.ndim == 1 else audio.shape[0] if num_channels == 1: aug_audio = librosa.effects.percussive( audio, kernel_size=kernel_size, power=power, margin=margin ) else: aug_audio = np.vstack( [ librosa.effects.percussive( np.asfortranarray(audio[c]), kernel_size=kernel_size, power=power, margin=margin, ) for c in range(num_channels) ] ) if metadata is not None: audutils.get_metadata( metadata=metadata, function_name="percussive", dst_audio=aug_audio, dst_sample_rate=sample_rate, # pyre-fixme[61]: `func_kwargs` may not be initialized here. **func_kwargs, ) return audutils.ret_and_save_audio(aug_audio, output_path, sample_rate)
Shifts the pitch of the audio by `n_steps` @param audio: the path to the audio or a variable of type np.ndarray that will be augmented @param sample_rate: the audio sample rate of the inputted audio @param n_steps: each step is equal to one semitone @param output_path: the path in which the resulting audio will be stored. If None, the resulting np.ndarray will still be returned @param metadata: if set to be a list, metadata about the function execution including its name, the source & dest duration, sample rates, etc. will be appended to the inputted list. If set to None, no metadata will be appended @returns: the augmented audio array and sample rate
def pitch_shift( audio: Union[str, np.ndarray], sample_rate: int = DEFAULT_SAMPLE_RATE, n_steps: float = 1.0, output_path: Optional[str] = None, metadata: Optional[List[Dict[str, Any]]] = None, ) -> Tuple[np.ndarray, int]: """ Shifts the pitch of the audio by `n_steps` @param audio: the path to the audio or a variable of type np.ndarray that will be augmented @param sample_rate: the audio sample rate of the inputted audio @param n_steps: each step is equal to one semitone @param output_path: the path in which the resulting audio will be stored. If None, the resulting np.ndarray will still be returned @param metadata: if set to be a list, metadata about the function execution including its name, the source & dest duration, sample rates, etc. will be appended to the inputted list. If set to None, no metadata will be appended @returns: the augmented audio array and sample rate """ assert isinstance(n_steps, (int, float)), "Expected 'n_steps' to be a number" audio, sample_rate = audutils.validate_and_load_audio(audio, sample_rate) num_channels = 1 if audio.ndim == 1 else audio.shape[0] if num_channels == 1: aug_audio = librosa.effects.pitch_shift(audio, sr=sample_rate, n_steps=n_steps) else: aug_audio = np.vstack( [ librosa.effects.pitch_shift( np.asfortranarray(audio[c]), sr=sample_rate, n_steps=n_steps ) for c in range(num_channels) ] ) audutils.get_metadata( metadata=metadata, function_name="pitch_shift", audio=audio, sample_rate=sample_rate, dst_audio=aug_audio, dst_sample_rate=sample_rate, output_path=output_path, n_steps=n_steps, ) return audutils.ret_and_save_audio(aug_audio, output_path, sample_rate)
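A brief usage sketch on a synthetic sine tone; librosa does the actual resynthesis.

import numpy as np

sr = 16000
tone = np.sin(2 * np.pi * 440 * np.arange(sr) / sr).astype(np.float32)   # 1 s, 440 Hz
up, out_sr = pitch_shift(tone, sample_rate=sr, n_steps=2.0)              # two semitones up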
Adds reverberation to the audio @param audio: the path to the audio or a variable of type np.ndarray that will be augmented @param sample_rate: the audio sample rate of the inputted audio @param reverberance: (%) sets the length of the reverberation tail. This determines how long the reverberation continues for after the original sound being reverbed comes to an end, and so simulates the "liveliness" of the room acoustics @param hf_damping: (%) increasing the damping produces a more "muted" effect. The reverberation does not build up as much, and the high frequencies decay faster than the low frequencies @param room_scale: (%) sets the size of the simulated room. A high value will simulate the reverberation effect of a large room and a low value will simulate the effect of a small room @param stereo_depth: (%) sets the apparent "width" of the reverb effect for stereo tracks only. Increasing this value applies more variation between left and right channels, creating a more "spacious" effect. When set at zero, the effect is applied independently to left and right channels @param pre_delay: (ms) delays the onset of the reverberation for the set time after the start of the original input. This also delays the onset of the reverb tail @param wet_gain: (db) applies volume adjustment to the reverberation ("wet") component in the mix @param wet_only: only the wet signal (added reverberation) will be in the resulting output, and the original audio will be removed @param output_path: the path in which the resulting audio will be stored. If None, the resulting np.ndarray will still be returned @param metadata: if set to be a list, metadata about the function execution including its name, the source & dest duration, sample rates, etc. will be appended to the inputted list. If set to None, no metadata will be appended @returns: the augmented audio array and sample rate
def reverb(
    audio: Union[str, np.ndarray],
    sample_rate: int = DEFAULT_SAMPLE_RATE,
    reverberance: float = 50.0,
    hf_damping: float = 50.0,
    room_scale: float = 100.0,
    stereo_depth: float = 100.0,
    pre_delay: float = 0.0,
    wet_gain: float = 0.0,
    wet_only: bool = False,
    output_path: Optional[str] = None,
    metadata: Optional[List[Dict[str, Any]]] = None,
) -> Tuple[np.ndarray, int]:
    """
    Adds reverberation to the audio

    @param audio: the path to the audio or a variable of type np.ndarray that
        will be augmented
    @param sample_rate: the audio sample rate of the inputted audio
    @param reverberance: (%) sets the length of the reverberation tail. This
        determines how long the reverberation continues for after the original
        sound being reverbed comes to an end, and so simulates the "liveliness"
        of the room acoustics
    @param hf_damping: (%) increasing the damping produces a more "muted"
        effect. The reverberation does not build up as much, and the high
        frequencies decay faster than the low frequencies
    @param room_scale: (%) sets the size of the simulated room. A high value
        will simulate the reverberation effect of a large room and a low value
        will simulate the effect of a small room
    @param stereo_depth: (%) sets the apparent "width" of the reverb effect for
        stereo tracks only. Increasing this value applies more variation between
        left and right channels, creating a more "spacious" effect. When set at
        zero, the effect is applied independently to left and right channels
    @param pre_delay: (ms) delays the onset of the reverberation for the set
        time after the start of the original input. This also delays the onset
        of the reverb tail
    @param wet_gain: (db) applies volume adjustment to the reverberation ("wet")
        component in the mix
    @param wet_only: only the wet signal (added reverberation) will be in the
        resulting output, and the original audio will be removed
    @param output_path: the path in which the resulting audio will be stored.
        If None, the resulting np.ndarray will still be returned
    @param metadata: if set to be a list, metadata about the function execution
        including its name, the source & dest duration, sample rates, etc. will
        be appended to the inputted list. If set to None, no metadata will be
        appended
    @returns: the augmented audio array and sample rate
    """
    assert isinstance(
        reverberance, (int, float)
    ), "Expected 'reverberance' to be a number"
    assert isinstance(hf_damping, (int, float)), "Expected 'hf_damping' to be a number"
    assert isinstance(room_scale, (int, float)), "Expected 'room_scale' to be a number"
    assert isinstance(
        stereo_depth, (int, float)
    ), "Expected 'stereo_depth' to be a number"
    assert isinstance(pre_delay, (int, float)), "Expected 'pre_delay' to be a number"
    assert isinstance(wet_gain, (int, float)), "Expected 'wet_gain' to be a number"
    assert isinstance(wet_only, bool), "Expected 'wet_only' to be a boolean"

    audio, sample_rate = audutils.validate_and_load_audio(audio, sample_rate)

    if metadata is not None:
        func_kwargs = deepcopy(locals())
        func_kwargs.pop("metadata")

    # sox expects a 2D (channels, samples) tensor, so add a channel axis for mono input
    aug_audio = audio.reshape((1, audio.shape[-1])) if audio.ndim == 1 else audio

    effect = ["reverb"]
    if wet_only:
        effect.append("-w")

    aug_audio, out_sample_rate = sox_effects.apply_effects_tensor(
        torch.Tensor(aug_audio),
        sample_rate,
        [
            effect
            + [
                str(reverberance),
                str(hf_damping),
                str(room_scale),
                str(stereo_depth),
                str(pre_delay),
                str(wet_gain),
            ]
        ],
    )
    aug_audio = aug_audio.numpy()
    if audio.ndim == 1:
        # restore the original 1D shape for mono input
        aug_audio = aug_audio.reshape((aug_audio.shape[-1],))

    if metadata is not None:
        audutils.get_metadata(
            metadata=metadata,
            function_name="reverb",
            dst_audio=aug_audio,
            dst_sample_rate=out_sample_rate,
            # pyre-fixme[61]: `func_kwargs` may not be initialized here.
            **func_kwargs,
        )

    return audutils.ret_and_save_audio(aug_audio, output_path, out_sample_rate)
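A minimal usage sketch for reverb, applied to an in-memory signal; the synthetic tone, the 16 kHz sample rate, and the parameter values below are illustrative assumptions rather than part of the library source.

# Illustrative only: reverberate one second of a 440 Hz tone.
import numpy as np

sr = 16000  # assumed sample rate for this example
t = np.linspace(0.0, 1.0, sr, endpoint=False)
tone = (0.5 * np.sin(2 * np.pi * 440.0 * t)).astype(np.float32)

meta = []
wet, out_sr = reverb(
    tone, sample_rate=sr, reverberance=75.0, room_scale=50.0, metadata=meta
)
# out_sr matches sr (sox reverb does not resample); meta[0] records the
# function name and the parameters that were applied.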
Changes the speed of the audio, affecting pitch as well @param audio: the path to the audio or a variable of type np.ndarray that will be augmented @param sample_rate: the audio sample rate of the inputted audio @param factor: the speed factor. If rate > 1 the audio will be sped up by that factor; if rate < 1 the audio will be slowed down by that factor @param output_path: the path in which the resulting audio will be stored. If None, the resulting np.ndarray will still be returned @param metadata: if set to be a list, metadata about the function execution including its name, the source & dest duration, sample rates, etc. will be appended to the inputted list. If set to None, no metadata will be appended @returns: the augmented audio array and sample rate
def speed(
    audio: Union[str, np.ndarray],
    sample_rate: int = DEFAULT_SAMPLE_RATE,
    factor: float = 2.0,
    output_path: Optional[str] = None,
    metadata: Optional[List[Dict[str, Any]]] = None,
) -> Tuple[np.ndarray, int]:
    """
    Changes the speed of the audio, affecting pitch as well

    @param audio: the path to the audio or a variable of type np.ndarray that
        will be augmented
    @param sample_rate: the audio sample rate of the inputted audio
    @param factor: the speed factor. If rate > 1 the audio will be sped up by
        that factor; if rate < 1 the audio will be slowed down by that factor
    @param output_path: the path in which the resulting audio will be stored.
        If None, the resulting np.ndarray will still be returned
    @param metadata: if set to be a list, metadata about the function execution
        including its name, the source & dest duration, sample rates, etc. will
        be appended to the inputted list. If set to None, no metadata will be
        appended
    @returns: the augmented audio array and sample rate
    """
    assert (
        isinstance(factor, (int, float)) and factor > 0
    ), "Expected 'factor' to be a positive number"

    audio, sample_rate = audutils.validate_and_load_audio(audio, sample_rate)
    out_sample_rate = int(sample_rate * factor)

    audutils.get_metadata(
        metadata=metadata,
        function_name="speed",
        audio=audio,
        sample_rate=sample_rate,
        dst_audio=audio,
        dst_sample_rate=out_sample_rate,
        output_path=output_path,
        factor=factor,
    )

    return audutils.ret_and_save_audio(audio, output_path, out_sample_rate)
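A small sketch (with assumed input values) showing how speed works: the samples themselves are untouched, and only the returned sample rate is scaled, which is what produces the faster playback and pitch shift when the result is written out.

# Illustrative only: speed() returns the original samples with a scaled sample rate.
import numpy as np

audio_in = np.zeros(16000, dtype=np.float32)  # assumed 1 s of silence at 16 kHz
out_audio, out_sr = speed(audio_in, sample_rate=16000, factor=2.0)
assert out_audio.shape == audio_in.shape  # samples are unchanged
assert out_sr == 32000  # plays back in half the time, an octave higher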
Adjusts the tempo of the audio by a given factor @param audio: the path to the audio or a variable of type np.ndarray that will be augmented @param sample_rate: the audio sample rate of the inputted audio @param factor: the tempo factor. If rate > 1 the audio will be sped up by that factor; if rate < 1 the audio will be slowed down by that factor, without affecting the pitch @param output_path: the path in which the resulting audio will be stored. If None, the resulting np.ndarray will still be returned @param metadata: if set to be a list, metadata about the function execution including its name, the source & dest duration, sample rates, etc. will be appended to the inputted list. If set to None, no metadata will be appended @returns: the augmented audio array and sample rate
def tempo(
    audio: Union[str, np.ndarray],
    sample_rate: int = DEFAULT_SAMPLE_RATE,
    factor: float = 2.0,
    output_path: Optional[str] = None,
    metadata: Optional[List[Dict[str, Any]]] = None,
) -> Tuple[np.ndarray, int]:
    """
    Adjusts the tempo of the audio by a given factor

    @param audio: the path to the audio or a variable of type np.ndarray that
        will be augmented
    @param sample_rate: the audio sample rate of the inputted audio
    @param factor: the tempo factor. If rate > 1 the audio will be sped up by
        that factor; if rate < 1 the audio will be slowed down by that factor,
        without affecting the pitch
    @param output_path: the path in which the resulting audio will be stored.
        If None, the resulting np.ndarray will still be returned
    @param metadata: if set to be a list, metadata about the function execution
        including its name, the source & dest duration, sample rates, etc. will
        be appended to the inputted list. If set to None, no metadata will be
        appended
    @returns: the augmented audio array and sample rate
    """
    assert (
        isinstance(factor, (int, float)) and factor > 0
    ), "Expected 'factor' to be a positive number"

    audio, sample_rate = audutils.validate_and_load_audio(audio, sample_rate)

    num_channels = 1 if audio.ndim == 1 else audio.shape[0]
    aug_audio = audio.reshape((num_channels, -1))
    aug_audio, out_sample_rate = sox_effects.apply_effects_tensor(
        torch.Tensor(aug_audio), sample_rate, [["tempo", str(factor)]]
    )
    aug_audio = aug_audio.numpy()
    if num_channels == 1:
        aug_audio = aug_audio.reshape((aug_audio.shape[-1],))

    audutils.get_metadata(
        metadata=metadata,
        function_name="tempo",
        audio=audio,
        sample_rate=sample_rate,
        dst_audio=aug_audio,
        dst_sample_rate=out_sample_rate,
        output_path=output_path,
        factor=factor,
    )

    return audutils.ret_and_save_audio(aug_audio, output_path, out_sample_rate)
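In contrast to speed, tempo delegates to sox and changes the duration of the signal while keeping the sample rate and pitch. A sketch with assumed input values:

# Illustrative only: tempo halves the duration at factor=2.0 without resampling.
import numpy as np

audio_in = np.random.default_rng(0).standard_normal(16000).astype(np.float32)  # ~1 s at 16 kHz
out_audio, out_sr = tempo(audio_in, sample_rate=16000, factor=2.0)
# out_sr stays 16000; the output is roughly half as many samples long.
print(out_sr, len(audio_in), len(out_audio))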
Time-stretches the audio by a fixed rate @param audio: the path to the audio or a variable of type np.ndarray that will be augmented @param sample_rate: the audio sample rate of the inputted audio @param rate: the time stretch factor. If rate > 1 the audio will be sped up by that factor; if rate < 1 the audio will be slowed down by that factor @param output_path: the path in which the resulting audio will be stored. If None, the resulting np.ndarray will still be returned @param metadata: if set to be a list, metadata about the function execution including its name, the source & dest duration, sample rates, etc. will be appended to the inputted list. If set to None, no metadata will be appended @returns: the augmented audio array and sample rate
def time_stretch(
    audio: Union[str, np.ndarray],
    sample_rate: int = DEFAULT_SAMPLE_RATE,
    rate: float = 1.5,
    output_path: Optional[str] = None,
    metadata: Optional[List[Dict[str, Any]]] = None,
) -> Tuple[np.ndarray, int]:
    """
    Time-stretches the audio by a fixed rate

    @param audio: the path to the audio or a variable of type np.ndarray that
        will be augmented
    @param sample_rate: the audio sample rate of the inputted audio
    @param rate: the time stretch factor. If rate > 1 the audio will be sped up
        by that factor; if rate < 1 the audio will be slowed down by that factor
    @param output_path: the path in which the resulting audio will be stored.
        If None, the resulting np.ndarray will still be returned
    @param metadata: if set to be a list, metadata about the function execution
        including its name, the source & dest duration, sample rates, etc. will
        be appended to the inputted list. If set to None, no metadata will be
        appended
    @returns: the augmented audio array and sample rate
    """
    assert (
        isinstance(rate, (int, float)) and rate > 0
    ), "Expected 'rate' to be a positive number"

    audio, sample_rate = audutils.validate_and_load_audio(audio, sample_rate)

    num_channels = 1 if audio.ndim == 1 else audio.shape[0]
    if num_channels == 1:
        aug_audio = librosa.effects.time_stretch(audio, rate=rate)
    else:
        aug_audio = np.vstack(
            [
                librosa.effects.time_stretch(np.asfortranarray(audio[c]), rate=rate)
                for c in range(num_channels)
            ]
        )

    audutils.get_metadata(
        metadata=metadata,
        function_name="time_stretch",
        audio=audio,
        sample_rate=sample_rate,
        dst_audio=aug_audio,
        dst_sample_rate=sample_rate,
        output_path=output_path,
        rate=rate,
    )

    return audutils.ret_and_save_audio(aug_audio, output_path, sample_rate)
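A sketch of the multichannel path, which stretches each channel independently with librosa and keeps the sample rate; the fake stereo signal and 22.05 kHz rate are assumptions for illustration.

# Illustrative only: per-channel librosa time stretch on a fake stereo signal.
import numpy as np

stereo = np.random.default_rng(0).standard_normal((2, 22050)).astype(np.float32)  # (channels, samples)
stretched, sr = time_stretch(stereo, sample_rate=22050, rate=1.5)
# sr is unchanged; each channel is roughly len/1.5 samples long.
print(stretched.shape)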
Converts the audio from stereo to mono by averaging samples across channels @param audio: the path to the audio or a variable of type np.ndarray that will be augmented @param sample_rate: the audio sample rate of the inputted audio @param output_path: the path in which the resulting audio will be stored. If None, the resulting np.ndarray will still be returned @param metadata: if set to be a list, metadata about the function execution including its name, the source & dest duration, sample rates, etc. will be appended to the inputted list. If set to None, no metadata will be appended @returns: the augmented audio array and sample rate
def to_mono(
    audio: Union[str, np.ndarray],
    sample_rate: int = DEFAULT_SAMPLE_RATE,
    output_path: Optional[str] = None,
    metadata: Optional[List[Dict[str, Any]]] = None,
) -> Tuple[np.ndarray, int]:
    """
    Converts the audio from stereo to mono by averaging samples across channels

    @param audio: the path to the audio or a variable of type np.ndarray that
        will be augmented
    @param sample_rate: the audio sample rate of the inputted audio
    @param output_path: the path in which the resulting audio will be stored.
        If None, the resulting np.ndarray will still be returned
    @param metadata: if set to be a list, metadata about the function execution
        including its name, the source & dest duration, sample rates, etc. will
        be appended to the inputted list. If set to None, no metadata will be
        appended
    @returns: the augmented audio array and sample rate
    """
    audio, sample_rate = audutils.validate_and_load_audio(audio, sample_rate)

    aug_audio = librosa.core.to_mono(audio)

    audutils.get_metadata(
        metadata=metadata,
        function_name="to_mono",
        audio=audio,
        sample_rate=sample_rate,
        dst_audio=aug_audio,
        dst_sample_rate=sample_rate,
        output_path=output_path,
    )

    return audutils.ret_and_save_audio(aug_audio, output_path, sample_rate)
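A tiny sketch (assumed input) of the channel-averaging behaviour inherited from librosa.to_mono:

# Illustrative only: averaging a 2-channel array down to one channel.
import numpy as np

stereo = np.stack([np.ones(8, dtype=np.float32), np.zeros(8, dtype=np.float32)])
mono, sr = to_mono(stereo, sample_rate=16000)  # assumed sample rate
# librosa.to_mono averages across channels, so every sample here is 0.5.
print(mono)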
Applies a convolution operation to audio using an impulse response as the convolution filter @param audio: the path to the audio or a variable of type np.ndarray that will be augmented @param sample_rate: the audio sample rate of the inputted audio @param normalize: if True, normalize the output to the maximum amplitude @param impulse_audio: the path to the audio or a variable of type np.ndarray that will be used as the convolution filter @param seed: the seed for the random number generator @param output_path: the path in which the resulting audio will be stored. If None, the resulting np.ndarray will still be returned @param metadata: if set to be a list, metadata about the function execution including its name, the source & dest duration, sample rates, etc. will be appended to the inputted list. If set to None, no metadata will be appended @returns: the augmented audio array and sample rate
def fft_convolve(
    audio: Union[str, np.ndarray],
    sample_rate: int = DEFAULT_SAMPLE_RATE,
    normalize: bool = True,
    impulse_audio: Optional[Union[str, np.ndarray]] = None,
    seed: Optional[audutils.RNGSeed] = None,
    output_path: Optional[str] = None,
    metadata: Optional[List[Dict[str, Any]]] = None,
) -> Tuple[np.ndarray, int]:
    """
    Applies a convolution operation to audio using an impulse response as the
    convolution filter

    @param audio: the path to the audio or a variable of type np.ndarray that
        will be augmented
    @param sample_rate: the audio sample rate of the inputted audio
    @param normalize: if True, normalize the output to the maximum amplitude
    @param impulse_audio: the path to the audio or a variable of type np.ndarray
        that will be used as the convolution filter
    @param seed: the seed for the random number generator
    @param output_path: the path in which the resulting audio will be stored.
        If None, the resulting np.ndarray will still be returned
    @param metadata: if set to be a list, metadata about the function execution
        including its name, the source & dest duration, sample rates, etc. will
        be appended to the inputted list. If set to None, no metadata will be
        appended
    @returns: the augmented audio array and sample rate
    """
    audio, sample_rate = audutils.validate_and_load_audio(audio, sample_rate)
    num_channels = 1 if audio.ndim == 1 else audio.shape[0]

    if impulse_audio is None:
        random_generator = audutils.check_random_state(seed)
        impulse_audio = random_generator.standard_normal(audio.shape)
    else:
        impulse_audio, impulse_sample_rate = audutils.validate_and_load_audio(
            impulse_audio, sample_rate
        )
        if impulse_sample_rate != sample_rate:
            impulse_audio = resample(
                torch.tensor(impulse_audio), impulse_sample_rate, sample_rate
            ).numpy()

    aug_audio = fftconvolve(torch.Tensor(audio), torch.Tensor(impulse_audio))
    if normalize:
        aug_audio = aug_audio / aug_audio.abs().max()
    aug_audio = aug_audio.numpy()
    if num_channels == 1:
        aug_audio = aug_audio.reshape((aug_audio.shape[-1],))

    audutils.get_metadata(
        metadata=metadata,
        function_name="fft_convolve",
        audio=audio,
        sample_rate=sample_rate,
        dst_audio=aug_audio,
        dst_sample_rate=sample_rate,
        output_path=output_path,
    )

    return audutils.ret_and_save_audio(aug_audio, output_path, sample_rate)
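When no impulse is supplied, a random one matching the audio shape is drawn; otherwise the supplied impulse is resampled to match the input. A sketch with an explicit toy impulse response (the values and 16 kHz rate are assumptions, and the example relies on the same module-level fftconvolve/resample imports used by the function above):

# Illustrative only: convolving a mono signal with a short, explicit impulse response.
import numpy as np

rng = np.random.default_rng(0)
dry = rng.standard_normal(16000).astype(np.float32)         # assumed 1 s at 16 kHz
impulse = np.array([1.0, 0.0, 0.0, 0.5], dtype=np.float32)  # a toy echo-like filter
wet, sr = fft_convolve(dry, sample_rate=16000, impulse_audio=impulse, normalize=True)
# sr is unchanged; with normalize=True the peak amplitude of `wet` is 1.0.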
If audio is a str, loads the audio as an np.ndarray and returns that & the audio's sample rate (returned by librosa.load()). If audio is an np.ndarray, just returns the passed in audio & sample_rate.
def validate_and_load_audio(
    audio: Union[str, np.ndarray], sample_rate: int = utils.DEFAULT_SAMPLE_RATE
) -> Tuple[np.ndarray, int]:
    """
    If audio is a str, loads the audio as an np.ndarray and returns that & the
    audio's sample rate (returned by librosa.load()). If audio is an np.ndarray,
    just returns the passed in audio & sample_rate.
    """
    if isinstance(audio, str):
        local_path = utils.pathmgr.get_local_path(audio)
        utils.validate_audio_path(local_path)
        return librosa.load(local_path, sr=None, mono=False)

    assert isinstance(
        audio, np.ndarray
    ), "Expected type np.ndarray for variable 'audio'"
    assert (
        isinstance(sample_rate, int) and sample_rate > 0
    ), "Expected 'sample_rate' to be a positive integer"

    return audio, sample_rate
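A sketch of the ndarray branch (the path branch, not shown, would load the file with librosa at its native sample rate); the array and 44.1 kHz rate below are assumptions:

# Illustrative only: passing an ndarray is a pass-through apart from validation.
import numpy as np

samples = np.zeros(100, dtype=np.float32)
audio, sr = validate_and_load_audio(samples, sample_rate=44100)
assert audio is samples and sr == 44100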
Turn seed into a np.random.RandomState instance @param seed: instance of RandomState: If seed is None, return the RandomState singleton used by np.random. If seed is an int, return a new RandomState instance seeded with seed. If seed is already a RandomState instance, return it. Otherwise raise ValueError.
def check_random_state(seed: Optional[RNGSeed]) -> RNG:
    """
    Turn seed into a np.random.RandomState instance

    @param seed: instance of RandomState:
        If seed is None, return the RandomState singleton used by np.random.
        If seed is an int, return a new RandomState instance seeded with seed.
        If seed is already a RandomState instance, return it.
        Otherwise raise ValueError.
    """
    if seed is None or seed is np.random:
        return np.random.mtrand._rand
    if isinstance(seed, numbers.Integral):
        return np.random.RandomState(seed)
    if isinstance(seed, (np.random.RandomState, np.random.Generator)):
        return seed
    raise ValueError(
        f"{seed} cannot be used to seed a numpy.random.RandomState instance"
    )
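A short sketch of the accepted seed forms, following the branches above:

# Illustrative only: the three accepted seed forms.
import numpy as np

check_random_state(None)                      # the global np.random state
check_random_state(7)                         # a fresh RandomState seeded with 7
check_random_state(np.random.RandomState(7))  # returned unchanged
# Anything else (e.g. a string) raises ValueError.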
This function performs the logic of computing the new matching segments based on the old ones, for the set of transforms that temporally change the audio. Returns the lists of new src segments & dst segments, respectively.
def compute_changed_segments(
    name: str,
    src_segments: List[Segment],
    dst_segments: List[Segment],
    src_duration: float,
    dst_duration: float,
    speed_factor: float,
    **kwargs,
) -> Tuple[List[Segment], List[Segment]]:
    """
    This function performs the logic of computing the new matching segments
    based on the old ones, for the set of transforms that temporally change
    the audio.

    Returns the lists of new src segments & dst segments, respectively.
    """
    new_src_segments, new_dst_segments = [], []
    for src_segment, dst_segment in zip(src_segments, dst_segments):
        if name == "insert_in_background":
            offset = kwargs["offset_factor"] * kwargs["background_duration"]
            # The matching segments are just offset in the dst audio by the
            # amount of background audio inserted before the src audio.
            new_src_segments.append(src_segment)
            new_dst_segments.append(dst_segment.delta(offset, offset))
        elif name == "clip":
            crop_start = kwargs["offset_factor"] * src_duration
            crop_end = crop_start + kwargs["duration_factor"] * src_duration
            utils.compute_time_crop_segments(
                src_segment,
                dst_segment,
                speed_factor,
                crop_start,
                crop_end,
                new_src_segments,
                new_dst_segments,
            )
        elif name == "fft_convolve":
            new_src_segments.append(src_segment)
            new_dst_segments.append(Segment(dst_segment.start, dst_duration))
        elif name in ["speed", "tempo", "time_stretch"]:
            # speed_factor > 1 if speedup, < 1 if slow down
            speed_factor = src_duration / dst_duration
            new_src_segments.append(src_segment)
            new_dst_segments.append(
                Segment(
                    dst_segment.start / speed_factor, dst_segment.end / speed_factor
                )
            )
    return new_src_segments, new_dst_segments
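A sketch of the speed/tempo/time_stretch branch, assuming Segment takes (start, end) positionally as it is constructed in the function body; the durations and segment values are made up for illustration.

# Illustrative only: a 2x speed-up halves the matching dst segment.
src_segs = [Segment(2.0, 6.0)]
dst_segs = [Segment(2.0, 6.0)]
new_src, new_dst = compute_changed_segments(
    name="tempo",
    src_segments=src_segs,
    dst_segments=dst_segs,
    src_duration=10.0,
    dst_duration=5.0,  # the augmented audio is half as long
    speed_factor=1.0,  # recomputed inside as src_duration / dst_duration
)
# new_src == [Segment(2.0, 6.0)]; new_dst == [Segment(1.0, 3.0)]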
Apply a user-defined lambda on an image @param image: the path to an image or a variable of type PIL.Image.Image to be augmented @param output_path: the path in which the resulting image will be stored. If None, the resulting PIL Image will still be returned @param aug_function: the augmentation function to be applied onto the image (should expect a PIL image as input and return one) @param **kwargs: the input attributes to be passed into the augmentation function to be applied @param metadata: if set to be a list, metadata about the function execution including its name, the source & dest width, height, etc. will be appended to the inputted list. If set to None, no metadata will be appended or returned @param bboxes: a list of bounding boxes can be passed in here if desired. If provided, this list will be modified in place such that each bounding box is transformed according to this function @param bbox_format: signifies what bounding box format was used in `bboxes`. Must specify `bbox_format` if `bboxes` is provided. Supported bbox_format values are "pascal_voc", "pascal_voc_norm", "coco", and "yolo" @returns: the augmented PIL Image
def apply_lambda(
    image: Union[str, Image.Image],
    output_path: Optional[str] = None,
    aug_function: Callable[..., Image.Image] = lambda x: x,
    metadata: Optional[List[Dict[str, Any]]] = None,
    bboxes: Optional[List[Tuple]] = None,
    bbox_format: Optional[str] = None,
    **kwargs,
) -> Image.Image:
    """
    Apply a user-defined lambda on an image

    @param image: the path to an image or a variable of type PIL.Image.Image to
        be augmented
    @param output_path: the path in which the resulting image will be stored.
        If None, the resulting PIL Image will still be returned
    @param aug_function: the augmentation function to be applied onto the image
        (should expect a PIL image as input and return one)
    @param **kwargs: the input attributes to be passed into the augmentation
        function to be applied
    @param metadata: if set to be a list, metadata about the function execution
        including its name, the source & dest width, height, etc. will be
        appended to the inputted list. If set to None, no metadata will be
        appended or returned
    @param bboxes: a list of bounding boxes can be passed in here if desired.
        If provided, this list will be modified in place such that each bounding
        box is transformed according to this function
    @param bbox_format: signifies what bounding box format was used in `bboxes`.
        Must specify `bbox_format` if `bboxes` is provided. Supported
        bbox_format values are "pascal_voc", "pascal_voc_norm", "coco", and
        "yolo"
    @returns: the augmented PIL Image
    """
    assert callable(aug_function), (
        repr(type(aug_function).__name__) + " object is not callable"
    )

    image = imutils.validate_and_load_image(image)

    func_kwargs = deepcopy(locals())
    if aug_function is not None:
        try:
            func_kwargs["aug_function"] = aug_function.__name__
        except AttributeError:
            func_kwargs["aug_function"] = type(aug_function).__name__
    func_kwargs = imutils.get_func_kwargs(metadata, func_kwargs)
    src_mode = image.mode

    aug_image = aug_function(image, **kwargs)

    imutils.get_metadata(
        metadata=metadata,
        function_name="apply_lambda",
        aug_image=aug_image,
        **func_kwargs,
    )

    return imutils.ret_and_save_image(aug_image, output_path, src_mode)
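A sketch of wrapping an arbitrary PIL operation; the rotate helper, its degrees parameter, and the input path "example.png" are assumptions for illustration, not part of the library.

# Illustrative only: passing a named callable plus its kwargs through apply_lambda.
from PIL import Image

def rotate_slightly(img: Image.Image, degrees: float = 5.0) -> Image.Image:
    # hypothetical helper used only for this example
    return img.rotate(degrees)

meta = []
aug = apply_lambda(
    "example.png", aug_function=rotate_slightly, degrees=10.0, metadata=meta
)
# The metadata entry records the augmentation by name ("rotate_slightly")
# along with the kwargs that were passed.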