Normalize the signal to the target level.
def normalize(audio: torch.Tensor, target_level: int = -25) -> torch.Tensor:
    """Normalize the signal to the target level."""
    rms = rms_f(audio)
    scalar = 10 ** (target_level / 20) / (rms + EPS)
    audio = audio * scalar.unsqueeze(1)
    return audio
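A quick sanity check of the scaling can help here: after `normalize`, the per-item RMS expressed in dBFS should sit at the requested target level. This is a minimal sketch, assuming `rms_f` computes the per-item RMS over the time axis and `EPS` is a small constant defined in the surrounding module.

import torch

x = torch.randn(2, 16000)                      # [B, T] batch of waveforms
y = normalize(x, target_level=-25)
rms_db = 20 * torch.log10(y.pow(2).mean(dim=1).sqrt())
print(rms_db)                                  # expected: approximately tensor([-25., -25.])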
Function to mix clean speech and noise at various SNR levels. Args: clean (torch.Tensor): Clean audio source to mix, of shape [B, T]. noise (torch.Tensor): Noise audio source to mix, of shape [B, T]. snr (int): SNR level when mixing. min_overlap (float): Minimum overlap between the two mixed sources. target_level (int): Gain level in dB. clipping_threshold (float): Threshold for clipping the audio. Returns: torch.Tensor: The mixed audio, of shape [B, T].
def snr_mixer(clean: torch.Tensor, noise: torch.Tensor, snr: int, min_overlap: float, target_level: int = -25, clipping_threshold: float = 0.99) -> torch.Tensor: """Function to mix clean speech and noise at various SNR levels. Args: clean (torch.Tensor): Clean audio source to mix, of shape [B, T]. noise (torch.Tensor): Noise audio source to mix, of shape [B, T]. snr (int): SNR level when mixing. min_overlap (float): Minimum overlap between the two mixed sources. target_level (int): Gain level in dB. clipping_threshold (float): Threshold for clipping the audio. Returns: torch.Tensor: The mixed audio, of shape [B, T]. """ if clean.shape[1] > noise.shape[1]: noise = torch.nn.functional.pad(noise, (0, clean.shape[1] - noise.shape[1])) else: noise = noise[:, :clean.shape[1]] # normalizing to -25 dB FS clean = clean / (clean.max(1)[0].abs().unsqueeze(1) + EPS) clean = normalize(clean, target_level) rmsclean = rms_f(clean) noise = noise / (noise.max(1)[0].abs().unsqueeze(1) + EPS) noise = normalize(noise, target_level) rmsnoise = rms_f(noise) # set the noise level for a given SNR noisescalar = (rmsclean / (10 ** (snr / 20)) / (rmsnoise + EPS)).unsqueeze(1) noisenewlevel = noise * noisescalar # mix noise and clean speech noisyspeech = mix_pair(clean, noisenewlevel, min_overlap) # randomly select RMS value between -15 dBFS and -35 dBFS and normalize noisyspeech with that value # there is a chance of clipping that might happen with very less probability, which is not a major issue. noisy_rms_level = np.random.randint(TARGET_LEVEL_LOWER, TARGET_LEVEL_UPPER) rmsnoisy = rms_f(noisyspeech) scalarnoisy = (10 ** (noisy_rms_level / 20) / (rmsnoisy + EPS)).unsqueeze(1) noisyspeech = noisyspeech * scalarnoisy clean = clean * scalarnoisy noisenewlevel = noisenewlevel * scalarnoisy # final check to see if there are any amplitudes exceeding +/- 1. If so, normalize all the signals accordingly clipped = is_clipped(noisyspeech) if clipped.any(): noisyspeech_maxamplevel = noisyspeech[clipped].max(1)[0].abs().unsqueeze(1) / (clipping_threshold - EPS) noisyspeech[clipped] = noisyspeech[clipped] / noisyspeech_maxamplevel return noisyspeech
Mix text from different sources by concatenating them.
def mix_text(src_text: str, dst_text: str):
    """Mix text from different sources by concatenating them."""
    if src_text == dst_text:
        return src_text
    return src_text + " " + dst_text
Mix samples within a batch, summing the waveforms and concatenating the text infos. Args: wavs (torch.Tensor): Audio tensors of shape [B, C, T]. infos (list[SoundInfo]): List of SoundInfo items corresponding to the audio. aug_p (float): Augmentation probability. mix_p (float): Proportion of items in the batch to mix (and merge) together. snr_low (int): Lowerbound for sampling SNR. snr_high (int): Upperbound for sampling SNR. min_overlap (float): Minimum overlap between mixed samples. Returns: tuple[torch.Tensor, list[SoundInfo]]: A tuple containing the mixed wavs and mixed SoundInfo for the given batch.
def mix_samples(wavs: torch.Tensor, infos: tp.List[SoundInfo], aug_p: float, mix_p: float, snr_low: int, snr_high: int, min_overlap: float): """Mix samples within a batch, summing the waveforms and concatenating the text infos. Args: wavs (torch.Tensor): Audio tensors of shape [B, C, T]. infos (list[SoundInfo]): List of SoundInfo items corresponding to the audio. aug_p (float): Augmentation probability. mix_p (float): Proportion of items in the batch to mix (and merge) together. snr_low (int): Lowerbound for sampling SNR. snr_high (int): Upperbound for sampling SNR. min_overlap (float): Minimum overlap between mixed samples. Returns: tuple[torch.Tensor, list[SoundInfo]]: A tuple containing the mixed wavs and mixed SoundInfo for the given batch. """ # no mixing to perform within the batch if mix_p == 0: return wavs, infos if random.uniform(0, 1) < aug_p: # perform all augmentations on waveforms as [B, T] # randomly picking pairs of audio to mix assert wavs.size(1) == 1, f"Mix samples requires monophonic audio but C={wavs.size(1)}" wavs = wavs.mean(dim=1, keepdim=False) B, T = wavs.shape k = int(mix_p * B) mixed_sources_idx = torch.randperm(B)[:k] mixed_targets_idx = torch.randperm(B)[:k] aug_wavs = snr_mix( wavs[mixed_sources_idx], wavs[mixed_targets_idx], snr_low, snr_high, min_overlap, ) # mixing textual descriptions in metadata descriptions = [info.description for info in infos] aug_infos = [] for i, j in zip(mixed_sources_idx, mixed_targets_idx): text = mix_text(descriptions[i], descriptions[j]) m = replace(infos[i]) m.description = text aug_infos.append(m) # back to [B, C, T] aug_wavs = aug_wavs.unsqueeze(1) assert aug_wavs.shape[0] > 0, "Samples mixing returned empty batch." assert aug_wavs.dim() == 3, f"Returned wav should be [B, C, T] but dim = {aug_wavs.dim()}" assert aug_wavs.shape[0] == len(aug_infos), "Mismatch between number of wavs and infos in the batch" return aug_wavs, aug_infos # [B, C, T] else: # randomly pick samples in the batch to match # the batch size when performing audio mixing B, C, T = wavs.shape k = int(mix_p * B) wav_idx = torch.randperm(B)[:k] wavs = wavs[wav_idx] infos = [infos[i] for i in wav_idx] assert wavs.shape[0] == len(infos), "Mismatch between number of wavs and infos in the batch" return wavs, infos
Sets the maximal LRU caching for zip file opening. Args: max_size (int): the maximal LRU cache.
def set_zip_cache_size(max_size: int):
    """Sets the maximal LRU caching for zip file opening.

    Args:
        max_size (int): the maximal LRU cache.
    """
    global _cached_open_zip
    _cached_open_zip = lru_cache(max_size)(_open_zip)
Opens a file stored inside a zip and returns a file-like object. Args: path_in_zip (PathInZip): A PathInZip object representing the file to return a file-like object of. mode (str): The mode in which to open the file with. Returns: A file-like object for PathInZip.
def open_file_in_zip(path_in_zip: PathInZip, mode: str = 'r') -> typing.IO:
    """Opens a file stored inside a zip and returns a file-like object.

    Args:
        path_in_zip (PathInZip): A PathInZip object representing the file to return a file-like object of.
        mode (str): The mode in which to open the file with.
    Returns:
        A file-like object for PathInZip.
    """
    zf = _cached_open_zip(path_in_zip.zip_path)
    return zf.open(path_in_zip.file_path)
Return the amount of time since the Sheep made some update to its log. Returns a str using the relevant time unit.
def get_sheep_ping(sheep) -> tp.Optional[str]:
    """Return the amount of time since the Sheep made some update to its log.
    Returns a str using the relevant time unit."""
    ping = None
    if sheep.log is not None and sheep.log.exists():
        delta = time.time() - sheep.log.stat().st_mtime
        if delta > 3600 * 24:
            ping = f'{delta / (3600 * 24):.1f}d'
        elif delta > 3600:
            ping = f'{delta / (3600):.1f}h'
        elif delta > 60:
            ping = f'{delta / 60:.1f}m'
        else:
            ping = f'{delta:.1f}s'
    return ping
Given input of size [*OT, T], output Tensor of size [*OT, F, K] with K the kernel size, by extracting frames with the given stride. This will pad the input so that `F = ceil(T / K)`. see https://github.com/pytorch/pytorch/issues/60466
def _unfold(a: torch.Tensor, kernel_size: int, stride: int) -> torch.Tensor:
    """Given input of size [*OT, T], output Tensor of size [*OT, F, K]
    with K the kernel size, by extracting frames with the given stride.
    This will pad the input so that `F = ceil(T / K)`.
    see https://github.com/pytorch/pytorch/issues/60466
    """
    *shape, length = a.shape
    n_frames = math.ceil(length / stride)
    tgt_length = (n_frames - 1) * stride + kernel_size
    a = F.pad(a, (0, tgt_length - length))
    strides = list(a.stride())
    assert strides[-1] == 1, "data should be contiguous"
    strides = strides[:-1] + [stride, 1]
    return a.as_strided([*shape, n_frames, kernel_size], strides)
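To make the framing concrete, here is a small illustrative sketch (not from the source): a signal of length 10 framed with `kernel_size=4` and `stride=2` yields `ceil(10 / 2) = 5` frames, with zeros appended so the last frame is full.

import torch

a = torch.arange(10.)[None]          # [1, 10], contiguous so the stride trick applies
frames = _unfold(a, kernel_size=4, stride=2)
print(frames.shape)                  # torch.Size([1, 5, 4])
print(frames[0, -1])                 # tensor([8., 9., 0., 0.]) -- zero-padded tail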
Perform STFT and convert to magnitude spectrogram. Args: x: Input signal tensor (B, C, T). fft_size (int): FFT size. hop_length (int): Hop size. win_length (int): Window length. window (torch.Tensor or None): Window function type. normalized (bool): Whether to normalize the STFT or not. Returns: torch.Tensor: Magnitude spectrogram (B, C, #frames, fft_size // 2 + 1).
def _stft(x: torch.Tensor, fft_size: int, hop_length: int, win_length: int,
          window: tp.Optional[torch.Tensor], normalized: bool) -> torch.Tensor:
    """Perform STFT and convert to magnitude spectrogram.

    Args:
        x: Input signal tensor (B, C, T).
        fft_size (int): FFT size.
        hop_length (int): Hop size.
        win_length (int): Window length.
        window (torch.Tensor or None): Window function type.
        normalized (bool): Whether to normalize the STFT or not.
    Returns:
        torch.Tensor: Magnitude spectrogram (B, C, #frames, fft_size // 2 + 1).
    """
    B, C, T = x.shape
    x_stft = torch.stft(
        x.view(-1, T), fft_size, hop_length, win_length, window,
        normalized=normalized, return_complex=True,
    )
    x_stft = x_stft.view(B, C, *x_stft.shape[1:])
    real = x_stft.real
    imag = x_stft.imag
    # NOTE(kan-bayashi): clamp is needed to avoid nan or inf
    return torch.sqrt(torch.clamp(real ** 2 + imag ** 2, min=1e-7)).transpose(2, 1)
Computes the elementwise KL-Divergence loss between probability distributions from generated samples and target samples. Args: pred_probs (torch.Tensor): Probabilities for each label obtained from a classifier on generated audio. Expected shape is [B, num_classes]. target_probs (torch.Tensor): Probabilities for each label obtained from a classifier on target audio. Expected shape is [B, num_classes]. epsilon (float): Epsilon value. Returns: kld (torch.Tensor): KLD loss between each generated sample and target pair.
def kl_divergence(pred_probs: torch.Tensor, target_probs: torch.Tensor, epsilon: float = 1e-6) -> torch.Tensor:
    """Computes the elementwise KL-Divergence loss between probability distributions
    from generated samples and target samples.

    Args:
        pred_probs (torch.Tensor): Probabilities for each label obtained
            from a classifier on generated audio. Expected shape is [B, num_classes].
        target_probs (torch.Tensor): Probabilities for each label obtained
            from a classifier on target audio. Expected shape is [B, num_classes].
        epsilon (float): Epsilon value.
    Returns:
        kld (torch.Tensor): KLD loss between each generated sample and target pair.
    """
    kl_div = torch.nn.functional.kl_div((pred_probs + epsilon).log(), target_probs, reduction="none")
    return kl_div.sum(-1)
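A hedged usage sketch: the inputs are per-class probabilities (for instance softmax outputs of an audio tagger), and the result is one KL value per generated/target pair. The class count of 527 is only illustrative.

import torch

pred_probs = torch.softmax(torch.randn(4, 527), dim=-1)    # [B, num_classes]
target_probs = torch.softmax(torch.randn(4, 527), dim=-1)
kld = kl_divergence(pred_probs, target_probs)
print(kld.shape)                                            # torch.Size([4])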
Instantiate a compression model.
def get_compression_model(cfg: omegaconf.DictConfig) -> CompressionModel: """Instantiate a compression model.""" if cfg.compression_model == 'encodec': kwargs = dict_from_config(getattr(cfg, 'encodec')) encoder_name = kwargs.pop('autoencoder') quantizer_name = kwargs.pop('quantizer') encoder, decoder = get_encodec_autoencoder(encoder_name, cfg) quantizer = get_quantizer(quantizer_name, cfg, encoder.dimension) frame_rate = kwargs['sample_rate'] // encoder.hop_length renormalize = kwargs.pop('renormalize', False) # deprecated params kwargs.pop('renorm', None) return EncodecModel(encoder, decoder, quantizer, frame_rate=frame_rate, renormalize=renormalize, **kwargs).to(cfg.device) else: raise KeyError(f"Unexpected compression model {cfg.compression_model}")
Instantiate a transformer LM.
def get_lm_model(cfg: omegaconf.DictConfig) -> LMModel: """Instantiate a transformer LM.""" if cfg.lm_model in ['transformer_lm', 'transformer_lm_magnet']: kwargs = dict_from_config(getattr(cfg, 'transformer_lm')) n_q = kwargs['n_q'] q_modeling = kwargs.pop('q_modeling', None) codebooks_pattern_cfg = getattr(cfg, 'codebooks_pattern') attribute_dropout = dict_from_config(getattr(cfg, 'attribute_dropout')) cls_free_guidance = dict_from_config(getattr(cfg, 'classifier_free_guidance')) cfg_prob, cfg_coef = cls_free_guidance['training_dropout'], cls_free_guidance['inference_coef'] fuser = get_condition_fuser(cfg) condition_provider = get_conditioner_provider(kwargs["dim"], cfg).to(cfg.device) if len(fuser.fuse2cond['cross']) > 0: # enforce cross-att programmatically kwargs['cross_attention'] = True if codebooks_pattern_cfg.modeling is None: assert q_modeling is not None, \ "LM model should either have a codebook pattern defined or transformer_lm.q_modeling" codebooks_pattern_cfg = omegaconf.OmegaConf.create( {'modeling': q_modeling, 'delay': {'delays': list(range(n_q))}} ) pattern_provider = get_codebooks_pattern_provider(n_q, codebooks_pattern_cfg) lm_class = MagnetLMModel if cfg.lm_model == 'transformer_lm_magnet' else LMModel return lm_class( pattern_provider=pattern_provider, condition_provider=condition_provider, fuser=fuser, cfg_dropout=cfg_prob, cfg_coef=cfg_coef, attribute_dropout=attribute_dropout, dtype=getattr(torch, cfg.dtype), device=cfg.device, **kwargs ).to(cfg.device) else: raise KeyError(f"Unexpected LM model {cfg.lm_model}")
Instantiate a conditioning model.
def get_conditioner_provider(output_dim: int, cfg: omegaconf.DictConfig) -> ConditioningProvider: """Instantiate a conditioning model.""" device = cfg.device duration = cfg.dataset.segment_duration cfg = getattr(cfg, 'conditioners') dict_cfg = {} if cfg is None else dict_from_config(cfg) conditioners: tp.Dict[str, BaseConditioner] = {} condition_provider_args = dict_cfg.pop('args', {}) condition_provider_args.pop('merge_text_conditions_p', None) condition_provider_args.pop('drop_desc_p', None) for cond, cond_cfg in dict_cfg.items(): model_type = cond_cfg['model'] model_args = cond_cfg[model_type] if model_type == 't5': conditioners[str(cond)] = T5Conditioner(output_dim=output_dim, device=device, **model_args) elif model_type == 'lut': conditioners[str(cond)] = LUTConditioner(output_dim=output_dim, **model_args) elif model_type == 'chroma_stem': conditioners[str(cond)] = ChromaStemConditioner( output_dim=output_dim, duration=duration, device=device, **model_args ) elif model_type == 'clap': conditioners[str(cond)] = CLAPEmbeddingConditioner( output_dim=output_dim, device=device, **model_args ) else: raise ValueError(f"Unrecognized conditioning model: {model_type}") conditioner = ConditioningProvider(conditioners, device=device, **condition_provider_args) return conditioner
Instantiate a condition fuser object.
def get_condition_fuser(cfg: omegaconf.DictConfig) -> ConditionFuser:
    """Instantiate a condition fuser object."""
    fuser_cfg = getattr(cfg, 'fuser')
    fuser_methods = ['sum', 'cross', 'prepend', 'input_interpolate']
    fuse2cond = {k: fuser_cfg[k] for k in fuser_methods}
    kwargs = {k: v for k, v in fuser_cfg.items() if k not in fuser_methods}
    fuser = ConditionFuser(fuse2cond=fuse2cond, **kwargs)
    return fuser
Instantiate a codebooks pattern provider object.
def get_codebooks_pattern_provider(n_q: int, cfg: omegaconf.DictConfig) -> CodebooksPatternProvider:
    """Instantiate a codebooks pattern provider object."""
    pattern_providers = {
        'parallel': ParallelPatternProvider,
        'delay': DelayedPatternProvider,
        'unroll': UnrolledPatternProvider,
        'coarse_first': CoarseFirstPattern,
        'musiclm': MusicLMPattern,
    }
    name = cfg.modeling
    kwargs = dict_from_config(cfg.get(name)) if hasattr(cfg, name) else {}
    klass = pattern_providers[name]
    return klass(n_q, **kwargs)
Instantiate a debug compression model to be used for unit tests.
def get_debug_compression_model(device='cpu', sample_rate: int = 32000): """Instantiate a debug compression model to be used for unit tests.""" assert sample_rate in [16000, 32000], "unsupported sample rate for debug compression model" model_ratios = { 16000: [10, 8, 8], # 25 Hz at 16kHz 32000: [10, 8, 16] # 25 Hz at 32kHz } ratios: tp.List[int] = model_ratios[sample_rate] frame_rate = 25 seanet_kwargs: dict = { 'n_filters': 4, 'n_residual_layers': 1, 'dimension': 32, 'ratios': ratios, } encoder = audiocraft.modules.SEANetEncoder(**seanet_kwargs) decoder = audiocraft.modules.SEANetDecoder(**seanet_kwargs) quantizer = qt.ResidualVectorQuantizer(dimension=32, bins=400, n_q=4) init_x = torch.randn(8, 32, 128) quantizer(init_x, 1) # initialize kmeans etc. compression_model = EncodecModel( encoder, decoder, quantizer, frame_rate=frame_rate, sample_rate=sample_rate, channels=1).to(device) return compression_model.eval()
Instantiate a debug LM to be used for unit tests.
def get_debug_lm_model(device='cpu'): """Instantiate a debug LM to be used for unit tests.""" pattern = DelayedPatternProvider(n_q=4) dim = 16 providers = { 'description': LUTConditioner(n_bins=128, dim=dim, output_dim=dim, tokenizer="whitespace"), } condition_provider = ConditioningProvider(providers) fuser = ConditionFuser( {'cross': ['description'], 'prepend': [], 'sum': [], 'input_interpolate': []}) lm = LMModel( pattern, condition_provider, fuser, n_q=4, card=400, dim=dim, num_heads=4, custom=True, num_layers=2, cross_attention=True, causal=True) return lm.to(device).eval()
LM layer initialization. Inspired from xlformers: https://github.com/fairinternal/xlformers Args: method (str): Method name for init function. Valid options are: 'gaussian', 'uniform'. input_dim (int): Input dimension of the initialized module. init_depth (int, optional): Optional init depth value used to rescale the standard deviation if defined.
def get_init_fn(method: str, input_dim: int, init_depth: tp.Optional[int] = None):
    """LM layer initialization.
    Inspired from xlformers: https://github.com/fairinternal/xlformers

    Args:
        method (str): Method name for init function. Valid options are:
            'gaussian', 'uniform'.
        input_dim (int): Input dimension of the initialized module.
        init_depth (int, optional): Optional init depth value used to rescale
            the standard deviation if defined.
    """
    # Compute std
    std = 1 / math.sqrt(input_dim)
    # Rescale with depth
    if init_depth is not None:
        std = std / math.sqrt(2 * init_depth)

    if method == 'gaussian':
        return partial(
            torch.nn.init.trunc_normal_, mean=0.0, std=std, a=-3 * std, b=3 * std
        )
    elif method == 'uniform':
        bound = math.sqrt(3) * std  # ensure the standard deviation is `std`
        return partial(torch.nn.init.uniform_, a=-bound, b=bound)
    else:
        raise ValueError("Unsupported layer initialization method")
Wrapper around ``get_init_fn`` for proper initialization of LM modules. Args: m (nn.Module): Module to initialize. method (str): Method name for the init function. init_depth (int, optional): Optional init depth value used to rescale the standard deviation if defined. zero_bias_init (bool): Whether to initialize the bias to 0 or not.
def init_layer(m: nn.Module, method: str, init_depth: tp.Optional[int] = None, zero_bias_init: bool = False): """Wrapper around ``get_init_fn`` for proper initialization of LM modules. Args: m (nn.Module): Module to initialize. method (str): Method name for the init function. init_depth (int, optional): Optional init depth value used to rescale the standard deviation if defined. zero_bias_init (bool): Whether to initialize the bias to 0 or not. """ if isinstance(m, nn.Linear): init_fn = get_init_fn(method, m.in_features, init_depth=init_depth) if m.weight.device.type == 'cpu' and m.weight.dtype == torch.float16: weight = m.weight.float() init_fn(weight) m.weight.data[:] = weight.half() else: init_fn(m.weight) if zero_bias_init and m.bias is not None: nn.init.constant_(m.bias, 0) elif isinstance(m, nn.Embedding): init_fn = get_init_fn(method, m.embedding_dim, init_depth=None) if m.weight.device.type == 'cpu' and m.weight.dtype == torch.float16: weight = m.weight.float() init_fn(weight) m.weight.data[:] = weight.half() else: init_fn(m.weight)
Helper function to map an activation string to the activation class. If the supplied activation is not a string that is recognized, the activation is passed back. Args: activation (str, or Callable[[Tensor], Tensor]): Activation to check
def get_activation_fn(
    activation: Union[str, Callable[[Tensor], Tensor]]
) -> Union[str, Callable[[Tensor], Tensor]]:
    """Helper function to map an activation string to the activation class.
    If the supplied activation is not a string that is recognized, the activation is passed back.

    Args:
        activation (str, or Callable[[Tensor], Tensor]): Activation to check
    """
    if isinstance(activation, str):
        if activation == "reglu":
            return ReGLU()
        elif activation == "geglu":
            return GeGLU()
        elif activation == "swiglu":
            return SwiGLU()
    return activation
Transform an input condition to a null condition. This is done by converting it to a single zero vector, similarly to how it is done inside WhiteSpaceTokenizer and NoopTokenizer. Args: condition (ConditionType): A tuple of condition and mask (tuple[torch.Tensor, torch.Tensor]) dim (int): The dimension that will be truncated (should be the time dimension) WARNING!: dim should not be the batch dimension! Returns: ConditionType: A tuple of null condition and mask
def nullify_condition(condition: ConditionType, dim: int = 1): """Transform an input condition to a null condition. The way it is done by converting it to a single zero vector similarly to how it is done inside WhiteSpaceTokenizer and NoopTokenizer. Args: condition (ConditionType): A tuple of condition and mask (tuple[torch.Tensor, torch.Tensor]) dim (int): The dimension that will be truncated (should be the time dimension) WARNING!: dim should not be the batch dimension! Returns: ConditionType: A tuple of null condition and mask """ assert dim != 0, "dim cannot be the batch dimension!" assert isinstance(condition, tuple) and \ isinstance(condition[0], torch.Tensor) and \ isinstance(condition[1], torch.Tensor), "'nullify_condition' got an unexpected input type!" cond, mask = condition B = cond.shape[0] last_dim = cond.dim() - 1 out = cond.transpose(dim, last_dim) out = 0. * out[..., :1] out = out.transpose(dim, last_dim) mask = torch.zeros((B, 1), device=out.device).int() assert cond.dim() == out.dim() return out, mask
Transform a WavCondition to a nullified WavCondition. It replaces the wav by a null tensor, forces its length to 0, and replaces metadata by dummy attributes. Args: cond (WavCondition): Wav condition with wav, tensor of shape [B, T]. Returns: WavCondition: Nullified wav condition.
def nullify_wav(cond: WavCondition) -> WavCondition:
    """Transform a WavCondition to a nullified WavCondition.
    It replaces the wav by a null tensor, forces its length to 0, and replaces metadata by dummy attributes.

    Args:
        cond (WavCondition): Wav condition with wav, tensor of shape [B, T].
    Returns:
        WavCondition: Nullified wav condition.
    """
    null_wav, _ = nullify_condition((cond.wav, torch.zeros_like(cond.wav)), dim=cond.wav.dim() - 1)
    return WavCondition(
        wav=null_wav,
        length=torch.tensor([0] * cond.wav.shape[0], device=cond.wav.device),
        sample_rate=cond.sample_rate,
        path=[None] * cond.wav.shape[0],
        seek_time=[None] * cond.wav.shape[0],
    )
Nullify the joint embedding condition by replacing it by a null tensor, forcing its length to 0, and replacing metadata by dummy attributes. Args: embed (JointEmbedCondition): Joint embedding condition with wav and text, wav tensor of shape [B, C, T].
def nullify_joint_embed(embed: JointEmbedCondition) -> JointEmbedCondition:
    """Nullify the joint embedding condition by replacing it by a null tensor, forcing its length to 0,
    and replacing metadata by dummy attributes.

    Args:
        embed (JointEmbedCondition): Joint embedding condition with wav and text, wav tensor of shape [B, C, T].
    """
    null_wav, _ = nullify_condition((embed.wav, torch.zeros_like(embed.wav)), dim=embed.wav.dim() - 1)
    return JointEmbedCondition(
        wav=null_wav, text=[None] * len(embed.text),
        length=torch.LongTensor([0]).to(embed.wav.device),
        sample_rate=embed.sample_rate,
        path=[None] * embed.wav.shape[0],
        seek_time=[0] * embed.wav.shape[0],
    )
Utility function for nullifying an attribute inside a ConditioningAttributes object. If the condition is of type "wav", then nullify it using the `nullify_condition` function. If the condition is of any other type, set its value to None. Works in-place.
def dropout_condition(sample: ConditioningAttributes, condition_type: str, condition: str) -> ConditioningAttributes: """Utility function for nullifying an attribute inside an ConditioningAttributes object. If the condition is of type "wav", then nullify it using `nullify_condition` function. If the condition is of any other type, set its value to None. Works in-place. """ if condition_type not in ['text', 'wav', 'joint_embed']: raise ValueError( "dropout_condition got an unexpected condition type!" f" expected 'text', 'wav' or 'joint_embed' but got '{condition_type}'" ) if condition not in getattr(sample, condition_type): raise ValueError( "dropout_condition received an unexpected condition!" f" expected wav={sample.wav.keys()} and text={sample.text.keys()}" f" but got '{condition}' of type '{condition_type}'!" ) if condition_type == 'wav': wav_cond = sample.wav[condition] sample.wav[condition] = nullify_wav(wav_cond) elif condition_type == 'joint_embed': embed = sample.joint_embed[condition] sample.joint_embed[condition] = nullify_joint_embed(embed) else: sample.text[condition] = None return sample
Return the proper normalization module. If causal is True, this will ensure the returned module is causal, or raise an error if the normalization doesn't support causal evaluation.
def get_norm_module(module: nn.Module, causal: bool = False, norm: str = 'none', **norm_kwargs):
    """Return the proper normalization module. If causal is True, this will ensure the
    returned module is causal, or raise an error if the normalization doesn't support causal evaluation.
    """
    assert norm in CONV_NORMALIZATIONS
    if norm == 'time_group_norm':
        if causal:
            raise ValueError("GroupNorm doesn't support causal evaluation.")
        assert isinstance(module, nn.modules.conv._ConvNd)
        return nn.GroupNorm(1, module.out_channels, **norm_kwargs)
    else:
        return nn.Identity()
See `pad_for_conv1d`.
def get_extra_padding_for_conv1d(x: torch.Tensor, kernel_size: int, stride: int,
                                 padding_total: int = 0) -> int:
    """See `pad_for_conv1d`."""
    length = x.shape[-1]
    n_frames = (length - kernel_size + padding_total) / stride + 1
    ideal_length = (math.ceil(n_frames) - 1) * stride + (kernel_size - padding_total)
    return ideal_length - length
Pad for a convolution to make sure that the last window is full. Extra padding is added at the end. This is required to ensure that we can rebuild an output of the same length, as otherwise, even with padding, some time steps might get removed. For instance, with total padding = 4, kernel size = 4, stride = 2: 0 0 1 2 3 4 5 0 0 # (0s are padding) 1 2 3 # (output frames of a convolution, last 0 is never used) 0 0 1 2 3 4 5 0 # (output of tr. conv., but pos. 5 is going to get removed as padding) 1 2 3 4 # once you removed padding, we are missing one time step !
def pad_for_conv1d(x: torch.Tensor, kernel_size: int, stride: int, padding_total: int = 0):
    """Pad for a convolution to make sure that the last window is full.
    Extra padding is added at the end. This is required to ensure that we can rebuild
    an output of the same length, as otherwise, even with padding, some time steps
    might get removed.
    For instance, with total padding = 4, kernel size = 4, stride = 2:
        0 0 1 2 3 4 5 0 0   # (0s are padding)
        1   2   3           # (output frames of a convolution, last 0 is never used)
        0 0 1 2 3 4 5 0     # (output of tr. conv., but pos. 5 is going to get removed as padding)
            1 2 3 4         # once you removed padding, we are missing one time step !
    """
    extra_padding = get_extra_padding_for_conv1d(x, kernel_size, stride, padding_total)
    return F.pad(x, (0, extra_padding))
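Working through the docstring's arithmetic with a concrete tensor (illustrative only): for T = 5, kernel_size = 4, stride = 2 and padding_total = 4, we get (5 - 4 + 4) / 2 + 1 = 3.5 frames, so the ideal length is (ceil(3.5) - 1) * 2 + (4 - 4) = 6 and one extra sample is appended.

import torch

x = torch.randn(1, 1, 5)
extra = get_extra_padding_for_conv1d(x, kernel_size=4, stride=2, padding_total=4)
print(extra)                                              # 1
print(pad_for_conv1d(x, 4, 2, padding_total=4).shape)     # torch.Size([1, 1, 6])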
Tiny wrapper around F.pad, just to allow for reflect padding on small input. If this is the case, we insert extra 0 padding to the right before the reflection happens.
def pad1d(x: torch.Tensor, paddings: tp.Tuple[int, int], mode: str = 'constant', value: float = 0.):
    """Tiny wrapper around F.pad, just to allow for reflect padding on small input.
    If this is the case, we insert extra 0 padding to the right before the reflection happens.
    """
    length = x.shape[-1]
    padding_left, padding_right = paddings
    assert padding_left >= 0 and padding_right >= 0, (padding_left, padding_right)
    if mode == 'reflect':
        max_pad = max(padding_left, padding_right)
        extra_pad = 0
        if length <= max_pad:
            extra_pad = max_pad - length + 1
            x = F.pad(x, (0, extra_pad))
        padded = F.pad(x, paddings, mode, value)
        end = padded.shape[-1] - extra_pad
        return padded[..., :end]
    else:
        return F.pad(x, paddings, mode, value)
Remove padding from x, handling properly zero padding. Only for 1d!
def unpad1d(x: torch.Tensor, paddings: tp.Tuple[int, int]):
    """Remove padding from x, handling properly zero padding. Only for 1d!"""
    padding_left, padding_right = paddings
    assert padding_left >= 0 and padding_right >= 0, (padding_left, padding_right)
    assert (padding_left + padding_right) <= x.shape[-1]
    end = x.shape[-1] - padding_right
    return x[..., padding_left: end]
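The two helpers compose into a round trip; the sketch below (illustrative, not from the source) reflect-pads a signal shorter than the requested padding and recovers it with `unpad1d`.

import torch

x = torch.randn(1, 1, 3)                   # only 3 samples, but padding of 4 per side
padded = pad1d(x, (4, 4), mode='reflect')  # temporary zero padding makes reflect valid
print(padded.shape)                        # torch.Size([1, 1, 11])
print(torch.allclose(unpad1d(padded, (4, 4)), x))   # True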
Create normalization module for transformer encoder layer. Args: norm_type (str): Normalization method. dim (int): Dimension of the normalized layer. **kwargs (dict): Additional parameters for normalization layer. Returns: nn.Module: Normalization module.
def create_norm_fn(norm_type: str, dim: int, **kwargs) -> nn.Module:
    """Create normalization module for transformer encoder layer.

    Args:
        norm_type (str): Normalization method.
        dim (int): Dimension of the normalized layer.
        **kwargs (dict): Additional parameters for normalization layer.
    Returns:
        nn.Module: Normalization module.
    """
    if norm_type == 'layer_norm':
        return nn.LayerNorm(dim, eps=1e-5, **kwargs)
    else:
        raise ValueError(f"Unknown norm type: {norm_type}")
Create sinusoidal positional embedding, with shape `[B, T, C]`. Args: positions (torch.Tensor): LongTensor of positions. dim (int): Dimension of the embedding. max_period (float): Maximum period of the cosine/sine functions. dtype (torch.dtype or str): dtype to use to generate the embedding. Returns: torch.Tensor: Sinusoidal positional embedding.
def create_sin_embedding(positions: torch.Tensor, dim: int, max_period: float = 10000,
                         dtype: torch.dtype = torch.float32) -> torch.Tensor:
    """Create sinusoidal positional embedding, with shape `[B, T, C]`.

    Args:
        positions (torch.Tensor): LongTensor of positions.
        dim (int): Dimension of the embedding.
        max_period (float): Maximum period of the cosine/sine functions.
        dtype (torch.dtype or str): dtype to use to generate the embedding.
    Returns:
        torch.Tensor: Sinusoidal positional embedding.
    """
    # We aim for BTC format
    assert dim % 2 == 0
    half_dim = dim // 2
    positions = positions.to(dtype)
    adim = torch.arange(half_dim, device=positions.device, dtype=dtype).view(1, 1, -1)
    max_period_tensor = torch.full([], max_period, device=positions.device, dtype=dtype)  # avoid sync point
    phase = positions / (max_period_tensor ** (adim / (half_dim - 1)))
    return torch.cat([torch.cos(phase), torch.sin(phase)], dim=-1)
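A minimal usage sketch, assuming positions come as a [B, T, 1] tensor of frame indices, which matches the [B, T, C] output described in the docstring.

import torch

B, T, C = 2, 50, 512
positions = torch.arange(T).view(1, -1, 1).expand(B, -1, -1)   # [B, T, 1]
pos_emb = create_sin_embedding(positions, dim=C)
print(pos_emb.shape)                                           # torch.Size([2, 50, 512])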
torch.repeat_interleave(x, dim=2, repeats=n_rep) from xlformers.
def expand_repeated_kv(x: torch.Tensor, n_rep: int, memory_efficient: bool) -> torch.Tensor:
    """torch.repeat_interleave(x, dim=2, repeats=n_rep) from xlformers."""
    if n_rep == 1:
        return x
    if _efficient_attention_backend == 'torch' and memory_efficient:
        bs, n_kv_heads, slen, head_dim = x.shape
        return (
            x[:, :, None, :, :]
            .expand(bs, n_kv_heads, n_rep, slen, head_dim)
            .reshape(bs, n_kv_heads * n_rep, slen, head_dim)
        )
    else:
        bs, slen, n_kv_heads, head_dim = x.shape
        return (
            x[:, :, :, None, :]
            .expand(bs, slen, n_kv_heads, n_rep, head_dim)
            .reshape(bs, slen, n_kv_heads * n_rep, head_dim)
        )
Return whether we are using FSDP.
def is_fsdp_used() -> bool:
    """Return whether we are using FSDP."""
    # A bit of a hack but should work from anywhere.
    if dora.is_xp():
        cfg = dora.get_xp().cfg
        if hasattr(cfg, 'fsdp'):
            return cfg.fsdp.use
    return False
Wraps a model with FSDP.
def wrap_with_fsdp(cfg, model: torch.nn.Module, block_classes: tp.Optional[tp.Set[tp.Type]] = None) -> FSDP: """Wraps a model with FSDP.""" # Some of the typing is disabled until this gets integrated # into the stable version of PyTorch. from torch.distributed.fsdp.wrap import ModuleWrapPolicy # type: ignore # we import this here to prevent circular import. from ..modules.transformer import StreamingTransformerLayer from ..modules.conditioners import ConditioningProvider _fix_post_backward_hook() assert cfg.use sharding_strategy_dict = { "no_shard": ShardingStrategy.NO_SHARD, "shard_grad_op": ShardingStrategy.SHARD_GRAD_OP, "full_shard": ShardingStrategy.FULL_SHARD, } dtype_dict = { "float32": torch.float32, "float16": torch.float16, "bfloat16": torch.bfloat16, } mixed_precision_config = MixedPrecision( param_dtype=dtype_dict[cfg.param_dtype], reduce_dtype=dtype_dict[cfg.reduce_dtype], buffer_dtype=dtype_dict[cfg.buffer_dtype], ) sharding_strategy_config = sharding_strategy_dict[cfg.sharding_strategy] # The following is going to require being a bit smart # when doing LM, because this would flush the weights for every time step # during generation. One possiblity is to use hybrid sharding: # See: https://pytorch.org/docs/master/fsdp.html#torch.distributed.fsdp.ShardingStrategy assert sharding_strategy_config != ShardingStrategy.FULL_SHARD, \ "Not supported at the moment, requires a bit more work." local_rank = dora.distrib.get_distrib_spec().local_rank assert local_rank < torch.cuda.device_count(), "Please upgrade Dora!" auto_wrap_policy = None if block_classes is None: block_classes = {StreamingTransformerLayer, ConditioningProvider} if cfg.per_block: auto_wrap_policy = ModuleWrapPolicy(block_classes) wrapped = _FSDPFixStateDict( model, sharding_strategy=sharding_strategy_config, mixed_precision=mixed_precision_config, device_id=local_rank, sync_module_states=True, use_orig_params=True, auto_wrap_policy=auto_wrap_policy, ) # type: ignore FSDP.set_state_dict_type(wrapped, StateDictType.LOCAL_STATE_DICT) # type: ignore # Let the wrapped model know about the wrapping! # We use __dict__ to avoid it going into the state dict. # This is a bit dirty, but needed during generation, as otherwise # the wrapped model would call itself and bypass FSDP. for module in FSDP.fsdp_modules(wrapped): original = module._fsdp_wrapped_module original.__dict__['_fsdp'] = module return wrapped
Purge the FSDP cached shard inside the model. This should allow setting the best state or switching to the EMA.
def purge_fsdp(model: FSDP):
    """Purge the FSDP cached shard inside the model. This should allow setting the best state or
    switching to the EMA.
    """
    from torch.distributed.fsdp._runtime_utils import _reshard  # type: ignore
    for module in FSDP.fsdp_modules(model):
        handles = module._handles
        if not handles:
            continue
        handle = handles[0]
        unsharded_flat_param = handle._get_padded_unsharded_flat_param()
        storage_size: int = unsharded_flat_param._typed_storage()._size()  # type: ignore
        if storage_size == 0:
            continue
        true_list = [True for h in handles]
        _reshard(module, handles, true_list)
Instantiate solver from config.
def get_solver(cfg: omegaconf.DictConfig) -> StandardSolver:
    """Instantiate solver from config."""
    from .audiogen import AudioGenSolver
    from .compression import CompressionSolver
    from .musicgen import MusicGenSolver
    from .diffusion import DiffusionSolver
    from .magnet import MagnetSolver, AudioMagnetSolver
    klass = {
        'compression': CompressionSolver,
        'musicgen': MusicGenSolver,
        'audiogen': AudioGenSolver,
        'magnet': MagnetSolver,
        'audio_magnet': AudioMagnetSolver,
        'lm': MusicGenSolver,  # backward compatibility
        'diffusion': DiffusionSolver,
        'sound_lm': AudioGenSolver,  # backward compatibility
    }[cfg.solver]
    return klass(cfg)
Create parameter groups for the model, using the appropriate method if defined for each module to create the different groups. Args: model (nn.Module): torch model Returns: List of parameter groups
def get_optim_parameter_groups(model: nn.Module): """Create parameter groups for the model using the appropriate method if defined for each modules, to create the different groups. Args: model (nn.Module): torch model Returns: List of parameter groups """ seen_params: tp.Set[nn.parameter.Parameter] = set() other_params = [] groups = [] for name, module in model.named_modules(): if hasattr(module, 'make_optim_group'): group = module.make_optim_group() params = set(group['params']) assert params.isdisjoint(seen_params) seen_params |= set(params) groups.append(group) for param in model.parameters(): if param not in seen_params: other_params.append(param) groups.insert(0, {'params': other_params}) parameters = groups return parameters
Build torch optimizer from config and set of parameters. Supported optimizers: Adam, AdamW Args: params (nn.Module or iterable of torch.Tensor): Parameters to optimize. cfg (DictConfig): Optimization-related configuration. Returns: torch.optim.Optimizer.
def get_optimizer(params: tp.Union[nn.Module, tp.Iterable[torch.Tensor]], cfg: omegaconf.DictConfig) -> Optimizer: """Build torch optimizer from config and set of parameters. Supported optimizers: Adam, AdamW Args: params (nn.Module or iterable of torch.Tensor): Parameters to optimize. cfg (DictConfig): Optimization-related configuration. Returns: torch.optim.Optimizer. """ if 'optimizer' not in cfg: if getattr(cfg, 'optim', None) is not None: raise KeyError("Optimizer not found in config. Try instantiating optimizer from cfg.optim?") else: raise KeyError("Optimizer not found in config.") parameters = get_optim_parameter_groups(params) if isinstance(params, nn.Module) else params optimizer: torch.optim.Optimizer if cfg.optimizer == 'adam': optimizer = torch.optim.Adam(parameters, lr=cfg.lr, **cfg.adam) elif cfg.optimizer == 'adamw': optimizer = torch.optim.AdamW(parameters, lr=cfg.lr, **cfg.adam) elif cfg.optimizer == 'dadam': optimizer = optim.DAdaptAdam(parameters, lr=cfg.lr, **cfg.adam) else: raise ValueError(f"Unsupported Optimizer: {cfg.optimizer}") return optimizer
Build torch learning rate scheduler from config and associated optimizer. Supported learning rate schedulers: ExponentialLRScheduler, PlateauLRScheduler Args: optimizer (torch.optim.Optimizer): Optimizer. cfg (DictConfig): Schedule-related configuration. total_updates (int): Total number of updates. Returns: LRScheduler, optional: Learning rate scheduler.
def get_lr_scheduler(optimizer: torch.optim.Optimizer, cfg: omegaconf.DictConfig, total_updates: int) -> tp.Optional[LRScheduler]: """Build torch learning rate scheduler from config and associated optimizer. Supported learning rate schedulers: ExponentialLRScheduler, PlateauLRScheduler Args: optimizer (torch.optim.Optimizer): Optimizer. cfg (DictConfig): Schedule-related configuration. total_updates (int): Total number of updates. Returns: torch.optim.Optimizer. """ if 'lr_scheduler' not in cfg: raise KeyError("LR Scheduler not found in config") lr_sched: tp.Optional[LRScheduler] = None if cfg.lr_scheduler == 'step': lr_sched = torch.optim.lr_scheduler.StepLR(optimizer, **cfg.step) elif cfg.lr_scheduler == 'exponential': lr_sched = torch.optim.lr_scheduler.ExponentialLR(optimizer, gamma=cfg.exponential) elif cfg.lr_scheduler == 'cosine': kwargs = dict_from_config(cfg.cosine) warmup_steps = kwargs.pop('warmup') lr_sched = optim.CosineLRScheduler( optimizer, warmup_steps=warmup_steps, total_steps=total_updates, **kwargs) elif cfg.lr_scheduler == 'polynomial_decay': kwargs = dict_from_config(cfg.polynomial_decay) warmup_steps = kwargs.pop('warmup') lr_sched = optim.PolynomialDecayLRScheduler( optimizer, warmup_steps=warmup_steps, total_steps=total_updates, **kwargs) elif cfg.lr_scheduler == 'inverse_sqrt': kwargs = dict_from_config(cfg.inverse_sqrt) warmup_steps = kwargs.pop('warmup') lr_sched = optim.InverseSquareRootLRScheduler(optimizer, warmup_steps=warmup_steps, **kwargs) elif cfg.lr_scheduler == 'linear_warmup': kwargs = dict_from_config(cfg.linear_warmup) warmup_steps = kwargs.pop('warmup') lr_sched = optim.LinearWarmupLRScheduler(optimizer, warmup_steps=warmup_steps, **kwargs) elif cfg.lr_scheduler is not None: raise ValueError(f"Unsupported LR Scheduler: {cfg.lr_scheduler}") return lr_sched
Initialize Exponential Moving Average. Args: module_dict (nn.ModuleDict): ModuleDict for which to compute the EMA. cfg (omegaconf.DictConfig): Optim EMA configuration. Returns: optim.ModuleDictEMA: EMA version of the ModuleDict.
def get_ema(module_dict: nn.ModuleDict, cfg: omegaconf.DictConfig) -> tp.Optional[optim.ModuleDictEMA]:
    """Initialize Exponential Moving Average.

    Args:
        module_dict (nn.ModuleDict): ModuleDict for which to compute the EMA.
        cfg (omegaconf.DictConfig): Optim EMA configuration.
    Returns:
        optim.ModuleDictEMA: EMA version of the ModuleDict.
    """
    kw: tp.Dict[str, tp.Any] = dict(cfg)
    use = kw.pop('use', False)
    decay = kw.pop('decay', None)
    device = kw.pop('device', None)
    if not use:
        return None
    if len(module_dict) == 0:
        raise ValueError("Trying to build EMA but an empty module_dict source is provided!")
    ema_module = optim.ModuleDictEMA(module_dict, decay=decay, device=device)
    return ema_module
Instantiate loss from configuration.
def get_loss(loss_name: str, cfg: omegaconf.DictConfig):
    """Instantiate loss from configuration."""
    klass = {
        'l1': torch.nn.L1Loss,
        'l2': torch.nn.MSELoss,
        'mel': losses.MelSpectrogramL1Loss,
        'mrstft': losses.MRSTFTLoss,
        'msspec': losses.MultiScaleMelSpectrogramLoss,
        'sisnr': losses.SISNR,
    }[loss_name]
    kwargs = dict(getattr(cfg, loss_name))
    return klass(**kwargs)
Instantiate loss balancer from configuration for the provided weights.
def get_balancer(loss_weights: tp.Dict[str, float], cfg: omegaconf.DictConfig) -> losses.Balancer:
    """Instantiate loss balancer from configuration for the provided weights."""
    kwargs: tp.Dict[str, tp.Any] = dict_from_config(cfg)
    return losses.Balancer(loss_weights, **kwargs)
Initialize adversary from config.
def get_adversary(name: str, cfg: omegaconf.DictConfig) -> nn.Module:
    """Initialize adversary from config."""
    klass = {
        'msd': adversarial.MultiScaleDiscriminator,
        'mpd': adversarial.MultiPeriodDiscriminator,
        'msstftd': adversarial.MultiScaleSTFTDiscriminator,
    }[name]
    adv_cfg: tp.Dict[str, tp.Any] = dict(getattr(cfg, name))
    return klass(**adv_cfg)
Initialize dict of adversarial losses from config.
def get_adversarial_losses(cfg) -> nn.ModuleDict: """Initialize dict of adversarial losses from config.""" device = cfg.device adv_cfg = getattr(cfg, 'adversarial') adversaries = adv_cfg.get('adversaries', []) adv_loss_name = adv_cfg['adv_loss'] feat_loss_name = adv_cfg.get('feat_loss') normalize = adv_cfg.get('normalize', True) feat_loss: tp.Optional[adversarial.FeatureMatchingLoss] = None if feat_loss_name: assert feat_loss_name in ['l1', 'l2'], f"Feature loss only support L1 or L2 but {feat_loss_name} found." loss = get_loss(feat_loss_name, cfg) feat_loss = adversarial.FeatureMatchingLoss(loss, normalize) loss = adversarial.get_adv_criterion(adv_loss_name) loss_real = adversarial.get_real_criterion(adv_loss_name) loss_fake = adversarial.get_fake_criterion(adv_loss_name) adv_losses = nn.ModuleDict() for adv_name in adversaries: adversary = get_adversary(adv_name, cfg).to(device) optimizer = get_optimizer(adversary.parameters(), cfg.optim) adv_loss = adversarial.AdversarialLoss( adversary, optimizer, loss=loss, loss_real=loss_real, loss_fake=loss_fake, loss_feat=feat_loss, normalize=normalize ) adv_losses[adv_name] = adv_loss return adv_losses
Instantiate ViSQOL metric from config.
def get_visqol(cfg: omegaconf.DictConfig) -> metrics.ViSQOL:
    """Instantiate ViSQOL metric from config."""
    kwargs = dict_from_config(cfg)
    return metrics.ViSQOL(**kwargs)
Instantiate Frechet Audio Distance metric from config.
def get_fad(cfg: omegaconf.DictConfig) -> metrics.FrechetAudioDistanceMetric:
    """Instantiate Frechet Audio Distance metric from config."""
    kwargs = dict_from_config(cfg.tf)
    xp = dora.get_xp()
    kwargs['log_folder'] = xp.folder
    return metrics.FrechetAudioDistanceMetric(**kwargs)
Instantiate KL-Divergence metric from config.
def get_kldiv(cfg: omegaconf.DictConfig) -> metrics.KLDivergenceMetric:
    """Instantiate KL-Divergence metric from config."""
    kld_metrics = {
        'passt': metrics.PasstKLDivergenceMetric,
    }
    klass = kld_metrics[cfg.model]
    kwargs = dict_from_config(cfg.get(cfg.model))
    return klass(**kwargs)
Instantiate Text Consistency metric from config.
def get_text_consistency(cfg: omegaconf.DictConfig) -> metrics.TextConsistencyMetric:
    """Instantiate Text Consistency metric from config."""
    text_consistency_metrics = {
        'clap': metrics.CLAPTextConsistencyMetric
    }
    klass = text_consistency_metrics[cfg.model]
    kwargs = dict_from_config(cfg.get(cfg.model))
    return klass(**kwargs)
Instantiate Chroma Cosine Similarity metric from config.
def get_chroma_cosine_similarity(cfg: omegaconf.DictConfig) -> metrics.ChromaCosineSimilarityMetric:
    """Instantiate Chroma Cosine Similarity metric from config."""
    assert cfg.model == 'chroma_base', "Only support 'chroma_base' method for chroma cosine similarity metric"
    kwargs = dict_from_config(cfg.get(cfg.model))
    return metrics.ChromaCosineSimilarityMetric(**kwargs)
Build AudioDataset from configuration. Args: cfg (omegaconf.DictConfig): Configuration. dataset_type: The type of dataset to create. Returns: dict[str, torch.utils.data.DataLoader]: Map of dataloader for each data split.
def get_audio_datasets(cfg: omegaconf.DictConfig, dataset_type: DatasetType = DatasetType.AUDIO) -> tp.Dict[str, torch.utils.data.DataLoader]: """Build AudioDataset from configuration. Args: cfg (omegaconf.DictConfig): Configuration. dataset_type: The type of dataset to create. Returns: dict[str, torch.utils.data.DataLoader]: Map of dataloader for each data split. """ dataloaders: dict = {} sample_rate = cfg.sample_rate channels = cfg.channels seed = cfg.seed max_sample_rate = cfg.datasource.max_sample_rate max_channels = cfg.datasource.max_channels assert cfg.dataset is not None, "Could not find dataset definition in config" dataset_cfg = dict_from_config(cfg.dataset) splits_cfg: dict = {} splits_cfg['train'] = dataset_cfg.pop('train') splits_cfg['valid'] = dataset_cfg.pop('valid') splits_cfg['evaluate'] = dataset_cfg.pop('evaluate') splits_cfg['generate'] = dataset_cfg.pop('generate') execute_only_stage = cfg.get('execute_only', None) for split, path in cfg.datasource.items(): if not isinstance(path, str): continue # skipping this as not a path if execute_only_stage is not None and split != execute_only_stage: continue logger.info(f"Loading audio data split {split}: {str(path)}") assert ( cfg.sample_rate <= max_sample_rate ), f"Expecting a max sample rate of {max_sample_rate} for datasource but {sample_rate} found." assert ( cfg.channels <= max_channels ), f"Expecting a max number of channels of {max_channels} for datasource but {channels} found." split_cfg = splits_cfg[split] split_kwargs = {k: v for k, v in split_cfg.items()} kwargs = {**dataset_cfg, **split_kwargs} # split kwargs overrides default dataset_cfg kwargs['sample_rate'] = sample_rate kwargs['channels'] = channels if kwargs.get('permutation_on_files') and cfg.optim.updates_per_epoch: kwargs['num_samples'] = ( flashy.distrib.world_size() * cfg.dataset.batch_size * cfg.optim.updates_per_epoch) num_samples = kwargs['num_samples'] shuffle = kwargs['shuffle'] return_info = kwargs.pop('return_info') batch_size = kwargs.pop('batch_size', None) num_workers = kwargs.pop('num_workers') if dataset_type == DatasetType.MUSIC: dataset = data.music_dataset.MusicDataset.from_meta(path, **kwargs) elif dataset_type == DatasetType.SOUND: dataset = data.sound_dataset.SoundDataset.from_meta(path, **kwargs) elif dataset_type == DatasetType.AUDIO: dataset = data.info_audio_dataset.InfoAudioDataset.from_meta(path, return_info=return_info, **kwargs) else: raise ValueError(f"Dataset type is unsupported: {dataset_type}") loader = get_loader( dataset, num_samples, batch_size=batch_size, num_workers=num_workers, seed=seed, collate_fn=dataset.collater if return_info else None, shuffle=shuffle, ) dataloaders[split] = loader return dataloaders
Audio reconstruction evaluation method that can be conveniently pickled.
def evaluate_audio_reconstruction(y_pred: torch.Tensor, y: torch.Tensor, cfg: omegaconf.DictConfig) -> dict:
    """Audio reconstruction evaluation method that can be conveniently pickled."""
    metrics = {}
    if cfg.evaluate.metrics.visqol:
        visqol = builders.get_visqol(cfg.metrics.visqol)
        metrics['visqol'] = visqol(y_pred, y, cfg.sample_rate)
    sisnr = builders.get_loss('sisnr', cfg)
    metrics['sisnr'] = sisnr(y_pred, y)
    return metrics
Utility function for the EmbeddingCache, returning the full embedding without any chunking. This method can be used in case there is no need to extract a chunk of the full embedding read from the cache. Args: full_embed (torch.Tensor): The full embedding. x (any): Batch object from which the full embedding is derived. idx (int): Index of object to consider in the batch object. Returns: full_embed (torch.Tensor): The full embedding.
def get_full_embed(full_embed: torch.Tensor, x: tp.Any, idx: int,
                   device: tp.Union[str, torch.device]) -> torch.Tensor:
    """Utility function for the EmbeddingCache, returning the full embedding without any chunking.
    This method can be used in case there is no need to extract a chunk of the full embedding
    read from the cache.

    Args:
        full_embed (torch.Tensor): The full embedding.
        x (any): Batch object from which the full embedding is derived.
        idx (int): Index of object to consider in the batch object.
    Returns:
        full_embed (torch.Tensor): The full embedding.
    """
    return full_embed.to(device)
Checkpoint name formatted for all uses in the AudioCraft codebase, with the following format: `checkpoint_<name>.th(.<rank>)`. By convention, name is expected to be empty for the last checkpoint, 'best' for the best checkpoint, or the epoch number. Args: name (str, optional): Name suffix for the checkpoint file stem. rank (optional, int): Rank for distributed processing, retrieved with flashy if not provided. use_fsdp (bool): Whether the calling solver relies on FSDP. Returns: str: The checkpoint name.
def checkpoint_name(name: tp.Optional[str] = None, rank: tp.Optional[int] = None, use_fsdp: bool = False) -> str:
    """Checkpoint name formatted for all uses in the AudioCraft codebase, with the following format:
    `checkpoint_<name>.th(.<rank>)`. By convention, name is expected to be empty for the last checkpoint,
    'best' for the best checkpoint, or the epoch number.

    Args:
        name (str, optional): Name suffix for the checkpoint file stem.
        rank (optional, int): Rank for distributed processing, retrieved with flashy if not provided.
        use_fsdp (bool): Whether the calling solver relies on FSDP.
    Returns:
        str: The checkpoint name.
    """
    suffix = ''
    if rank is None:
        rank = flashy.distrib.rank()
    if rank > 0 and use_fsdp:
        suffix = '.' + str(rank)
    name_part = ''
    if name is not None:
        name_part = f'_{name}'
    return f'checkpoint{name_part}.th{suffix}'
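Hypothetical outputs of the naming convention, with the rank passed explicitly so the sketch does not depend on the distributed state:

print(checkpoint_name(rank=0))                   # 'checkpoint.th'       (last checkpoint)
print(checkpoint_name('best', rank=0))           # 'checkpoint_best.th'  (best checkpoint)
print(checkpoint_name('23', rank=0))             # 'checkpoint_23.th'    (epoch 23)
print(checkpoint_name(rank=3, use_fsdp=True))    # 'checkpoint.th.3'     (FSDP shard for rank 3)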
Whether the checkpoint at the given path corresponds to a sharded checkpoint across rank.
def is_sharded_checkpoint(path: Path) -> bool:
    """Whether the checkpoint at the given path corresponds to a sharded checkpoint across rank."""
    return re.search(r'\.th\.\d+$', path.name) is not None
Resolve a given checkpoint path for a provided dora sig or path. Args: sig_or_path (Path or str): Checkpoint path or dora signature. name (str, optional): Name suffix for the checkpoint file stem. rank (optional, int): Rank for distributed processing, retrieved with flashy if not provided. use_fsdp (bool): Whether the calling solver relies on FSDP. Returns: Path, optional: Resolved checkpoint path, if it exists.
def resolve_checkpoint_path(sig_or_path: tp.Union[Path, str], name: tp.Optional[str] = None,
                            use_fsdp: bool = False) -> tp.Optional[Path]:
    """Resolve a given checkpoint path for a provided dora sig or path.

    Args:
        sig_or_path (Path or str): Checkpoint path or dora signature.
        name (str, optional): Name suffix for the checkpoint file stem.
        rank (optional, int): Rank for distributed processing, retrieved with flashy if not provided.
        use_fsdp (bool): Whether the calling solver relies on FSDP.
    Returns:
        Path, optional: Resolved checkpoint path, if it exists.
    """
    from audiocraft import train
    xps_root = train.main.dora.dir / 'xps'
    sig_or_path = str(sig_or_path)
    if sig_or_path.startswith('//sig/'):
        sig = sig_or_path[len('//sig/'):]
        path = xps_root / sig
    else:
        path = Path(sig_or_path)
        path = AudioCraftEnvironment.resolve_reference_path(path)
    if path.is_dir():
        path = path / checkpoint_name(name, use_fsdp=use_fsdp)
    if path.exists():
        return path
    else:
        return None
Load state from checkpoints at the specified checkpoint path.
def load_checkpoint(checkpoint_path: Path, is_sharded: bool = False) -> tp.Any:
    """Load state from checkpoints at the specified checkpoint path."""
    if is_sharded:
        rank0_checkpoint_path = checkpoint_path.parent / checkpoint_name(use_fsdp=False)
        if rank0_checkpoint_path.exists():
            check_sharded_checkpoint(checkpoint_path, rank0_checkpoint_path)
    state = torch.load(checkpoint_path, 'cpu')
    logger.info("Checkpoint loaded from %s", checkpoint_path)
    return state
Save state to disk to the specified checkpoint_path.
def save_checkpoint(state: tp.Any, checkpoint_path: Path, is_sharded: bool = False) -> None:
    """Save state to disk to the specified checkpoint_path."""
    _safe_save_checkpoint(state, checkpoint_path, is_sharded)
    logger.info("Checkpoint saved to %s", checkpoint_path)
Flush checkpoints to only keep last N checkpoints.
def flush_stale_checkpoints(checkpoint_path: Path, keep_last: tp.Optional[int] = None) -> None:
    """Flush checkpoints to only keep last N checkpoints."""
    if keep_last is None or keep_last <= 0:
        return
    checkpoint_dir = checkpoint_path.parent
    suffix = ''
    if flashy.distrib.rank() > 0:
        suffix = f'.{flashy.distrib.rank()}'
    checkpoint_files_with_epoch = []
    for path in Path(checkpoint_dir).glob(f'checkpoint_*.th{suffix}'):
        epoch_part = path.name.split('.', 1)[0].split('_', 1)[1]
        if epoch_part.isdigit():
            checkpoint_files_with_epoch.append((path, int(epoch_part)))
    checkpoint_files = [path for path, _ in list(sorted(checkpoint_files_with_epoch, key=lambda t: t[1]))]
    total_to_flush = max(0, len(checkpoint_files) - keep_last)
    files_to_flush = checkpoint_files[:total_to_flush]
    for path in files_to_flush:
        logger.debug("Removing checkpoint: %s", str(path))
        path.unlink(missing_ok=True)
Check sharded checkpoint state, ensuring the checkpoints are not corrupted.
def check_sharded_checkpoint(checkpoint_path: Path, rank0_checkpoint_path: Path) -> None:
    """Check sharded checkpoint state, ensuring the checkpoints are not corrupted."""
    # Finish the work of a previous run that got interrupted while dumping.
    old_path = Path(str(checkpoint_path) + '.old')
    if old_path.exists():
        raise RuntimeError(
            f"Old checkpoint {old_path} from previous version of this code exist, cannot safely proceed.")
    token = Path(str(rank0_checkpoint_path) + '.tmp.done')
    tmp_path = Path(str(checkpoint_path) + '.tmp')
    if token.exists():
        if tmp_path.exists():
            tmp_path.rename(checkpoint_path)
    flashy.distrib.barrier()
    if flashy.distrib.is_rank_zero() and token.exists():
        token.unlink()
Save checkpoints in a safe manner even when using sharded checkpoints across nodes.
def _safe_save_checkpoint(state: tp.Any, checkpoint_path: Path, is_sharded: bool = False) -> None:
    """Save checkpoints in a safe manner even when using sharded checkpoints across nodes."""
    def _barrier_if_sharded():
        if is_sharded:
            flashy.distrib.barrier()

    if flashy.distrib.is_rank_zero():
        token = Path(str(checkpoint_path) + '.tmp.done')
        if token.exists():
            token.unlink()
    _barrier_if_sharded()
    with flashy.utils.write_and_rename(checkpoint_path) as f:
        torch.save(state, f)
    _barrier_if_sharded()
    if flashy.distrib.is_rank_zero():
        token.touch()
    _barrier_if_sharded()
    _barrier_if_sharded()
    if flashy.distrib.rank() == 0:
        token.unlink()
Update SLURM parameters in configuration based on cluster type. If the cluster type is not specified, it is inferred automatically.
def get_slurm_parameters( cfg: omegaconf.DictConfig, cluster_type: tp.Optional[ClusterType] = None ) -> omegaconf.DictConfig: """Update SLURM parameters in configuration based on cluster type. If the cluster type is not specify, it infers it automatically. """ from ..environment import AudioCraftEnvironment cluster_type = get_cluster_type(cluster_type) # apply cluster-specific adjustments if cluster_type == ClusterType.AWS: cfg["mem_per_gpu"] = None cfg["constraint"] = None cfg["setup"] = [] elif cluster_type == ClusterType.RSC: cfg["mem_per_gpu"] = None cfg["setup"] = [] cfg["constraint"] = None cfg["partition"] = "learn" slurm_exclude = AudioCraftEnvironment.get_slurm_exclude() if slurm_exclude is not None: cfg["exclude"] = slurm_exclude return cfg
Export only the best state from the given EnCodec checkpoint. This should be used if you trained your own EnCodec model.
def export_encodec(checkpoint_path: tp.Union[Path, str], out_file: tp.Union[Path, str]): """Export only the best state from the given EnCodec checkpoint. This should be used if you trained your own EnCodec model. """ pkg = torch.load(checkpoint_path, 'cpu') new_pkg = { 'best_state': pkg['best_state']['model'], 'xp.cfg': OmegaConf.to_yaml(pkg['xp.cfg']), 'version': __version__, 'exported': True, } Path(out_file).parent.mkdir(exist_ok=True, parents=True) torch.save(new_pkg, out_file) return out_file
Export a compression model (potentially EnCodec) from a pretrained model. This is required for packaging the audio tokenizer along with a MusicGen or AudioGen model. Do not include the //pretrained/ prefix. For instance, if you trained a model with `facebook/encodec_32khz`, just put that as the name. Same for `dac_44khz`. In that case, this will not actually include a copy of the model, only a reference to the model used.
def export_pretrained_compression_model(pretrained_encodec: str, out_file: tp.Union[Path, str]): """Export a compression model (potentially EnCodec) from a pretrained model. This is required for packaging the audio tokenizer along a MusicGen or AudioGen model. Do not include the //pretrained/ prefix. For instance if you trained a model with `facebook/encodec_32khz`, just put that as a name. Same for `dac_44khz`. In that case, this will not actually include a copy of the model, simply the reference to the model used. """ if Path(pretrained_encodec).exists(): pkg = torch.load(pretrained_encodec) assert 'best_state' in pkg assert 'xp.cfg' in pkg assert 'version' in pkg assert 'exported' in pkg else: pkg = { 'pretrained': pretrained_encodec, 'exported': True, 'version': __version__, } Path(out_file).parent.mkdir(exist_ok=True, parents=True) torch.save(pkg, out_file)
Export only the best state from the given MusicGen or AudioGen checkpoint.
def export_lm(checkpoint_path: tp.Union[Path, str], out_file: tp.Union[Path, str]): """Export only the best state from the given MusicGen or AudioGen checkpoint. """ pkg = torch.load(checkpoint_path, 'cpu') if pkg['fsdp_best_state']: best_state = pkg['fsdp_best_state']['model'] else: assert pkg['best_state'] best_state = pkg['best_state']['model'] new_pkg = { 'best_state': best_state, 'xp.cfg': OmegaConf.to_yaml(pkg['xp.cfg']), 'version': __version__, 'exported': True, } Path(out_file).parent.mkdir(exist_ok=True, parents=True) torch.save(new_pkg, out_file) return out_file
Renders an audio player for the given audio samples. Args: samples (torch.Tensor): a Tensor of decoded audio samples with shape [B, C, T] or [C, T]. sample_rate (int): sample rate at which the audio should be played.
def display_audio(samples: torch.Tensor, sample_rate: int): """Renders an audio player for the given audio samples. Args: samples (torch.Tensor): a Tensor of decoded audio samples with shapes [B, C, T] or [C, T] sample_rate (int): sample rate audio should be displayed with. """ assert samples.dim() == 2 or samples.dim() == 3 samples = samples.detach().cpu() if samples.dim() == 2: samples = samples[None, ...] for audio in samples: ipd.display(ipd.Audio(audio, rate=sample_rate))
Return a model hash. This should allow us to track regressions in model init from the logs of past experiments.
def model_hash(model: torch.nn.Module) -> str: """Return a model hash. This should allow us to track regressions in model init from the logs of past experiments. """ hasher = hashlib.sha1() for p in model.parameters(): hasher.update(p.data.cpu().numpy().tobytes()) return hasher.hexdigest()
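A quick, hypothetical sanity check of the hashing behaviour (the toy linear layer below is not part of the library):

import torch

torch.manual_seed(0)
net = torch.nn.Linear(4, 2)         # toy module, purely for illustration
h_before = model_hash(net)
assert model_hash(net) == h_before  # deterministic for fixed parameters
with torch.no_grad():
    net.weight.add_(1.0)            # any parameter change alters the digest
assert model_hash(net) != h_before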
Convenience function to map an omegaconf configuration to a dictionary. Args: cfg (omegaconf.DictConfig): Original configuration to map to dict. Returns: dict: Config as dictionary object.
def dict_from_config(cfg: omegaconf.DictConfig) -> dict: """Convenience function to map an omegaconf configuration to a dictionary. Args: cfg (omegaconf.DictConfig): Original configuration to map to dict. Returns: dict: Config as dictionary object. """ dct = omegaconf.OmegaConf.to_container(cfg, resolve=True) assert isinstance(dct, dict) return dct
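Hypothetical usage, assuming `dict_from_config` above is in scope and using a small inline configuration:

import omegaconf

cfg = omegaconf.OmegaConf.create({'optim': {'lr': 1e-4, 'epochs': 10}, 'seed': 1234})
dct = dict_from_config(cfg)
assert isinstance(dct, dict)
print(dct['optim']['lr'])  # 0.0001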
Convenience function to load dataset into a dataloader with optional subset sampling. Args: dataset: Dataset to load. num_samples (Optional[int]): Number of samples to limit subset size. batch_size (int): Batch size. num_workers (int): Number of workers for data loading. seed (int): Random seed.
def get_loader(dataset, num_samples: tp.Optional[int], batch_size: int, num_workers: int, seed: int, **kwargs) -> torch.utils.data.DataLoader: """Convenience function to load dataset into a dataloader with optional subset sampling. Args: dataset: Dataset to load. num_samples (Optional[int]): Number of samples to limit subset size. batch_size (int): Batch size. num_workers (int): Number of workers for data loading. seed (int): Random seed. """ if num_samples is not None: dataset = random_subset(dataset, num_samples, seed) dataloader = flashy.distrib.loader( dataset, batch_size=batch_size, num_workers=num_workers, **kwargs ) return dataloader
torch.multinomial with an arbitrary number of dimensions, and number of candidates on the last dimension. Args: input (torch.Tensor): The input tensor containing probabilities. num_samples (int): Number of samples to draw. replacement (bool): Whether to draw with replacement or not. Keyword args: generator (torch.Generator): A pseudorandom number generator for sampling. Returns: torch.Tensor: Last dimension contains num_samples indices sampled from the multinomial probability distribution located in the last dimension of tensor input.
def multinomial(input: torch.Tensor, num_samples: int, replacement=False, *, generator=None): """torch.multinomial with arbitrary number of dimensions, and number of candidates on the last dimension. Args: input (torch.Tensor): The input tensor containing probabilities. num_samples (int): Number of samples to draw. replacement (bool): Whether to draw with replacement or not. Keywords args: generator (torch.Generator): A pseudorandom number generator for sampling. Returns: torch.Tensor: Last dimension contains num_samples indices sampled from the multinomial probability distribution located in the last dimension of tensor input. """ input_ = input.reshape(-1, input.shape[-1]) output_ = torch.multinomial(input_, num_samples=num_samples, replacement=replacement, generator=generator) output = output_.reshape(*list(input.shape[:-1]), -1) return output
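Hypothetical usage of the helper above on a batch of per-timestep distributions (the shapes are made up for the example):

import torch

probs = torch.softmax(torch.randn(2, 3, 4), dim=-1)  # [B=2, T=3, card=4]
tokens = multinomial(probs, num_samples=1)
print(tokens.shape)  # torch.Size([2, 3, 1]): one index per distribution on the last dim
assert bool((tokens >= 0).all()) and bool((tokens < 4).all())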
Sample next token from top K values along the last dimension of the input probs tensor. Args: probs (torch.Tensor): Input probabilities with token candidates on the last dimension. k (int): The k in “top-k”. Returns: torch.Tensor: Sampled tokens.
def sample_top_k(probs: torch.Tensor, k: int) -> torch.Tensor: """Sample next token from top K values along the last dimension of the input probs tensor. Args: probs (torch.Tensor): Input probabilities with token candidates on the last dimension. k (int): The k in “top-k”. Returns: torch.Tensor: Sampled tokens. """ top_k_value, _ = torch.topk(probs, k, dim=-1) min_value_top_k = top_k_value[..., [-1]] probs *= (probs >= min_value_top_k).float() probs.div_(probs.sum(dim=-1, keepdim=True)) next_token = multinomial(probs, num_samples=1) return next_token
Sample next token from top P probabilities along the last dimension of the input probs tensor. Args: probs (torch.Tensor): Input probabilities with token candidates on the last dimension. p (float): The p in “top-p”. Returns: torch.Tensor: Sampled tokens.
def sample_top_p(probs: torch.Tensor, p: float) -> torch.Tensor: """Sample next token from top P probabilities along the last dimension of the input probs tensor. Args: probs (torch.Tensor): Input probabilities with token candidates on the last dimension. p (int): The p in “top-p”. Returns: torch.Tensor: Sampled tokens. """ probs_sort, probs_idx = torch.sort(probs, dim=-1, descending=True) probs_sum = torch.cumsum(probs_sort, dim=-1) mask = probs_sum - probs_sort > p probs_sort *= (~mask).float() probs_sort.div_(probs_sort.sum(dim=-1, keepdim=True)) next_token = multinomial(probs_sort, num_samples=1) next_token = torch.gather(probs_idx, -1, next_token) return next_token
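A small worked example can make the two truncation strategies above concrete. The probabilities below are invented, and `.clone()` is used because both samplers modify their input in place:

import torch

probs = torch.tensor([[0.50, 0.25, 0.15, 0.07, 0.03]])

# top-k with k=2 keeps only the two most likely candidates (indices 0 and 1).
tok_k = sample_top_k(probs.clone(), k=2)
assert tok_k.item() in (0, 1)

# top-p with p=0.8 keeps the smallest sorted prefix whose cumulative mass, excluding the
# current candidate, stays below p: here indices 0, 1 and 2.
tok_p = sample_top_p(probs.clone(), p=0.8)
assert tok_p.item() in (0, 1, 2)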
Utility function to convert a tensor of sequence lengths to a mask (useful when working on padded sequences). For example: [3, 5] => [[1, 1, 1, 0, 0], [1, 1, 1, 1, 1]] Args: lengths (torch.Tensor): tensor with lengths max_len (int): can set the max length manually. Defaults to None. Returns: torch.Tensor: mask with 0s where there are padding tokens, else 1s
def length_to_mask(lengths: torch.Tensor, max_len: tp.Optional[int] = None) -> torch.Tensor: """Utility function to convert a tensor of sequence lengths to a mask (useful when working on padded sequences). For example: [3, 5] => [[1, 1, 1, 0, 0], [1, 1, 1, 1, 1]] Args: lengths (torch.Tensor): tensor with lengths max_len (int): can set the max length manually. Defaults to None. Returns: torch.Tensor: mask with 0s where there is pad tokens else 1s """ assert len(lengths.shape) == 1, "Length shape should be 1 dimensional." final_length = lengths.max().item() if not max_len else max_len final_length = max(final_length, 1) # if all seqs are of len zero we don't want a zero-size tensor return torch.arange(final_length, device=lengths.device)[None, :] < lengths[:, None]
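The docstring's own example, checked directly (illustrative usage only):

import torch

mask = length_to_mask(torch.tensor([3, 5]))
print(mask.int())
# [[1, 1, 1, 0, 0],
#  [1, 1, 1, 1, 1]]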
Hash trick to pair each word with an index Args: word (str): word we wish to convert to an index vocab_size (int): size of the vocabulary Returns: int: index of the word in the embedding LUT
def hash_trick(word: str, vocab_size: int) -> int: """Hash trick to pair each word with an index Args: word (str): word we wish to convert to an index vocab_size (int): size of the vocabulary Returns: int: index of the word in the embedding LUT """ hash = int(hashlib.sha256(word.encode("utf-8")).hexdigest(), 16) return hash % vocab_size
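Hypothetical usage: the hash maps each word deterministically into a fixed-size embedding table (the vocabulary size below is arbitrary):

vocab_size = 128  # arbitrary embedding table size
idx = hash_trick("guitar", vocab_size)
assert 0 <= idx < vocab_size
assert idx == hash_trick("guitar", vocab_size)  # same word always maps to the same index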
Decorator for a function so that the function will use a Random Number Generator whose state depends on the GPU rank. The original RNG state is restored upon returning. Args: base_seed (int): Random seed.
def with_rank_rng(base_seed: int = 1234): """Decorator for a function so that the function will use a Random Number Generator whose state depend on the GPU rank. The original RNG state is restored upon returning. Args: base_seed (int): Random seed. """ def _decorator(fun: tp.Callable): @wraps(fun) def _decorated(*args, **kwargs): state = torch.get_rng_state() seed = base_seed ^ flashy.distrib.rank() torch.manual_seed(seed) logger.debug('Rank dependent seed set to %d', seed) try: return fun(*args, **kwargs) finally: torch.set_rng_state(state) logger.debug('RNG state restored.') return _decorated return _decorator
Take a list of tensors and collate them into a single tensor, according to the following logic: - `dim` specifies the time dimension which will be stacked and padded. - The output will contain 1 new dimension (dimension index 0) which will be the size of the original list. Args: tensors (tp.List[torch.Tensor]): List of tensors to collate. dim (int): Dimension which will be stacked and padded. Returns: tp.Tuple[torch.Tensor, torch.Tensor]: torch.Tensor: Stacked and padded tensor. The output will contain 1 new dimension (dimension index 0) which will be the size of the original list. torch.Tensor: Tensor containing the lengths of the original tensors (without padding).
def collate(tensors: tp.List[torch.Tensor], dim: int = 0) -> tp.Tuple[torch.Tensor, torch.Tensor]: """Get a list of tensors and collate them to a single tensor. according to the following logic: - `dim` specifies the time dimension which will be stacked and padded. - The output will contain 1 new dimension (dimension index 0) which will be the size of of the original list. Args: tensors (tp.List[torch.Tensor]): List of tensors to collate. dim (int): Dimension which will be stacked and padded. Returns: tp.Tuple[torch.Tensor, torch.Tensor]: torch.Tensor: Stacked and padded tensor. The output will contain 1 new dimension (dimension index 0) which will be the size of the original list. torch.Tensor: Tensor containing length of original tensor sizes (without padding). """ tensors = [x.transpose(0, dim) for x in tensors] lens = torch.LongTensor([len(x) for x in tensors]) padded_tensors = pad_sequence(tensors) padded_tensors = padded_tensors.transpose(0, 1) padded_tensors = padded_tensors.transpose(1, dim + 1) return padded_tensors, lens
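An illustrative call on three variable-length tensors (the shapes are made up); the first returned tensor is padded to the longest item and the second holds the original lengths:

import torch

tensors = [torch.ones(3, 2), torch.ones(5, 2), torch.ones(4, 2)]  # variable lengths on dim 0
padded, lens = collate(tensors, dim=0)
print(padded.shape)  # torch.Size([3, 5, 2]): batch of 3, padded to the longest length 5
print(lens)          # tensor([3, 5, 4])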
Warn about a given message only once.
@lru_cache(None)  # caching makes each distinct (logger, msg) pair log only once, matching the docstring; assumes `from functools import lru_cache` at module level
def warn_once(logger, msg):
    """Warn about a given message only once."""
    logger.warning(msg)
Check if an object can be serialized into JSON.
def is_jsonable(x: tp.Any): """Check if an object can be serialized into a json:""" try: json.dumps(x) return True except (TypeError, OverflowError): return False
Wrapper around state dict loading of CLAP model addressing compatibility issues between CLAP and AudioCraft HuggingFace transformer version. See: https://github.com/LAION-AI/CLAP/issues/118
def load_clap_state_dict(clap_model, path: tp.Union[str, Path]): """Wrapper around state dict loading of CLAP model addressing compatibility issues between CLAP and AudioCraft HuggingFace transformer version. See: https://github.com/LAION-AI/CLAP/issues/118 """ from clap_module.factory import load_state_dict # type: ignore pkg = load_state_dict(path) pkg.pop('text_branch.embeddings.position_ids', None) clap_model.model.load_state_dict(pkg)
Process string for safer file naming. Taken from https://github.com/django/django/blob/master/django/utils/text.py Convert to ASCII if 'allow_unicode' is False. Convert spaces or repeated dashes to single dashes. Remove characters that aren't alphanumerics, underscores, or hyphens. Convert to lowercase. Also strip leading and trailing whitespace, dashes, and underscores.
def slugify(value: tp.Any, allow_unicode: bool = False): """Process string for safer file naming. Taken from https://github.com/django/django/blob/master/django/utils/text.py Convert to ASCII if 'allow_unicode' is False. Convert spaces or repeated dashes to single dashes. Remove characters that aren't alphanumerics, underscores, or hyphens. Convert to lowercase. Also strip leading and trailing whitespace, dashes, and underscores. """ value = str(value) if allow_unicode: value = unicodedata.normalize("NFKC", value) else: value = ( unicodedata.normalize("NFKD", value) .encode("ascii", "ignore") .decode("ascii") ) value = re.sub(r"[^\w\s-]", "", value.lower()) return re.sub(r"[-\s]+", "-", value).strip("-_")
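Hypothetical usage for naming sample files; the input strings are invented:

print(slugify("80s Pop -- synth & drums!"))  # 80s-pop-synth-drums
print(slugify("Café del Mar"))               # cafe-del-mar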
Gets a dictionary of matched samples across the given XPs. Each dictionary entry maps a sample id to a list of samples for that id. The number of samples per id will always match the number of XPs provided and will correspond to each XP in the same order given. In other words, only samples that can be matched across all provided XPs will be returned in order to satisfy this rule. There are two types of ids that can be returned: stable and unstable. * Stable IDs are deterministic ids that were computed by the SampleManager given a sample's inputs (prompts/conditioning). This is why we can match them across XPs. * Unstable IDs are of the form "noinput_{idx}" and are generated on-the-fly, in order to map samples that used non-deterministic, random ids. This is the case for samples that did not use prompts or conditioning for their generation. This function will sort these samples by their id and match them by their index. Args: xps: a list of XPs to match samples from. start_epoch (int): If provided, only return samples corresponding to this epoch or newer. end_epoch (int): If provided, only return samples corresponding to this epoch or older. exclude_prompted (bool): If True, does not include samples that used a prompt. exclude_unprompted (bool): If True, does not include samples that did not use a prompt. exclude_conditioned (bool): If True, excludes samples that used conditioning. exclude_unconditioned (bool): If True, excludes samples that did not use conditioning.
def get_samples_for_xps(xps: tp.List[dora.XP], **kwargs) -> tp.Dict[str, tp.List[Sample]]: """Gets a dictionary of matched samples across the given XPs. Each dictionary entry maps a sample id to a list of samples for that id. The number of samples per id will always match the number of XPs provided and will correspond to each XP in the same order given. In other words, only samples that can be match across all provided XPs will be returned in order to satisfy this rule. There are two types of ids that can be returned: stable and unstable. * Stable IDs are deterministic ids that were computed by the SampleManager given a sample's inputs (prompts/conditioning). This is why we can match them across XPs. * Unstable IDs are of the form "noinput_{idx}" and are generated on-the-fly, in order to map samples that used non-deterministic, random ids. This is the case for samples that did not use prompts or conditioning for their generation. This function will sort these samples by their id and match them by their index. Args: xps: a list of XPs to match samples from. start_epoch (int): If provided, only return samples corresponding to this epoch or newer. end_epoch (int): If provided, only return samples corresponding to this epoch or older. exclude_prompted (bool): If True, does not include samples that used a prompt. exclude_unprompted (bool): If True, does not include samples that did not use a prompt. exclude_conditioned (bool): If True, excludes samples that used conditioning. exclude_unconditioned (bool): If True, excludes samples that did not use conditioning. """ managers = [SampleManager(xp) for xp in xps] samples_per_xp = [manager.get_samples(**kwargs) for manager in managers] stable_samples = _match_stable_samples(samples_per_xp) unstable_samples = _match_unstable_samples(samples_per_xp) return dict(stable_samples, **unstable_samples)
Just to make paths a bit nicer, make them relative to the Dora root dir.
def normalize_path(path: Path): """Just to make path a bit nicer, make them relative to the Dora root dir. """ path = path.resolve() dora_dir = train.main.dora.dir.resolve() / 'xps' return path.relative_to(dora_dir)
Revert `normalize_path`.
def get_full_path(normalized_path: Path): """Revert `normalize_path`. """ return train.main.dora.dir.resolve() / 'xps' / normalized_path
Return a signature for a list of XP signatures.
def get_signature(xps: tp.List[str]): """Return a signature for a list of XP signatures. """ return sha1(json.dumps(xps).encode()).hexdigest()[:10]
Ensure user is logged in.
def ensure_logged(func): """Ensure user is logged in. """ @wraps(func) def _wrapped(*args, **kwargs): user = session.get('user') if user is None: return redirect(url_for('login', redirect_to=request.url)) return func(*args, **kwargs) return _wrapped
Login user if not already, then redirect.
def login(): """Login user if not already, then redirect. """ user = session.get('user') if user is None: error = None if request.method == 'POST': user = request.form['user'] if not user: error = 'User cannot be empty' if user is None or error: return render_template('login.html', error=error) assert user session['user'] = user redirect_to = request.args.get('redirect_to') if redirect_to is None: redirect_to = url_for('index') return redirect(redirect_to)
Offer to create a new study.
def index(): """Offer to create a new study. """ errors = [] if request.method == 'POST': xps_or_grids = [part.strip() for part in request.form['xps'].split()] xps = set() for xp_or_grid in xps_or_grids: xp_path = train.main.dora.dir / 'xps' / xp_or_grid if xp_path.exists(): xps.add(xp_or_grid) continue grid_path = train.main.dora.dir / 'grids' / xp_or_grid if grid_path.exists(): for child in grid_path.iterdir(): if child.is_symlink(): xps.add(child.name) continue errors.append(f'{xp_or_grid} is neither an XP nor a grid!') assert xps or errors blind = 'true' if request.form.get('blind') == 'on' else 'false' xps = list(xps) if not errors: signature = get_signature(xps) manifest = { 'xps': xps, } survey_path = surveys / signature survey_path.mkdir(exist_ok=True) with open(survey_path / 'manifest.json', 'w') as f: json.dump(manifest, f, indent=2) return redirect(url_for('survey', blind=blind, signature=signature)) return render_template('index.html', errors=errors)
Run inference to get the audio tagging result of an audio clip.
def audio_tagging(args): """Inference audio tagging result of an audio clip. """ # Arugments & parameters sample_rate = args.sample_rate window_size = args.window_size hop_size = args.hop_size mel_bins = args.mel_bins fmin = args.fmin fmax = args.fmax model_type = args.model_type checkpoint_path = args.checkpoint_path audio_path = args.audio_path device = torch.device('cuda') if args.cuda and torch.cuda.is_available() else torch.device('cpu') classes_num = config.classes_num labels = config.labels # Model Model = eval(model_type) model = Model(sample_rate=sample_rate, window_size=window_size, hop_size=hop_size, mel_bins=mel_bins, fmin=fmin, fmax=fmax, classes_num=classes_num) checkpoint = torch.load(checkpoint_path, map_location=device) model.load_state_dict(checkpoint['model']) # Parallel if 'cuda' in str(device): model.to(device) print('GPU number: {}'.format(torch.cuda.device_count())) model = torch.nn.DataParallel(model) else: print('Using CPU.') # Load audio (waveform, _) = librosa.core.load(audio_path, sr=sample_rate, mono=True) waveform = waveform[None, :] # (1, audio_length) waveform = move_data_to_device(waveform, device) # Forward with torch.no_grad(): model.eval() batch_output_dict = model(waveform, None) clipwise_output = batch_output_dict['clipwise_output'].data.cpu().numpy()[0] """(classes_num,)""" sorted_indexes = np.argsort(clipwise_output)[::-1] # Print audio tagging top probabilities for k in range(10): print('{}: {:.3f}'.format(np.array(labels)[sorted_indexes[k]], clipwise_output[sorted_indexes[k]])) # Print embedding if 'embedding' in batch_output_dict.keys(): embedding = batch_output_dict['embedding'].data.cpu().numpy()[0] print('embedding: {}'.format(embedding.shape)) return clipwise_output, labels
Run inference to get the sound event detection result of an audio clip.
def sound_event_detection(args): """Inference sound event detection result of an audio clip. """ # Arugments & parameters sample_rate = args.sample_rate window_size = args.window_size hop_size = args.hop_size mel_bins = args.mel_bins fmin = args.fmin fmax = args.fmax model_type = args.model_type checkpoint_path = args.checkpoint_path audio_path = args.audio_path device = torch.device('cuda') if args.cuda and torch.cuda.is_available() else torch.device('cpu') classes_num = config.classes_num labels = config.labels frames_per_second = sample_rate // hop_size # Paths fig_path = os.path.join('results', '{}.png'.format(get_filename(audio_path))) create_folder(os.path.dirname(fig_path)) # Model Model = eval(model_type) model = Model(sample_rate=sample_rate, window_size=window_size, hop_size=hop_size, mel_bins=mel_bins, fmin=fmin, fmax=fmax, classes_num=classes_num) checkpoint = torch.load(checkpoint_path, map_location=device) model.load_state_dict(checkpoint['model']) # Parallel print('GPU number: {}'.format(torch.cuda.device_count())) model = torch.nn.DataParallel(model) if 'cuda' in str(device): model.to(device) # Load audio (waveform, _) = librosa.core.load(audio_path, sr=sample_rate, mono=True) waveform = waveform[None, :] # (1, audio_length) waveform = move_data_to_device(waveform, device) # Forward with torch.no_grad(): model.eval() batch_output_dict = model(waveform, None) framewise_output = batch_output_dict['framewise_output'].data.cpu().numpy()[0] """(time_steps, classes_num)""" print('Sound event detection result (time_steps x classes_num): {}'.format( framewise_output.shape)) sorted_indexes = np.argsort(np.max(framewise_output, axis=0))[::-1] top_k = 10 # Show top results top_result_mat = framewise_output[:, sorted_indexes[0 : top_k]] """(time_steps, top_k)""" # Plot result stft = librosa.core.stft(y=waveform[0].data.cpu().numpy(), n_fft=window_size, hop_length=hop_size, window='hann', center=True) frames_num = stft.shape[-1] fig, axs = plt.subplots(2, 1, sharex=True, figsize=(10, 4)) axs[0].matshow(np.log(np.abs(stft)), origin='lower', aspect='auto', cmap='jet') axs[0].set_ylabel('Frequency bins') axs[0].set_title('Log spectrogram') axs[1].matshow(top_result_mat.T, origin='upper', aspect='auto', cmap='jet', vmin=0, vmax=1) axs[1].xaxis.set_ticks(np.arange(0, frames_num, frames_per_second)) axs[1].xaxis.set_ticklabels(np.arange(0, frames_num / frames_per_second)) axs[1].yaxis.set_ticks(np.arange(0, top_k)) axs[1].yaxis.set_ticklabels(np.array(labels)[sorted_indexes[0 : top_k]]) axs[1].yaxis.grid(color='k', linestyle='solid', linewidth=0.3, alpha=0.3) axs[1].set_xlabel('Seconds') axs[1].xaxis.set_ticks_position('bottom') plt.tight_layout() plt.savefig(fig_path) print('Save sound event detection visualization to {}'.format(fig_path)) return framewise_output, labels
Binary cross-entropy loss.
def clip_bce(output_dict, target_dict): """Binary crossentropy loss. """ return F.binary_cross_entropy( output_dict['clipwise_output'], target_dict['target'])
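An illustrative loss computation with dummy outputs and targets (the batch size and class count below are arbitrary):

import torch

output_dict = {'clipwise_output': torch.sigmoid(torch.randn(4, 527))}  # (batch, classes)
target_dict = {'target': torch.randint(0, 2, (4, 527)).float()}
loss = clip_bce(output_dict, target_dict)
print(loss.item())  # scalar BCE averaged over the batch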
Train AudioSet tagging model. Args: dataset_dir: str workspace: str data_type: 'balanced_train' | 'full_train' window_size: int hop_size: int mel_bins: int model_type: str loss_type: 'clip_bce' balanced: 'none' | 'balanced' | 'alternate' augmentation: 'none' | 'mixup' batch_size: int learning_rate: float resume_iteration: int early_stop: int accumulation_steps: int cuda: bool
def train(args): """Train AudioSet tagging model. Args: dataset_dir: str workspace: str data_type: 'balanced_train' | 'full_train' window_size: int hop_size: int mel_bins: int model_type: str loss_type: 'clip_bce' balanced: 'none' | 'balanced' | 'alternate' augmentation: 'none' | 'mixup' batch_size: int learning_rate: float resume_iteration: int early_stop: int accumulation_steps: int cuda: bool """ # Arugments & parameters workspace = args.workspace data_type = args.data_type sample_rate = args.sample_rate window_size = args.window_size hop_size = args.hop_size mel_bins = args.mel_bins fmin = args.fmin fmax = args.fmax model_type = args.model_type loss_type = args.loss_type balanced = args.balanced augmentation = args.augmentation batch_size = args.batch_size learning_rate = args.learning_rate resume_iteration = args.resume_iteration early_stop = args.early_stop device = torch.device('cuda') if args.cuda and torch.cuda.is_available() else torch.device('cpu') filename = args.filename num_workers = 8 clip_samples = config.clip_samples classes_num = config.classes_num loss_func = get_loss_func(loss_type) # Paths black_list_csv = None train_indexes_hdf5_path = os.path.join(workspace, 'hdf5s', 'indexes', '{}.h5'.format(data_type)) eval_bal_indexes_hdf5_path = os.path.join(workspace, 'hdf5s', 'indexes', 'balanced_train.h5') eval_test_indexes_hdf5_path = os.path.join(workspace, 'hdf5s', 'indexes', 'eval.h5') checkpoints_dir = os.path.join(workspace, 'checkpoints', filename, 'sample_rate={},window_size={},hop_size={},mel_bins={},fmin={},fmax={}'.format( sample_rate, window_size, hop_size, mel_bins, fmin, fmax), 'data_type={}'.format(data_type), model_type, 'loss_type={}'.format(loss_type), 'balanced={}'.format(balanced), 'augmentation={}'.format(augmentation), 'batch_size={}'.format(batch_size)) create_folder(checkpoints_dir) statistics_path = os.path.join(workspace, 'statistics', filename, 'sample_rate={},window_size={},hop_size={},mel_bins={},fmin={},fmax={}'.format( sample_rate, window_size, hop_size, mel_bins, fmin, fmax), 'data_type={}'.format(data_type), model_type, 'loss_type={}'.format(loss_type), 'balanced={}'.format(balanced), 'augmentation={}'.format(augmentation), 'batch_size={}'.format(batch_size), 'statistics.pkl') create_folder(os.path.dirname(statistics_path)) logs_dir = os.path.join(workspace, 'logs', filename, 'sample_rate={},window_size={},hop_size={},mel_bins={},fmin={},fmax={}'.format( sample_rate, window_size, hop_size, mel_bins, fmin, fmax), 'data_type={}'.format(data_type), model_type, 'loss_type={}'.format(loss_type), 'balanced={}'.format(balanced), 'augmentation={}'.format(augmentation), 'batch_size={}'.format(batch_size)) create_logging(logs_dir, filemode='w') logging.info(args) if 'cuda' in str(device): logging.info('Using GPU.') device = 'cuda' else: logging.info('Using CPU. Set --cuda flag to use GPU.') device = 'cpu' # Model Model = eval(model_type) model = Model(sample_rate=sample_rate, window_size=window_size, hop_size=hop_size, mel_bins=mel_bins, fmin=fmin, fmax=fmax, classes_num=classes_num) total = sum(p.numel() for p in model.parameters()) print("Total params: %.2fM" % (total/1e6)) logging.info("Total params: %.2fM" % (total/1e6)) #params_num = count_parameters(model) # flops_num = count_flops(model, clip_samples) #logging.info('Parameters num: {}'.format(params_num)) # logging.info('Flops num: {:.3f} G'.format(flops_num / 1e9)) # Dataset will be used by DataLoader later. Dataset takes a meta as input # and return a waveform and a target. 
dataset = AudioSetDataset(sample_rate=sample_rate) # Train sampler if balanced == 'none': Sampler = TrainSampler elif balanced == 'balanced': Sampler = BalancedTrainSampler elif balanced == 'alternate': Sampler = AlternateTrainSampler train_sampler = Sampler( indexes_hdf5_path=train_indexes_hdf5_path, batch_size=batch_size * 2 if 'mixup' in augmentation else batch_size, black_list_csv=black_list_csv) # Evaluate sampler eval_bal_sampler = EvaluateSampler( indexes_hdf5_path=eval_bal_indexes_hdf5_path, batch_size=batch_size) eval_test_sampler = EvaluateSampler( indexes_hdf5_path=eval_test_indexes_hdf5_path, batch_size=batch_size) # Data loader train_loader = torch.utils.data.DataLoader(dataset=dataset, batch_sampler=train_sampler, collate_fn=collate_fn, num_workers=num_workers, pin_memory=True) eval_bal_loader = torch.utils.data.DataLoader(dataset=dataset, batch_sampler=eval_bal_sampler, collate_fn=collate_fn, num_workers=num_workers, pin_memory=True) eval_test_loader = torch.utils.data.DataLoader(dataset=dataset, batch_sampler=eval_test_sampler, collate_fn=collate_fn, num_workers=num_workers, pin_memory=True) mix=0.5 if 'mixup' in augmentation: mixup_augmenter = Mixup(mixup_alpha=mix) print(mix) logging.info(mix) # Evaluator evaluator = Evaluator(model=model) # Statistics statistics_container = StatisticsContainer(statistics_path) # Optimizer optimizer = optim.AdamW(model.parameters(), lr=learning_rate, betas=(0.9, 0.999), eps=1e-08, weight_decay=0.05, amsgrad=True) scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='max', factor=0.5, patience=4, min_lr=1e-06, verbose=True) train_bgn_time = time.time() # Resume training if resume_iteration > 0: resume_checkpoint_path = os.path.join(workspace, 'checkpoints', filename, 'sample_rate={},window_size={},hop_size={},mel_bins={},fmin={},fmax={}'.format( sample_rate, window_size, hop_size, mel_bins, fmin, fmax), 'data_type={}'.format(data_type), model_type, 'loss_type={}'.format(loss_type), 'balanced={}'.format(balanced), 'augmentation={}'.format(augmentation), 'batch_size={}'.format(batch_size), '{}_iterations.pth'.format(resume_iteration)) logging.info('Loading checkpoint {}'.format(resume_checkpoint_path)) checkpoint = torch.load(resume_checkpoint_path) model.load_state_dict(checkpoint['model']) train_sampler.load_state_dict(checkpoint['sampler']) statistics_container.load_state_dict(resume_iteration) iteration = checkpoint['iteration'] else: iteration = 0 # Parallel print('GPU number: {}'.format(torch.cuda.device_count())) model = torch.nn.DataParallel(model) if 'cuda' in str(device): model.to(device) if resume_iteration: optimizer.load_state_dict(checkpoint['optimizer']) scheduler.load_state_dict(checkpoint['scheduler']) print(optimizer.state_dict()['param_groups'][0]['lr']) time1 = time.time() for batch_data_dict in train_loader: """batch_data_dict: { 'audio_name': (batch_size [*2 if mixup],), 'waveform': (batch_size [*2 if mixup], clip_samples), 'target': (batch_size [*2 if mixup], classes_num), (ifexist) 'mixup_lambda': (batch_size * 2,)} """ # Evaluate if (iteration % 2000 == 0 and iteration >= resume_iteration) or (iteration == 0): train_fin_time = time.time() bal_statistics = evaluator.evaluate(eval_bal_loader) test_statistics = evaluator.evaluate(eval_test_loader) logging.info('Validate bal mAP: {:.3f}'.format( np.mean(bal_statistics['average_precision']))) logging.info('Validate test mAP: {:.3f}'.format( np.mean(test_statistics['average_precision']))) statistics_container.append(iteration, bal_statistics, data_type='bal') 
statistics_container.append(iteration, test_statistics, data_type='test') statistics_container.dump() train_time = train_fin_time - train_bgn_time validate_time = time.time() - train_fin_time logging.info( 'iteration: {}, train time: {:.3f} s, validate time: {:.3f} s' ''.format(iteration, train_time, validate_time)) logging.info('------------------------------------') train_bgn_time = time.time() # Save model if iteration % 2000 == 0: checkpoint = { 'iteration': iteration, 'model': model.module.state_dict(), 'sampler': train_sampler.state_dict(), 'optimizer': optimizer.state_dict(), 'scheduler': scheduler.state_dict()} checkpoint_path = os.path.join( checkpoints_dir, '{}_iterations.pth'.format(iteration)) torch.save(checkpoint, checkpoint_path) logging.info('Model saved to {}'.format(checkpoint_path)) # Mixup lambda if 'mixup' in augmentation: batch_data_dict['mixup_lambda'] = mixup_augmenter.get_lambda( batch_size=len(batch_data_dict['waveform'])) # Move data to device for key in batch_data_dict.keys(): batch_data_dict[key] = move_data_to_device(batch_data_dict[key], device) # Forward model.train() if 'mixup' in augmentation: batch_output_dict = model(batch_data_dict['waveform'], batch_data_dict['mixup_lambda']) """{'clipwise_output': (batch_size, classes_num), ...}""" batch_target_dict = {'target': do_mixup(batch_data_dict['target'], batch_data_dict['mixup_lambda'])} """{'target': (batch_size, classes_num)}""" else: batch_output_dict = model(batch_data_dict['waveform'], None) """{'clipwise_output': (batch_size, classes_num), ...}""" batch_target_dict = {'target': batch_data_dict['target']} """{'target': (batch_size, classes_num)}""" # Loss loss = loss_func(batch_output_dict, batch_target_dict) # Backward loss.backward() optimizer.step() optimizer.zero_grad() if iteration % 10 == 0: print(iteration, loss) #print('--- Iteration: {}, train time: {:.3f} s / 10 iterations ---'\ # .format(iteration, time.time() - time1)) #time1 = time.time() if iteration % 2000 == 0: scheduler.step(np.mean(test_statistics['average_precision'])) print(optimizer.state_dict()['param_groups'][0]['lr']) logging.info(optimizer.state_dict()['param_groups'][0]['lr']) # Stop learning if iteration == early_stop: break iteration += 1
Load checkpoint from a file or URI. Args: model (Module): Module to load checkpoint. filename (str): Accept local filepath, URL, ``torchvision://xxx``, ``open-mmlab://xxx``. Please refer to ``docs/model_zoo.md`` for details. map_location (str): Same as :func:`torch.load`. strict (bool): Whether to allow different params for the model and checkpoint. logger (:mod:`logging.Logger` or None): The logger for error message. revise_keys (list): A list of customized keywords to modify the state_dict in checkpoint. Each item is a (pattern, replacement) pair of the regular expression operations. Default: strip the prefix 'module.' by [(r'^module\.', '')]. Returns: dict or OrderedDict: The loaded checkpoint.
def load_checkpoint(model, filename, map_location=None, strict=False, logger=None, revise_keys=[(r'^module\.', '')]): """Load checkpoint from a file or URI. Args: model (Module): Module to load checkpoint. filename (str): Accept local filepath, URL, ``torchvision://xxx``, ``open-mmlab://xxx``. Please refer to ``docs/model_zoo.md`` for details. map_location (str): Same as :func:`torch.load`. strict (bool): Whether to allow different params for the model and checkpoint. logger (:mod:`logging.Logger` or None): The logger for error message. revise_keys (list): A list of customized keywords to modify the state_dict in checkpoint. Each item is a (pattern, replacement) pair of the regular expression operations. Default: strip the prefix 'module.' by [(r'^module\\.', '')]. Returns: dict or OrderedDict: The loaded checkpoint. """ checkpoint = _load_checkpoint(filename, map_location, logger) new_proj = torch.nn.Conv2d(1, 64, kernel_size=(7, 7), stride=(4, 4), padding=(2, 2)) new_proj.weight = torch.nn.Parameter(torch.sum(checkpoint['patch_embed1.proj.weight'], dim=1).unsqueeze(1)) checkpoint['patch_embed1.proj.weight'] = new_proj.weight # OrderedDict is a subclass of dict if not isinstance(checkpoint, dict): raise RuntimeError( f'No state_dict found in checkpoint file {filename}') # get state_dict from checkpoint if 'state_dict' in checkpoint: state_dict = checkpoint['state_dict'] else: state_dict = checkpoint # strip prefix of state_dict metadata = getattr(state_dict, '_metadata', OrderedDict()) for p, r in revise_keys: state_dict = OrderedDict( {re.sub(p, r, k): v for k, v in state_dict.items()}) state_dict = OrderedDict({k.replace('backbone.',''):v for k,v in state_dict.items()}) # Keep metadata in state_dict state_dict._metadata = metadata # load state_dict load_state_dict(model, state_dict, strict, logger) return checkpoint
Initialize a Linear or Convolutional layer.
def init_layer(layer): """Initialize a Linear or Convolutional layer. """ nn.init.xavier_uniform_(layer.weight) if hasattr(layer, 'bias'): if layer.bias is not None: layer.bias.data.fill_(0.)
Initialize a Batchnorm layer.
def init_bn(bn): """Initialize a Batchnorm layer. """ bn.bias.data.fill_(0.) bn.weight.data.fill_(1.)
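A hedged sketch of how the two initializers above are typically applied inside a model's weight-initialization routine; the small conv/batch-norm pair is only for illustration:

import torch.nn as nn

conv = nn.Conv2d(1, 8, kernel_size=3, bias=True)
bn = nn.BatchNorm2d(8)
init_layer(conv)  # Xavier-uniform weights, zero bias
init_bn(bn)       # unit scale, zero shift
print(conv.bias.abs().sum().item(), bn.weight.mean().item())  # 0.0 1.0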
convert patch embedding weight from manual patchify + linear proj to conv
def _conv_filter(state_dict, patch_size=16): """ convert patch embedding weight from manual patchify + linear proj to conv""" out_dict = {} for k, v in state_dict.items(): if 'patch_embed.proj.weight' in k: v = v.reshape((v.shape[0], 3, patch_size, patch_size)) out_dict[k] = v return out_dict
Mixup x of even indexes (0, 2, 4, ...) with x of odd indexes (1, 3, 5, ...). Args: x: (batch_size * 2, ...) mixup_lambda: (batch_size * 2,) Returns: out: (batch_size, ...)
def do_mixup(x, mixup_lambda): """Mixup x of even indexes (0, 2, 4, ...) with x of odd indexes (1, 3, 5, ...). Args: x: (batch_size * 2, ...) mixup_lambda: (batch_size * 2,) Returns: out: (batch_size, ...) """ out = (x[0 :: 2].transpose(0, -1) * mixup_lambda[0 :: 2] + \ x[1 :: 2].transpose(0, -1) * mixup_lambda[1 :: 2]).transpose(0, -1) return out
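A worked toy example: a batch of 4 one-hot targets is mixed down to 2, pairing even and odd indexes; the lambda values are invented:

import torch

targets = torch.eye(4)                    # (batch_size * 2, classes_num) = (4, 4)
lam = torch.tensor([0.7, 0.3, 0.6, 0.4])  # lambdas for the pairs (0, 1) and (2, 3)
mixed = do_mixup(targets, lam)
print(mixed)
# row 0 = 0.7 * one_hot(0) + 0.3 * one_hot(1); row 1 = 0.6 * one_hot(2) + 0.4 * one_hot(3)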
Forward data to a model. Args: model: object generator: object return_input: bool return_target: bool Returns: audio_name: (audios_num,) clipwise_output: (audios_num, classes_num) (ifexist) segmentwise_output: (audios_num, segments_num, classes_num) (ifexist) framewise_output: (audios_num, frames_num, classes_num) (optional) return_input: (audios_num, segment_samples) (optional) return_target: (audios_num, classes_num)
def forward(model, generator, return_input=False, return_target=False): """Forward data to a model. Args: model: object generator: object return_input: bool return_target: bool Returns: audio_name: (audios_num,) clipwise_output: (audios_num, classes_num) (ifexist) segmentwise_output: (audios_num, segments_num, classes_num) (ifexist) framewise_output: (audios_num, frames_num, classes_num) (optional) return_input: (audios_num, segment_samples) (optional) return_target: (audios_num, classes_num) """ output_dict = {} device = next(model.parameters()).device time1 = time.time() # Forward data to a model in mini-batches for n, batch_data_dict in enumerate(generator): print(n) batch_waveform = move_data_to_device(batch_data_dict['waveform'], device) with torch.no_grad(): model.eval() batch_output = model(batch_waveform) append_to_dict(output_dict, 'audio_name', batch_data_dict['audio_name']) append_to_dict(output_dict, 'clipwise_output', batch_output['clipwise_output'].data.cpu().numpy()) if 'segmentwise_output' in batch_output.keys(): append_to_dict(output_dict, 'segmentwise_output', batch_output['segmentwise_output'].data.cpu().numpy()) if 'framewise_output' in batch_output.keys(): append_to_dict(output_dict, 'framewise_output', batch_output['framewise_output'].data.cpu().numpy()) if return_input: append_to_dict(output_dict, 'waveform', batch_data_dict['waveform']) if return_target: if 'target' in batch_data_dict.keys(): append_to_dict(output_dict, 'target', batch_data_dict['target']) if n % 10 == 0: print(' --- Inference time: {:.3f} s / 10 iterations ---'.format( time.time() - time1)) time1 = time.time() for key in output_dict.keys(): output_dict[key] = np.concatenate(output_dict[key], axis=0) return output_dict
Interpolate data in time domain. This is used to compensate the resolution reduction in downsampling of a CNN. Args: x: (batch_size, time_steps, classes_num) ratio: int, ratio to interpolate Returns: upsampled: (batch_size, time_steps * ratio, classes_num)
def interpolate(x, ratio): """Interpolate data in time domain. This is used to compensate the resolution reduction in downsampling of a CNN. Args: x: (batch_size, time_steps, classes_num) ratio: int, ratio to interpolate Returns: upsampled: (batch_size, time_steps * ratio, classes_num) """ (batch_size, time_steps, classes_num) = x.shape upsampled = x[:, :, None, :].repeat(1, 1, ratio, 1) upsampled = upsampled.reshape(batch_size, time_steps * ratio, classes_num) return upsampled
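Illustrative usage: framewise outputs produced at a reduced frame rate are repeated in time to line up with the original frames (the ratio and shapes below are only an example):

import torch

framewise = torch.rand(2, 25, 10)  # (batch_size, time_steps, classes_num)
upsampled = interpolate(framewise, ratio=8)
print(upsampled.shape)             # torch.Size([2, 200, 10])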
Pad framewise_output to the same length as input frames. The pad value is the same as the value of the last frame. Args: framewise_output: (batch_size, frames_num, classes_num) frames_num: int, number of frames to pad Outputs: output: (batch_size, frames_num, classes_num)
def pad_framewise_output(framewise_output, frames_num): """Pad framewise_output to the same length as input frames. The pad value is the same as the value of the last frame. Args: framewise_output: (batch_size, frames_num, classes_num) frames_num: int, number of frames to pad Outputs: output: (batch_size, frames_num, classes_num) """ pad = framewise_output[:, -1 :, :].repeat(1, frames_num - framewise_output.shape[1], 1) """tensor for padding""" output = torch.cat((framewise_output, pad), dim=1) """(batch_size, frames_num, classes_num)""" return output
Count flops. Code modified from others' implementation.
def count_flops(model, audio_length): """Count flops. Code modified from others' implementation. """ multiply_adds = True list_conv2d=[] def conv2d_hook(self, input, output): batch_size, input_channels, input_height, input_width = input[0].size() output_channels, output_height, output_width = output[0].size() kernel_ops = self.kernel_size[0] * self.kernel_size[1] * (self.in_channels / self.groups) * (2 if multiply_adds else 1) bias_ops = 1 if self.bias is not None else 0 params = output_channels * (kernel_ops + bias_ops) flops = batch_size * params * output_height * output_width list_conv2d.append(flops) list_conv1d=[] def conv1d_hook(self, input, output): batch_size, input_channels, input_length = input[0].size() output_channels, output_length = output[0].size() kernel_ops = self.kernel_size[0] * (self.in_channels / self.groups) * (2 if multiply_adds else 1) bias_ops = 1 if self.bias is not None else 0 params = output_channels * (kernel_ops + bias_ops) flops = batch_size * params * output_length list_conv1d.append(flops) list_linear=[] def linear_hook(self, input, output): batch_size = input[0].size(0) if input[0].dim() == 2 else 1 weight_ops = self.weight.nelement() * (2 if multiply_adds else 1) bias_ops = self.bias.nelement() flops = batch_size * (weight_ops + bias_ops) list_linear.append(flops) list_bn=[] def bn_hook(self, input, output): list_bn.append(input[0].nelement() * 2) list_relu=[] def relu_hook(self, input, output): list_relu.append(input[0].nelement() * 2) list_pooling2d=[] def pooling2d_hook(self, input, output): batch_size, input_channels, input_height, input_width = input[0].size() output_channels, output_height, output_width = output[0].size() kernel_ops = self.kernel_size * self.kernel_size bias_ops = 0 params = output_channels * (kernel_ops + bias_ops) flops = batch_size * params * output_height * output_width list_pooling2d.append(flops) list_pooling1d=[] def pooling1d_hook(self, input, output): batch_size, input_channels, input_length = input[0].size() output_channels, output_length = output[0].size() kernel_ops = self.kernel_size[0] bias_ops = 0 params = output_channels * (kernel_ops + bias_ops) flops = batch_size * params * output_length list_pooling2d.append(flops) def foo(net): childrens = list(net.children()) if not childrens: if isinstance(net, nn.Conv2d): net.register_forward_hook(conv2d_hook) elif isinstance(net, nn.Conv1d): net.register_forward_hook(conv1d_hook) elif isinstance(net, nn.Linear): net.register_forward_hook(linear_hook) elif isinstance(net, nn.BatchNorm2d) or isinstance(net, nn.BatchNorm1d): net.register_forward_hook(bn_hook) elif isinstance(net, nn.ReLU): net.register_forward_hook(relu_hook) elif isinstance(net, nn.AvgPool2d) or isinstance(net, nn.MaxPool2d): net.register_forward_hook(pooling2d_hook) elif isinstance(net, nn.AvgPool1d) or isinstance(net, nn.MaxPool1d): net.register_forward_hook(pooling1d_hook) else: print('Warning: flop of module {} is not counted!'.format(net)) return for c in childrens: foo(c) # Register hook foo(model) device = device = next(model.parameters()).device input = torch.rand(1, audio_length).to(device) out = model(input) total_flops = sum(list_conv2d) + sum(list_conv1d) + sum(list_linear) + \ sum(list_bn) + sum(list_relu) + sum(list_pooling2d) + sum(list_pooling1d) return total_flops
Create black list. Black list is a list of audio ids that will be skipped in training.
def dcase2017task4(args): """Create black list. Black list is a list of audio ids that will be skipped in training. """ # Augments & parameters workspace = args.workspace # Black list from DCASE 2017 Task 4 test_weak_csv = 'metadata/black_list/groundtruth_weak_label_testing_set.csv' evaluation_weak_csv = 'metadata/black_list/groundtruth_weak_label_evaluation_set.csv' black_list_csv = os.path.join(workspace, 'black_list', 'dcase2017task4.csv') create_folder(os.path.dirname(black_list_csv)) def get_id_sets(csv_path): with open(csv_path, 'r') as fr: reader = csv.reader(fr, delimiter='\t') lines = list(reader) ids_set = [] for line in lines: """line: ['-5QrBL6MzLg_60.000_70.000.wav', '60.000', '70.000', 'Train horn']""" ids_set.append(line[0][0 : 11]) ids_set = list(set(ids_set)) return ids_set test_ids_set = get_id_sets(test_weak_csv) evaluation_ids_set = get_id_sets(evaluation_weak_csv) full_ids_set = test_ids_set + evaluation_ids_set # Write black list fw = open(black_list_csv, 'w') for id in full_ids_set: fw.write('{}\n'.format(id)) print('Write black list to {}'.format(black_list_csv))