response | instruction |
---|---|
Adds all current event devices to the global dict of event devices.
Returns:
The number of event devices connected, at the time UKIP was started.
Raises:
TypeError: If there is an error in converting the PID/VID of a USB device.
ValueError: If there is an error in converting the PID/VID of a USB device.
RuntimeError: If there is an error in launching the thread.
DeviceError: If there is an error in creating the device list. | def init_device_list() -> int:
"""Adds all current event devices to the global dict of event devices.
Returns:
The number of event devices connected, at the time UKIP was started.
Raises:
TypeError: If there is an error in converting the PID/VID of a USB device.
ValueError: If there is an error in converting the PID/VID of a USB device.
RuntimeError: If there is an error in launching the thread.
DeviceError: If there is an error in creating the device list.
"""
device_count = 0
try:
local_device_context = pyudev.Context()
local_device_monitor = pyudev.Monitor.from_netlink(local_device_context)
local_device_monitor.filter_by(subsystem='input')
except (ValueError, EnvironmentError, DeviceError) as mex:
log.warning(
'There was an error creating the initial list of USB devices: %s', mex)
raise DeviceError('The device context and monitor could not be created.')
for device in local_device_context.list_devices():
if device.device_node and device.device_node.startswith(
'/dev/input/event') and (device.get('ID_VENDOR_ID') and
device.get('ID_MODEL_ID')):
try:
vendor_id = int(device.get('ID_VENDOR_ID'), 16)
product_id = int(device.get('ID_MODEL_ID'), 16)
except (TypeError, ValueError) as mex:
log.error(
'There was an error in converting the PID and VID of a USB device: '
'%s', mex)
continue
try:
threading.Thread(
target=monitor_device_thread,
args=(device, vendor_id, product_id)).start()
device_count += 1
except RuntimeError as e:
log.error(
'There was a runtime error in starting the monitoring thread: %s',
e)
return device_count |
Saves given data as a .pkl (pickle) file
Parameters:
data(dict):
Dictionary containing all the necessary data to save | def save_data(data):
"""
Saves given data as a .pkl (pickle) file
Parameters:
data(dict):
Dictionary containing all the necessary data to save
"""
# Open data file, create it if it does not exist
with open('data.pkl', 'wb') as data_file:
pickle.dump(data, data_file) |
Loads saved pkl file and returns the stored data
Returns(dict):
Dictionary containing all the saved data | def load_data() -> dict:
"""
Loads saved pkl file and returns the stored data
Returns(dict):
Dictionary containing all the saved data
"""
try:
with open('data.pkl', 'rb') as data_file: # Open data file
data = pickle.load(data_file)
return data
except (ValueError, FileNotFoundError):
# Data File is corrupted or not found so recreate it
save_data(data=DEFAULT_DATA)
return load_data() |
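A minimal usage sketch for the two helpers above, assuming `DEFAULT_DATA` is a plain dict defined elsewhere in the module; the keys shown here are placeholders, not the project's actual settings:
# Hedged usage sketch: DEFAULT_DATA and its keys are illustrative placeholders.
DEFAULT_DATA = {'export_path': '', 'model_choice': None}

settings = load_data()                  # falls back to DEFAULT_DATA if data.pkl is missing or corrupt
settings['export_path'] = '/tmp/output'
save_data(settings)                     # persists the dict to data.pkl for the next run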
Get the model hash dictionary | def load_model_hash_data(dictionary):
'''Get the model hash dictionary'''
with open(dictionary, 'r') as d:
return json.load(d) |
Attempts to decrypt VIP model link with given input code | def vip_downloads(password, link_type=VIP_REPO):
"""Attempts to decrypt VIP model link with given input code"""
try:
kdf = PBKDF2HMAC(
algorithm=hashes.SHA256(),
length=32,
salt=link_type[0],
iterations=390000,)
key = base64.urlsafe_b64encode(kdf.derive(bytes(password, 'utf-8')))
f = Fernet(key)
return str(f.decrypt(link_type[1]), 'UTF-8')
except Exception:
return NO_CODE |
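For context, a hedged sketch of the encryption side that would produce a `(salt, token)` pair consumable by `vip_downloads` above; the salt, iteration count, and link handling below are assumptions for illustration, not the project's actual values:
# Sketch only: returns (salt, token) in the layout vip_downloads expects as (link_type[0], link_type[1]).
import base64
import os
from cryptography.fernet import Fernet
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.kdf.pbkdf2 import PBKDF2HMAC

def encrypt_link(password: str, link: str):
    salt = os.urandom(16)                                   # stored alongside the token
    kdf = PBKDF2HMAC(algorithm=hashes.SHA256(), length=32, salt=salt, iterations=390000)
    key = base64.urlsafe_b64encode(kdf.derive(password.encode('utf-8')))
    token = Fernet(key).encrypt(link.encode('utf-8'))
    return salt, token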
Apply model to a given mixture.
Args:
shifts (int): if > 0, will shift in time `mix` by a random amount between 0 and 0.5 sec
and apply the opposite shift to the output. This is repeated `shifts` times and
all predictions are averaged. This effectively makes the model time equivariant
and improves SDR by up to 0.2 points.
split (bool): if True, the input will be broken down into 8-second extracts
and predictions will be performed individually on each and concatenated.
Useful for models with a large memory footprint like Tasnet.
progress (bool): if True, show a progress bar (requires split=True)
device (torch.device, str, or None): if provided, device on which to
execute the computation, otherwise `mix.device` is assumed.
When `device` is different from `mix.device`, only local computations will
be on `device`, while the entire tracks will be stored on `mix.device`. | def apply_model(model,
mix,
shifts=1,
split=True,
overlap=0.25,
transition_power=1.,
static_shifts=1,
set_progress_bar=None,
device=None,
progress=False,
num_workers=0,
pool=None):
"""
Apply model to a given mixture.
Args:
shifts (int): if > 0, will shift in time `mix` by a random amount between 0 and 0.5 sec
and apply the opposite shift to the output. This is repeated `shifts` times and
all predictions are averaged. This effectively makes the model time equivariant
and improves SDR by up to 0.2 points.
split (bool): if True, the input will be broken down into 8-second extracts
and predictions will be performed individually on each and concatenated.
Useful for models with a large memory footprint like Tasnet.
progress (bool): if True, show a progress bar (requires split=True)
device (torch.device, str, or None): if provided, device on which to
execute the computation, otherwise `mix.device` is assumed.
When `device` is different from `mix.device`, only local computations will
be on `device`, while the entire tracks will be stored on `mix.device`.
"""
global fut_length
global bag_num
global prog_bar
if device is None:
device = mix.device
else:
device = th.device(device)
if pool is None:
if num_workers > 0 and device.type == 'cpu':
pool = ThreadPoolExecutor(num_workers)
else:
pool = DummyPoolExecutor()
kwargs = {
'shifts': shifts,
'split': split,
'overlap': overlap,
'transition_power': transition_power,
'progress': progress,
'device': device,
'pool': pool,
'set_progress_bar': set_progress_bar,
'static_shifts': static_shifts,
}
if isinstance(model, BagOfModels):
# Special treatment for bag of models.
# We explicitly apply `apply_model` multiple times so that the random shifts
# are different for each model.
estimates = 0
totals = [0] * len(model.sources)
bag_num = len(model.models)
fut_length = 0
prog_bar = 0
current_model = 0 #(bag_num + 1)
for sub_model, weight in zip(model.models, model.weights):
original_model_device = next(iter(sub_model.parameters())).device
sub_model.to(device)
fut_length += fut_length
current_model += 1
out = apply_model(sub_model, mix, **kwargs)
sub_model.to(original_model_device)
for k, inst_weight in enumerate(weight):
out[:, k, :, :] *= inst_weight
totals[k] += inst_weight
estimates += out
del out
for k in range(estimates.shape[1]):
estimates[:, k, :, :] /= totals[k]
return estimates
model.to(device)
model.eval()
assert transition_power >= 1, "transition_power < 1 leads to weird behavior."
batch, channels, length = mix.shape
if shifts:
kwargs['shifts'] = 0
max_shift = int(0.5 * model.samplerate)
mix = tensor_chunk(mix)
padded_mix = mix.padded(length + 2 * max_shift)
out = 0
for _ in range(shifts):
offset = random.randint(0, max_shift)
shifted = TensorChunk(padded_mix, offset, length + max_shift - offset)
shifted_out = apply_model(model, shifted, **kwargs)
out += shifted_out[..., max_shift - offset:]
out /= shifts
return out
elif split:
kwargs['split'] = False
out = th.zeros(batch, len(model.sources), channels, length, device=mix.device)
sum_weight = th.zeros(length, device=mix.device)
segment = int(model.samplerate * model.segment)
stride = int((1 - overlap) * segment)
offsets = range(0, length, stride)
scale = float(format(stride / model.samplerate, ".2f"))
# We start from a triangle shaped weight, with maximal weight in the middle
# of the segment. Then we normalize and take to the power `transition_power`.
# Large values of transition power will lead to sharper transitions.
weight = th.cat([th.arange(1, segment // 2 + 1, device=device),
th.arange(segment - segment // 2, 0, -1, device=device)])
assert len(weight) == segment
# If the overlap < 50%, this will translate to linear transition when
# transition_power is 1.
weight = (weight / weight.max())**transition_power
futures = []
for offset in offsets:
chunk = TensorChunk(mix, offset, segment)
future = pool.submit(apply_model, model, chunk, **kwargs)
futures.append((future, offset))
offset += segment
if progress:
futures = tqdm.tqdm(futures, unit_scale=scale, ncols=120, unit='seconds')
for future, offset in futures:
if set_progress_bar:
fut_length = (len(futures) * bag_num * static_shifts)
prog_bar += 1
set_progress_bar(0.1, (0.8/fut_length*prog_bar))
chunk_out = future.result()
chunk_length = chunk_out.shape[-1]
out[..., offset:offset + segment] += (weight[:chunk_length] * chunk_out).to(mix.device)
sum_weight[offset:offset + segment] += weight[:chunk_length].to(mix.device)
assert sum_weight.min() > 0
out /= sum_weight
return out
else:
if hasattr(model, 'valid_length'):
valid_length = model.valid_length(length)
else:
valid_length = length
mix = tensor_chunk(mix)
padded_mix = mix.padded(valid_length).to(device)
with th.no_grad():
out = model(padded_mix)
return center_trim(out, length) |
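A hedged usage sketch for `apply_model`; the model name and clip length are assumptions for illustration (`get_model` is defined further down in this file set):
# Illustrative only: 'htdemucs' is a hypothetical pretrained signature.
import torch as th

model = get_model('htdemucs')
mix = th.randn(1, 2, model.samplerate * 10)        # (batch, channels, samples), 10 s of stereo
with th.no_grad():
    sources = apply_model(model, mix, shifts=1, split=True, overlap=0.25)
# sources: (batch, len(model.sources), channels, samples)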
Rescale initial weight scale. It is unclear why it helps but it certainly does.
| def rescale_conv(conv, reference):
"""Rescale initial weight scale. It is unclear why it helps but it certainly does.
"""
std = conv.weight.std().detach()
scale = (std / reference)**0.5
conv.weight.data /= scale
if conv.bias is not None:
conv.bias.data /= scale |
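A sketch of how `rescale_conv` is typically applied across a network, in the spirit of Demucs' weight initialization; the `reference` value here is illustrative:
# Sketch: walk the module tree and rescale every (transposed) 1D convolution.
import torch.nn as nn

def rescale_module(module: nn.Module, reference: float = 0.1):
    for sub in module.modules():
        if isinstance(sub, (nn.Conv1d, nn.ConvTranspose1d)):
            rescale_conv(sub, reference)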
Element-wise arctangent function of y/x.
Returns a new tensor with signed angles in radians.
It is an alternative implementation of torch.atan2
Args:
y (Tensor): First input tensor
x (Tensor): Second input tensor [shape=y.shape]
Returns:
Tensor: [shape=y.shape]. | def atan2(y, x):
r"""Element-wise arctangent function of y/x.
Returns a new tensor with signed angles in radians.
It is an alternative implementation of torch.atan2
Args:
y (Tensor): First input tensor
x (Tensor): Second input tensor [shape=y.shape]
Returns:
Tensor: [shape=y.shape].
"""
pi = 2 * torch.asin(torch.tensor(1.0))
x += ((x == 0) & (y == 0)) * 1.0
out = torch.atan(y / x)
out += ((y >= 0) & (x < 0)) * pi
out -= ((y < 0) & (x < 0)) * pi
out *= 1 - ((y > 0) & (x == 0)) * 1.0
out += ((y > 0) & (x == 0)) * (pi / 2)
out *= 1 - ((y < 0) & (x == 0)) * 1.0
out += ((y < 0) & (x == 0)) * (-pi / 2)
return out |
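A quick sanity check (illustrative) that the element-wise implementation agrees with `torch.atan2` for generic inputs; the explicit branches above only matter for the `x == 0` / `y == 0` special cases:
# Clones are passed because atan2 above modifies x in place.
import torch

y = torch.randn(1000)
x = torch.randn(1000)
assert torch.allclose(atan2(y.clone(), x.clone()), torch.atan2(y, x), atol=1e-6)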
Computes the norm value of a torch Tensor, assuming that it
comes as real and imaginary part in its last dimension.
Args:
x (Tensor): Input Tensor of shape [shape=(..., 2)]
Returns:
Tensor: shape as x excluding the last dimension. | def _norm(x: torch.Tensor) -> torch.Tensor:
r"""Computes the norm value of a torch Tensor, assuming that it
comes as real and imaginary part in its last dimension.
Args:
x (Tensor): Input Tensor of shape [shape=(..., 2)]
Returns:
Tensor: shape as x excluding the last dimension.
"""
return torch.abs(x[..., 0]) ** 2 + torch.abs(x[..., 1]) ** 2 |
Element-wise multiplication of two complex Tensors described
through their real and imaginary parts.
The result is added to the `out` tensor | def _mul_add(a: torch.Tensor, b: torch.Tensor, out: Optional[torch.Tensor] = None) -> torch.Tensor:
"""Element-wise multiplication of two complex Tensors described
through their real and imaginary parts.
The result is added to the `out` tensor"""
# check `out` and allocate it if needed
target_shape = torch.Size([max(sa, sb) for (sa, sb) in zip(a.shape, b.shape)])
if out is None or out.shape != target_shape:
out = torch.zeros(target_shape, dtype=a.dtype, device=a.device)
if out is a:
real_a = a[..., 0]
out[..., 0] = out[..., 0] + (real_a * b[..., 0] - a[..., 1] * b[..., 1])
out[..., 1] = out[..., 1] + (real_a * b[..., 1] + a[..., 1] * b[..., 0])
else:
out[..., 0] = out[..., 0] + (a[..., 0] * b[..., 0] - a[..., 1] * b[..., 1])
out[..., 1] = out[..., 1] + (a[..., 0] * b[..., 1] + a[..., 1] * b[..., 0])
return out |
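An illustrative check that the real/imaginary multiply-accumulate matches native complex arithmetic:
# a, b, acc are stored as (..., 2) real/imag pairs, as _mul_add expects.
import torch

a, b, acc = torch.randn(8, 4, 2), torch.randn(8, 4, 2), torch.randn(8, 4, 2)
res = _mul_add(a, b, acc.clone())
ref = torch.view_as_complex(acc) + torch.view_as_complex(a) * torch.view_as_complex(b)
assert torch.allclose(torch.view_as_complex(res), ref, atol=1e-6)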
Element-wise multiplication of two complex Tensors described
through their real and imaginary parts
can work in place when `out` is `a` | def _mul(a: torch.Tensor, b: torch.Tensor, out: Optional[torch.Tensor] = None) -> torch.Tensor:
"""Element-wise multiplication of two complex Tensors described
through their real and imaginary parts
can work in place when `out` is `a`"""
target_shape = torch.Size([max(sa, sb) for (sa, sb) in zip(a.shape, b.shape)])
if out is None or out.shape != target_shape:
out = torch.zeros(target_shape, dtype=a.dtype, device=a.device)
if out is a:
real_a = a[..., 0]
out[..., 0] = real_a * b[..., 0] - a[..., 1] * b[..., 1]
out[..., 1] = real_a * b[..., 1] + a[..., 1] * b[..., 0]
else:
out[..., 0] = a[..., 0] * b[..., 0] - a[..., 1] * b[..., 1]
out[..., 1] = a[..., 0] * b[..., 1] + a[..., 1] * b[..., 0]
return out |
Element-wise multiplicative inverse of a Tensor with complex
entries described through their real and imaginary parts.
can work in place when `out` is `z` | def _inv(z: torch.Tensor, out: Optional[torch.Tensor] = None) -> torch.Tensor:
"""Element-wise multiplicative inverse of a Tensor with complex
entries described through their real and imaginary parts.
can work in place when `out` is `z`"""
ez = _norm(z)
if out is None or out.shape != z.shape:
out = torch.zeros_like(z)
out[..., 0] = z[..., 0] / ez
out[..., 1] = -z[..., 1] / ez
return out |
Element-wise complex conjugate of a Tensor with complex entries
described through their real and imaginary parts.
can work in place when `out` is `z` | def _conj(z, out: Optional[torch.Tensor] = None) -> torch.Tensor:
"""Element-wise complex conjugate of a Tensor with complex entries
described through their real and imaginary parts.
can work in place when `out` is `z`"""
if out is None or out.shape != z.shape:
out = torch.zeros_like(z)
out[..., 0] = z[..., 0]
out[..., 1] = -z[..., 1]
return out |
Invert 1x1 or 2x2 matrices
Will generate errors if the matrices are singular: user must handle this
through their own regularization schemes.
Args:
M (Tensor): [shape=(..., nb_channels, nb_channels, 2)]
matrices to invert: must be square along dimensions -3 and -2
Returns:
invM (Tensor): [shape=M.shape]
inverses of M | def _invert(M: torch.Tensor, out: Optional[torch.Tensor] = None) -> torch.Tensor:
"""
Invert 1x1 or 2x2 matrices
Will generate errors if the matrices are singular: user must handle this
through their own regularization schemes.
Args:
M (Tensor): [shape=(..., nb_channels, nb_channels, 2)]
matrices to invert: must be square along dimensions -3 and -2
Returns:
invM (Tensor): [shape=M.shape]
inverses of M
"""
nb_channels = M.shape[-2]
if out is None or out.shape != M.shape:
out = torch.empty_like(M)
if nb_channels == 1:
# scalar case
out = _inv(M, out)
elif nb_channels == 2:
# two channels case: analytical expression
# first compute the determinant
det = _mul(M[..., 0, 0, :], M[..., 1, 1, :])
det = det - _mul(M[..., 0, 1, :], M[..., 1, 0, :])
# invert it
invDet = _inv(det)
# then fill out the matrix with the inverse
out[..., 0, 0, :] = _mul(invDet, M[..., 1, 1, :], out[..., 0, 0, :])
out[..., 1, 0, :] = _mul(-invDet, M[..., 1, 0, :], out[..., 1, 0, :])
out[..., 0, 1, :] = _mul(-invDet, M[..., 0, 1, :], out[..., 0, 1, :])
out[..., 1, 1, :] = _mul(invDet, M[..., 0, 0, :], out[..., 1, 1, :])
else:
raise Exception("Only 2 channels are supported for the torch version.")
return out |
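An illustrative check of the 2x2 branch against `torch.linalg.inv` applied to the equivalent complex matrices (the random matrices here are assumed to be well conditioned):
# M is a stack of 2x2 complex matrices stored as real/imag pairs in the last dimension.
import torch

M = torch.randn(5, 2, 2, 2, dtype=torch.float64)
invM = _invert(M)
ref = torch.linalg.inv(torch.view_as_complex(M.contiguous()))
assert torch.allclose(torch.view_as_complex(invM.contiguous()), ref, atol=1e-6)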
Expectation maximization algorithm, for refining source separation
estimates.
This algorithm improves source separation results by
enforcing multichannel consistency for the estimates. This usually means
a better perceptual quality in terms of spatial artifacts.
The implementation follows the details presented in [1]_, taking
inspiration from the original EM algorithm proposed in [2]_ and its
weighted refinement proposed in [3]_, [4]_.
It works by iteratively:
* Re-estimate source parameters (power spectral densities and spatial
covariance matrices) through :func:`get_local_gaussian_model`.
* Separate again the mixture with the new parameters by first computing
the new modelled mixture covariance matrices with :func:`get_mix_model`,
prepare the Wiener filters through :func:`wiener_gain` and apply them
with :func:`apply_filter`.
References
----------
.. [1] S. Uhlich and M. Porcu and F. Giron and M. Enenkl and T. Kemp and
N. Takahashi and Y. Mitsufuji, "Improving music source separation based
on deep neural networks through data augmentation and network
blending." 2017 IEEE International Conference on Acoustics, Speech
and Signal Processing (ICASSP). IEEE, 2017.
.. [2] N.Q. Duong and E. Vincent and R.Gribonval. "Under-determined
reverberant audio source separation using a full-rank spatial
covariance model." IEEE Transactions on Audio, Speech, and Language
Processing 18.7 (2010): 1830-1840.
.. [3] A. Nugraha and A. Liutkus and E. Vincent. "Multichannel audio source
separation with deep neural networks." IEEE/ACM Transactions on Audio,
Speech, and Language Processing 24.9 (2016): 1652-1664.
.. [4] A. Nugraha and A. Liutkus and E. Vincent. "Multichannel music
separation with deep neural networks." 2016 24th European Signal
Processing Conference (EUSIPCO). IEEE, 2016.
.. [5] A. Liutkus and R. Badeau and G. Richard "Kernel additive models for
source separation." IEEE Transactions on Signal Processing
62.16 (2014): 4298-4310.
Args:
y (Tensor): [shape=(nb_frames, nb_bins, nb_channels, 2, nb_sources)]
initial estimates for the sources
x (Tensor): [shape=(nb_frames, nb_bins, nb_channels, 2)]
complex STFT of the mixture signal
iterations (int): [scalar]
number of iterations for the EM algorithm.
eps (float or None): [scalar]
The epsilon value to use for regularization and filters.
Returns:
y (Tensor): [shape=(nb_frames, nb_bins, nb_channels, 2, nb_sources)]
estimated sources after iterations
v (Tensor): [shape=(nb_frames, nb_bins, nb_sources)]
estimated power spectral densities
R (Tensor): [shape=(nb_bins, nb_channels, nb_channels, 2, nb_sources)]
estimated spatial covariance matrices
Notes:
* You need an initial estimate for the sources to apply this
algorithm. This is precisely what the :func:`wiener` function does.
* This algorithm *is not* an implementation of the "exact" EM
proposed in [1]_. In particular, it does not compute the posterior
covariance matrices the same (exact) way. Instead, it uses the
simplified approximate scheme initially proposed in [5]_ and further
refined in [3]_, [4]_, which boils down to just taking the empirical
covariance of the recent source estimates, followed by a weighted
average for the update of the spatial covariance matrix. It has been
empirically demonstrated that this simplified algorithm is more
robust for music separation.
Warning:
It is *very* important to make sure `x.dtype` is `torch.float64`
if you want double precision, because this function will **not**
do such conversion for you from `torch.complex32`, in case you want the
smaller RAM usage on purpose.
It is usually always better in terms of quality to have double
precision, by e.g. calling :func:`expectation_maximization`
with ``x.to(torch.float64)``. | def expectation_maximization(
y: torch.Tensor,
x: torch.Tensor,
iterations: int = 2,
eps: float = 1e-10,
batch_size: int = 200,
):
r"""Expectation maximization algorithm, for refining source separation
estimates.
This algorithm improves source separation results by
enforcing multichannel consistency for the estimates. This usually means
a better perceptual quality in terms of spatial artifacts.
The implementation follows the details presented in [1]_, taking
inspiration from the original EM algorithm proposed in [2]_ and its
weighted refinement proposed in [3]_, [4]_.
It works by iteratively:
* Re-estimate source parameters (power spectral densities and spatial
covariance matrices) through :func:`get_local_gaussian_model`.
* Separate again the mixture with the new parameters by first computing
the new modelled mixture covariance matrices with :func:`get_mix_model`,
prepare the Wiener filters through :func:`wiener_gain` and apply them
with :func:`apply_filter`.
References
----------
.. [1] S. Uhlich and M. Porcu and F. Giron and M. Enenkl and T. Kemp and
N. Takahashi and Y. Mitsufuji, "Improving music source separation based
on deep neural networks through data augmentation and network
blending." 2017 IEEE International Conference on Acoustics, Speech
and Signal Processing (ICASSP). IEEE, 2017.
.. [2] N.Q. Duong and E. Vincent and R.Gribonval. "Under-determined
reverberant audio source separation using a full-rank spatial
covariance model." IEEE Transactions on Audio, Speech, and Language
Processing 18.7 (2010): 1830-1840.
.. [3] A. Nugraha and A. Liutkus and E. Vincent. "Multichannel audio source
separation with deep neural networks." IEEE/ACM Transactions on Audio,
Speech, and Language Processing 24.9 (2016): 1652-1664.
.. [4] A. Nugraha and A. Liutkus and E. Vincent. "Multichannel music
separation with deep neural networks." 2016 24th European Signal
Processing Conference (EUSIPCO). IEEE, 2016.
.. [5] A. Liutkus and R. Badeau and G. Richard "Kernel additive models for
source separation." IEEE Transactions on Signal Processing
62.16 (2014): 4298-4310.
Args:
y (Tensor): [shape=(nb_frames, nb_bins, nb_channels, 2, nb_sources)]
initial estimates for the sources
x (Tensor): [shape=(nb_frames, nb_bins, nb_channels, 2)]
complex STFT of the mixture signal
iterations (int): [scalar]
number of iterations for the EM algorithm.
eps (float or None): [scalar]
The epsilon value to use for regularization and filters.
Returns:
y (Tensor): [shape=(nb_frames, nb_bins, nb_channels, 2, nb_sources)]
estimated sources after iterations
v (Tensor): [shape=(nb_frames, nb_bins, nb_sources)]
estimated power spectral densities
R (Tensor): [shape=(nb_bins, nb_channels, nb_channels, 2, nb_sources)]
estimated spatial covariance matrices
Notes:
* You need an initial estimate for the sources to apply this
algorithm. This is precisely what the :func:`wiener` function does.
* This algorithm *is not* an implementation of the "exact" EM
proposed in [1]_. In particular, it does not compute the posterior
covariance matrices the same (exact) way. Instead, it uses the
simplified approximate scheme initially proposed in [5]_ and further
refined in [3]_, [4]_, which boils down to just taking the empirical
covariance of the recent source estimates, followed by a weighted
average for the update of the spatial covariance matrix. It has been
empirically demonstrated that this simplified algorithm is more
robust for music separation.
Warning:
It is *very* important to make sure `x.dtype` is `torch.float64`
if you want double precision, because this function will **not**
do such conversion for you from `torch.complex32`, in case you want the
smaller RAM usage on purpose.
It is usually always better in terms of quality to have double
precision, by e.g. calling :func:`expectation_maximization`
with ``x.to(torch.float64)``.
"""
# dimensions
(nb_frames, nb_bins, nb_channels) = x.shape[:-1]
nb_sources = y.shape[-1]
regularization = torch.cat(
(
torch.eye(nb_channels, dtype=x.dtype, device=x.device)[..., None],
torch.zeros((nb_channels, nb_channels, 1), dtype=x.dtype, device=x.device),
),
dim=2,
)
regularization = torch.sqrt(torch.as_tensor(eps)) * (
regularization[None, None, ...].expand((-1, nb_bins, -1, -1, -1))
)
# allocate the spatial covariance matrices
R = [
torch.zeros((nb_bins, nb_channels, nb_channels, 2), dtype=x.dtype, device=x.device)
for j in range(nb_sources)
]
weight: torch.Tensor = torch.zeros((nb_bins,), dtype=x.dtype, device=x.device)
v: torch.Tensor = torch.zeros((nb_frames, nb_bins, nb_sources), dtype=x.dtype, device=x.device)
for it in range(iterations):
# constructing the mixture covariance matrix. Doing it with a loop
# to avoid ever storing the whole 6D tensor in RAM
# update the PSD as the average spectrogram over channels
v = torch.mean(torch.abs(y[..., 0, :]) ** 2 + torch.abs(y[..., 1, :]) ** 2, dim=-2)
# update spatial covariance matrices (weighted update)
for j in range(nb_sources):
R[j] = torch.tensor(0.0, device=x.device)
weight = torch.tensor(eps, device=x.device)
pos: int = 0
batch_size = batch_size if batch_size else nb_frames
while pos < nb_frames:
t = torch.arange(pos, min(nb_frames, pos + batch_size))
pos = int(t[-1]) + 1
R[j] = R[j] + torch.sum(_covariance(y[t, ..., j]), dim=0)
weight = weight + torch.sum(v[t, ..., j], dim=0)
R[j] = R[j] / weight[..., None, None, None]
weight = torch.zeros_like(weight)
# cloning y if we track gradient, because we're going to update it
if y.requires_grad:
y = y.clone()
pos = 0
while pos < nb_frames:
t = torch.arange(pos, min(nb_frames, pos + batch_size))
pos = int(t[-1]) + 1
y[t, ...] = torch.tensor(0.0, device=x.device, dtype=x.dtype)
# compute mix covariance matrix
Cxx = regularization
for j in range(nb_sources):
Cxx = Cxx + (v[t, ..., j, None, None, None] * R[j][None, ...].clone())
# invert it
inv_Cxx = _invert(Cxx)
# separate the sources
for j in range(nb_sources):
# create a wiener gain for this source
gain = torch.zeros_like(inv_Cxx)
# computes multichannel Wiener gain as v_j R_j inv_Cxx
indices = torch.cartesian_prod(
torch.arange(nb_channels),
torch.arange(nb_channels),
torch.arange(nb_channels),
)
for index in indices:
gain[:, :, index[0], index[1], :] = _mul_add(
R[j][None, :, index[0], index[2], :].clone(),
inv_Cxx[:, :, index[2], index[1], :],
gain[:, :, index[0], index[1], :],
)
gain = gain * v[t, ..., None, None, None, j]
# apply it to the mixture
for i in range(nb_channels):
y[t, ..., j] = _mul_add(gain[..., i, :], x[t, ..., i, None, :], y[t, ..., j])
return y, v, R |
Wiener-based separation for multichannel audio.
The method uses the (possibly multichannel) spectrograms of the
sources to separate the (complex) Short Term Fourier Transform of the
mix. Separation is done in a sequential way by:
* Getting an initial estimate. This can be done in two ways: either by
directly using the spectrograms with the mixture phase, or
by using a softmasking strategy. This initial phase is controlled
by the `softmask` flag.
* If required, adding an additional residual target as the mix minus
all targets.
* Refining these initial estimates through a call to
:func:`expectation_maximization` if the number of iterations is nonzero.
This implementation also allows specifying the epsilon value used for
regularization. It is based on [1]_, [2]_, [3]_, [4]_.
References
----------
.. [1] S. Uhlich and M. Porcu and F. Giron and M. Enenkl and T. Kemp and
N. Takahashi and Y. Mitsufuji, "Improving music source separation based
on deep neural networks through data augmentation and network
blending." 2017 IEEE International Conference on Acoustics, Speech
and Signal Processing (ICASSP). IEEE, 2017.
.. [2] A. Nugraha and A. Liutkus and E. Vincent. "Multichannel audio source
separation with deep neural networks." IEEE/ACM Transactions on Audio,
Speech, and Language Processing 24.9 (2016): 1652-1664.
.. [3] A. Nugraha and A. Liutkus and E. Vincent. "Multichannel music
separation with deep neural networks." 2016 24th European Signal
Processing Conference (EUSIPCO). IEEE, 2016.
.. [4] A. Liutkus and R. Badeau and G. Richard "Kernel additive models for
source separation." IEEE Transactions on Signal Processing
62.16 (2014): 4298-4310.
Args:
targets_spectrograms (Tensor): spectrograms of the sources
[shape=(nb_frames, nb_bins, nb_channels, nb_sources)].
This is a nonnegative tensor that is
usually the output of the actual separation method of the user. The
spectrograms may be mono, but they need to be 4-dimensional in all
cases.
mix_stft (Tensor): [shape=(nb_frames, nb_bins, nb_channels, complex=2)]
STFT of the mixture signal.
iterations (int): [scalar]
number of iterations for the EM algorithm
softmask (bool): Describes how the initial estimates are obtained.
* if `False`, then the mixture phase will directly be used with the
spectrogram as initial estimates.
* if `True`, initial estimates are obtained by multiplying the
complex mix element-wise with the ratio of each target spectrogram
with the sum of them all. This strategy is better if the models are
not really good, and worse otherwise.
residual (bool): if `True`, an additional target is created, which is
equal to the mixture minus the other targets, before application of
expectation maximization
eps (float): Epsilon value to use for computing the separations.
This is used whenever division with a model energy is
performed, i.e. when softmasking and when iterating the EM.
It can be understood as the energy of the additional white noise
that is taken out when separating.
Returns:
Tensor: shape=(nb_frames, nb_bins, nb_channels, complex=2, nb_sources)
STFT of estimated sources
Notes:
* Be careful that you need *magnitude spectrogram estimates* for the
case `softmask==False`.
* `softmask=False` is recommended
* The epsilon value will have a huge impact on performance. If it's
large, only the parts of the signal with a significant energy will
be kept in the sources. This epsilon then directly controls the
energy of the reconstruction error.
Warning:
As in :func:`expectation_maximization`, we recommend converting the
mixture `x` to double precision `torch.float64` *before* calling
:func:`wiener`. | def wiener(
targets_spectrograms: torch.Tensor,
mix_stft: torch.Tensor,
iterations: int = 1,
softmask: bool = False,
residual: bool = False,
scale_factor: float = 10.0,
eps: float = 1e-10,
):
"""Wiener-based separation for multichannel audio.
The method uses the (possibly multichannel) spectrograms of the
sources to separate the (complex) Short Term Fourier Transform of the
mix. Separation is done in a sequential way by:
* Getting an initial estimate. This can be done in two ways: either by
directly using the spectrograms with the mixture phase, or
by using a softmasking strategy. This initial phase is controlled
by the `softmask` flag.
* If required, adding an additional residual target as the mix minus
all targets.
* Refining these initial estimates through a call to
:func:`expectation_maximization` if the number of iterations is nonzero.
This implementation also allows specifying the epsilon value used for
regularization. It is based on [1]_, [2]_, [3]_, [4]_.
References
----------
.. [1] S. Uhlich and M. Porcu and F. Giron and M. Enenkl and T. Kemp and
N. Takahashi and Y. Mitsufuji, "Improving music source separation based
on deep neural networks through data augmentation and network
blending." 2017 IEEE International Conference on Acoustics, Speech
and Signal Processing (ICASSP). IEEE, 2017.
.. [2] A. Nugraha and A. Liutkus and E. Vincent. "Multichannel audio source
separation with deep neural networks." IEEE/ACM Transactions on Audio,
Speech, and Language Processing 24.9 (2016): 1652-1664.
.. [3] A. Nugraha and A. Liutkus and E. Vincent. "Multichannel music
separation with deep neural networks." 2016 24th European Signal
Processing Conference (EUSIPCO). IEEE, 2016.
.. [4] A. Liutkus and R. Badeau and G. Richard "Kernel additive models for
source separation." IEEE Transactions on Signal Processing
62.16 (2014): 4298-4310.
Args:
targets_spectrograms (Tensor): spectrograms of the sources
[shape=(nb_frames, nb_bins, nb_channels, nb_sources)].
This is a nonnegative tensor that is
usually the output of the actual separation method of the user. The
spectrograms may be mono, but they need to be 4-dimensional in all
cases.
mix_stft (Tensor): [shape=(nb_frames, nb_bins, nb_channels, complex=2)]
STFT of the mixture signal.
iterations (int): [scalar]
number of iterations for the EM algorithm
softmask (bool): Describes how the initial estimates are obtained.
* if `False`, then the mixture phase will directly be used with the
spectrogram as initial estimates.
* if `True`, initial estimates are obtained by multiplying the
complex mix element-wise with the ratio of each target spectrogram
with the sum of them all. This strategy is better if the models are
not really good, and worse otherwise.
residual (bool): if `True`, an additional target is created, which is
equal to the mixture minus the other targets, before application of
expectation maximization
eps (float): Epsilon value to use for computing the separations.
This is used whenever division with a model energy is
performed, i.e. when softmasking and when iterating the EM.
It can be understood as the energy of the additional white noise
that is taken out when separating.
Returns:
Tensor: shape=(nb_frames, nb_bins, nb_channels, complex=2, nb_sources)
STFT of estimated sources
Notes:
* Be careful that you need *magnitude spectrogram estimates* for the
case `softmask==False`.
* `softmask=False` is recommended
* The epsilon value will have a huge impact on performance. If it's
large, only the parts of the signal with a significant energy will
be kept in the sources. This epsilon then directly controls the
energy of the reconstruction error.
Warning:
As in :func:`expectation_maximization`, we recommend converting the
mixture `x` to double precision `torch.float64` *before* calling
:func:`wiener`.
"""
if softmask:
# if we use softmask, we compute the ratio mask for all targets and
# multiply by the mix stft
y = (
mix_stft[..., None]
* (
targets_spectrograms
/ (eps + torch.sum(targets_spectrograms, dim=-1, keepdim=True).to(mix_stft.dtype))
)[..., None, :]
)
else:
# otherwise, we just multiply the targets spectrograms with mix phase
# we tacitly assume that we have magnitude estimates.
angle = atan2(mix_stft[..., 1], mix_stft[..., 0])[..., None]
nb_sources = targets_spectrograms.shape[-1]
y = torch.zeros(
mix_stft.shape + (nb_sources,), dtype=mix_stft.dtype, device=mix_stft.device
)
y[..., 0, :] = targets_spectrograms * torch.cos(angle)
y[..., 1, :] = targets_spectrograms * torch.sin(angle)
if residual:
# if required, adding an additional target as the mix minus
# available targets
y = torch.cat([y, mix_stft[..., None] - y.sum(dim=-1, keepdim=True)], dim=-1)
if iterations == 0:
return y
# we need to refine the estimates. Scales down the estimates for
# numerical stability
max_abs = torch.max(
torch.as_tensor(1.0, dtype=mix_stft.dtype, device=mix_stft.device),
torch.sqrt(_norm(mix_stft)).max() / scale_factor,
)
mix_stft = mix_stft / max_abs
y = y / max_abs
# call expectation maximization
y = expectation_maximization(y, mix_stft, iterations, eps=eps)[0]
# scale estimates up again
y = y * max_abs
return y |
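A hedged usage sketch for `wiener`; the shapes follow the docstring above, and the random tensors stand in for real model output and a real mixture STFT:
# Illustrative shapes only; real inputs come from a separation model and an STFT.
import torch

nb_frames, nb_bins, nb_channels, nb_sources = 10, 129, 2, 4
targets = torch.rand(nb_frames, nb_bins, nb_channels, nb_sources, dtype=torch.float64)
mix_stft = torch.randn(nb_frames, nb_bins, nb_channels, 2, dtype=torch.float64)
y = wiener(targets, mix_stft, iterations=1, softmask=False)
# y: (nb_frames, nb_bins, nb_channels, 2, nb_sources), the filtered source STFTs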
Compute the empirical covariance for a source.
Args:
y_j (Tensor): complex stft of the source.
[shape=(nb_frames, nb_bins, nb_channels, 2)].
Returns:
Cj (Tensor): [shape=(nb_frames, nb_bins, nb_channels, nb_channels, 2)]
just y_j * conj(y_j.T): empirical covariance for each TF bin. | def _covariance(y_j):
"""
Compute the empirical covariance for a source.
Args:
y_j (Tensor): complex stft of the source.
[shape=(nb_frames, nb_bins, nb_channels, 2)].
Returns:
Cj (Tensor): [shape=(nb_frames, nb_bins, nb_channels, nb_channels, 2)]
just y_j * conj(y_j.T): empirical covariance for each TF bin.
"""
(nb_frames, nb_bins, nb_channels) = y_j.shape[:-1]
Cj = torch.zeros(
(nb_frames, nb_bins, nb_channels, nb_channels, 2),
dtype=y_j.dtype,
device=y_j.device,
)
indices = torch.cartesian_prod(torch.arange(nb_channels), torch.arange(nb_channels))
for index in indices:
Cj[:, :, index[0], index[1], :] = _mul_add(
y_j[:, :, index[0], :],
_conj(y_j[:, :, index[1], :]),
Cj[:, :, index[0], index[1], :],
)
return Cj |
Tiny wrapper around F.pad, just to allow for reflect padding on small input.
If this is the case, we insert extra 0 padding to the right before the reflection happens. | def pad1d(x: torch.Tensor, paddings: tp.Tuple[int, int], mode: str = 'constant', value: float = 0.):
"""Tiny wrapper around F.pad, just to allow for reflect padding on small input.
If this is the case, we insert extra 0 padding to the right before the reflection happens."""
x0 = x
length = x.shape[-1]
padding_left, padding_right = paddings
if mode == 'reflect':
max_pad = max(padding_left, padding_right)
if length <= max_pad:
extra_pad = max_pad - length + 1
extra_pad_right = min(padding_right, extra_pad)
extra_pad_left = extra_pad - extra_pad_right
paddings = (padding_left - extra_pad_left, padding_right - extra_pad_right)
x = F.pad(x, (extra_pad_left, extra_pad_right))
out = F.pad(x, paddings, mode, value)
assert out.shape[-1] == length + padding_left + padding_right
assert (out[..., padding_left: padding_left + length] == x0).all()
return out |
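An illustration of the special case the wrapper handles: reflect padding normally fails when the requested pad is at least as large as the input length:
# Plain F.pad(x, (4, 4), 'reflect') would raise here because the input has only 3 samples.
import torch

x = torch.randn(1, 1, 3)
out = pad1d(x, (4, 4), mode='reflect')
print(out.shape)        # torch.Size([1, 1, 11]) == length + left + right padding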
Linear upsampling, the output will be `stride` times longer. | def upsample(x, stride):
"""
Linear upsampling, the output will be `stride` times longer.
"""
batch, channels, time = x.size()
weight = th.arange(stride, device=x.device, dtype=th.float) / stride
x = x.view(batch, channels, time, 1)
out = x[..., :-1, :] * (1 - weight) + x[..., 1:, :] * weight
return out.reshape(batch, channels, -1) |
Downsample x by decimation. | def downsample(x, stride):
"""
Downsample x by decimation.
"""
return x[:, :, ::stride] |
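A quick shape check (illustrative): the linear upsampler interpolates between consecutive samples, so its output has `(time - 1) * stride` samples, and decimation keeps every `stride`-th one:
import torch as th

x = th.randn(2, 4, 100)
up = upsample(x, stride=4)          # (2, 4, 396)
down = downsample(up, stride=4)     # (2, 4, 99)
print(up.shape, down.shape)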
`name` must be a bag of models name or a pretrained signature
from the remote AWS model repo or the specified local repo if `repo` is not None. | def get_model(name: str,
repo: tp.Optional[Path] = None):
"""`name` must be a bag of models name or a pretrained signature
from the remote AWS model repo or the specified local repo if `repo` is not None.
"""
if name == 'demucs_unittest':
return demucs_unittest()
model_repo: ModelOnlyRepo
if repo is None:
models = _parse_remote_files(REMOTE_ROOT / 'files.txt')
model_repo = RemoteRepo(models)
bag_repo = BagOnlyRepo(REMOTE_ROOT, model_repo)
else:
if not repo.is_dir():
fatal(f"{repo} must exist and be a directory.")
model_repo = LocalRepo(repo)
bag_repo = BagOnlyRepo(repo, model_repo)
any_repo = AnyModelRepo(model_repo, bag_repo)
model = any_repo.get_model(name)
model.eval()
return model |
Load local model package or pre-trained model. | def get_model_from_args(args):
"""
Load local model package or pre-trained model.
"""
return get_model(name=args.name, repo=args.repo) |
Return the quantizer given the XP quantization args. | def get_quantizer(model, args, optimizer=None):
"""Return the quantizer given the XP quantization args."""
quantizer = None
if args.diffq:
quantizer = DiffQuantizer(
model, min_size=args.min_size, group_size=args.group_size)
if optimizer is not None:
quantizer.setup_optimizer(optimizer)
elif args.qat:
quantizer = UniformQuantizer(
model, bits=args.qat, min_size=args.min_size)
return quantizer |
Load a model from the given serialized model, either given as a dict (already loaded)
or a path to a file on disk. | def load_model(path_or_package, strict=False):
"""Load a model from the given serialized model, either given as a dict (already loaded)
or a path to a file on disk."""
if isinstance(path_or_package, dict):
package = path_or_package
elif isinstance(path_or_package, (str, Path)):
with warnings.catch_warnings():
warnings.simplefilter("ignore")
path = path_or_package
package = torch.load(path, 'cpu')
else:
raise ValueError(f"Invalid type for {path_or_package}.")
klass = package["klass"]
args = package["args"]
kwargs = package["kwargs"]
if strict:
model = klass(*args, **kwargs)
else:
sig = inspect.signature(klass)
for key in list(kwargs):
if key not in sig.parameters:
warnings.warn("Dropping inexistant parameter " + key)
del kwargs[key]
model = klass(*args, **kwargs)
state = package["state"]
set_state(model, state)
return model |
Get the state from a model, potentially with quantization applied.
If `half` is True, models are stored in half precision, which shouldn't impact performance
but halves the state size. | def get_state(model, quantizer, half=False):
"""Get the state from a model, potentially with quantization applied.
If `half` is True, models are stored in half precision, which shouldn't impact performance
but halves the state size."""
if quantizer is None:
dtype = torch.half if half else None
state = {k: p.data.to(device='cpu', dtype=dtype) for k, p in model.state_dict().items()}
else:
state = quantizer.get_quantized_state()
state['__quantized'] = True
return state |
Set the state on a given model. | def set_state(model, state, quantizer=None):
"""Set the state on a given model."""
if state.get('__quantized'):
if quantizer is not None:
quantizer.restore_quantized_state(model, state['quantized'])
else:
restore_quantized_state(model, state)
else:
model.load_state_dict(state)
return state |
Save the given value on disk, along with a sha256 hash.
Should be used with the output of either `serialize_model` or `get_state`. | def save_with_checksum(content, path):
"""Save the given value on disk, along with a sha256 hash.
Should be used with the output of either `serialize_model` or `get_state`."""
buf = io.BytesIO()
torch.save(content, buf)
sig = hashlib.sha256(buf.getvalue()).hexdigest()[:8]
path = path.parent / (path.stem + "-" + sig + path.suffix)
path.write_bytes(buf.getvalue()) |
Context manager that swaps the state of a model, e.g:
# model is in old state
with swap_state(model, new_state):
# model in new state
# model back to old state | def swap_state(model, state):
"""
Context manager that swaps the state of a model, e.g:
# model is in old state
with swap_state(model, new_state):
# model in new state
# model back to old state
"""
old_state = copy_state(model.state_dict())
model.load_state_dict(state, strict=False)
try:
yield
finally:
model.load_state_dict(old_state) |
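A hedged usage sketch: `swap_state` is written as a generator and is presumably decorated with `contextlib.contextmanager` in the original source (it also relies on a `copy_state` helper from the same module); here the wrapping is done explicitly with a throwaway state:
# zero_state is a placeholder; in practice this would be e.g. EMA-averaged weights.
import torch
import torch.nn as nn
from contextlib import contextmanager

model = nn.Linear(4, 4)
zero_state = {k: torch.zeros_like(v) for k, v in model.state_dict().items()}

with contextmanager(swap_state)(model, zero_state):
    pass        # model temporarily holds zero_state here
# the original weights are restored when the block exits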
The input of normalization will be (M, C, K), where M is batch size,
C is channel size and K is sequence length. | def chose_norm(norm_type, channel_size):
"""The input of normlization will be (M, C, K), where M is batch size,
C is channel size and K is sequence length.
"""
if norm_type == "gLN":
return GlobalLayerNorm(channel_size)
elif norm_type == "cLN":
return ChannelwiseLayerNorm(channel_size)
elif norm_type == "id":
return nn.Identity()
else: # norm_type == "BN":
# Given input (M, C, K), nn.BatchNorm1d(C) will accumulate statistics
# along M and K, so this BN usage is right.
return nn.BatchNorm1d(channel_size) |
The input of normalization will be (M, C, K), where M is batch size,
C is channel size and K is sequence length. | def chose_norm(norm_type, channel_size):
"""The input of normlization will be (M, C, K), where M is batch size,
C is channel size and K is sequence length.
"""
if norm_type == "gLN":
return GlobalLayerNorm(channel_size)
elif norm_type == "cLN":
return ChannelwiseLayerNorm(channel_size)
elif norm_type == "id":
return nn.Identity()
else: # norm_type == "BN":
# Given input (M, C, K), nn.BatchNorm1d(C) will accumulate statistics
# along M and K, so this BN usage is right.
return nn.BatchNorm1d(channel_size) |
:param d_model: dimension of the model
:param height: height of the positions
:param width: width of the positions
:return: d_model*height*width position matrix | def create_2d_sin_embedding(d_model, height, width, device="cpu", max_period=10000):
"""
:param d_model: dimension of the model
:param height: height of the positions
:param width: width of the positions
:return: d_model*height*width position matrix
"""
if d_model % 4 != 0:
raise ValueError(
"Cannot use sin/cos positional encoding with "
"odd dimension (got dim={:d})".format(d_model)
)
pe = torch.zeros(d_model, height, width)
# Each dimension use half of d_model
d_model = int(d_model / 2)
div_term = torch.exp(
torch.arange(0.0, d_model, 2) * -(math.log(max_period) / d_model)
)
pos_w = torch.arange(0.0, width).unsqueeze(1)
pos_h = torch.arange(0.0, height).unsqueeze(1)
pe[0:d_model:2, :, :] = (
torch.sin(pos_w * div_term).transpose(0, 1).unsqueeze(1).repeat(1, height, 1)
)
pe[1:d_model:2, :, :] = (
torch.cos(pos_w * div_term).transpose(0, 1).unsqueeze(1).repeat(1, height, 1)
)
pe[d_model::2, :, :] = (
torch.sin(pos_h * div_term).transpose(0, 1).unsqueeze(2).repeat(1, 1, width)
)
pe[d_model + 1:: 2, :, :] = (
torch.cos(pos_h * div_term).transpose(0, 1).unsqueeze(2).repeat(1, 1, width)
)
return pe[None, :].to(device) |
When the input of the Decoder has length T1 and the output T2
The mask matrix has shape (T2, T1) | def get_elementary_mask(
T1,
T2,
mask_type,
sparse_attn_window,
global_window,
mask_random_seed,
sparsity,
device,
):
"""
When the input of the Decoder has length T1 and the output T2
The mask matrix has shape (T2, T1)
"""
assert mask_type in ["diag", "jmask", "random", "global"]
if mask_type == "global":
mask = torch.zeros(T2, T1, dtype=torch.bool)
mask[:, :global_window] = True
line_window = int(global_window * T2 / T1)
mask[:line_window, :] = True
if mask_type == "diag":
mask = torch.zeros(T2, T1, dtype=torch.bool)
rows = torch.arange(T2)[:, None]
cols = (
(T1 / T2 * rows + torch.arange(-sparse_attn_window, sparse_attn_window + 1))
.long()
.clamp(0, T1 - 1)
)
mask.scatter_(1, cols, torch.ones(1, dtype=torch.bool).expand_as(cols))
elif mask_type == "jmask":
mask = torch.zeros(T2 + 2, T1 + 2, dtype=torch.bool)
rows = torch.arange(T2 + 2)[:, None]
t = torch.arange(0, int((2 * T1) ** 0.5 + 1))
t = (t * (t + 1) / 2).int()
t = torch.cat([-t.flip(0)[:-1], t])
cols = (T1 / T2 * rows + t).long().clamp(0, T1 + 1)
mask.scatter_(1, cols, torch.ones(1, dtype=torch.bool).expand_as(cols))
mask = mask[1:-1, 1:-1]
elif mask_type == "random":
gene = torch.Generator(device=device)
gene.manual_seed(mask_random_seed)
mask = (
torch.rand(T1 * T2, generator=gene, device=device).reshape(T2, T1)
> sparsity
)
mask = mask.to(device)
return mask |
Return a SparseCSRTensor mask that is a combination of elementary masks
mask_type can be a combination of multiple masks: for instance "diag_jmask_random" | def get_mask(
T1,
T2,
mask_type,
sparse_attn_window,
global_window,
mask_random_seed,
sparsity,
device,
):
"""
Return a SparseCSRTensor mask that is a combination of elementary masks
mask_type can be a combination of multiple masks: for instance "diag_jmask_random"
"""
from xformers.sparse import SparseCSRTensor
# create a list
mask_types = mask_type.split("_")
all_masks = [
get_elementary_mask(
T1,
T2,
mask,
sparse_attn_window,
global_window,
mask_random_seed,
sparsity,
device,
)
for mask in mask_types
]
final_mask = torch.stack(all_masks).sum(axis=0) > 0
return SparseCSRTensor.from_dense(final_mask[None]) |
Given input of size [*OT, T], output Tensor of size [*OT, F, K]
with K the kernel size, by extracting frames with the given stride.
This will pad the input so that `F = ceil(T / K)`.
see https://github.com/pytorch/pytorch/issues/60466 | def unfold(a, kernel_size, stride):
"""Given input of size [*OT, T], output Tensor of size [*OT, F, K]
with K the kernel size, by extracting frames with the given stride.
This will pad the input so that `F = ceil(T / K)`.
see https://github.com/pytorch/pytorch/issues/60466
"""
*shape, length = a.shape
n_frames = math.ceil(length / stride)
tgt_length = (n_frames - 1) * stride + kernel_size
a = F.pad(a, (0, tgt_length - length))
strides = list(a.stride())
assert strides[-1] == 1, 'data should be contiguous'
strides = strides[:-1] + [stride, 1]
return a.as_strided([*shape, n_frames, kernel_size], strides) |
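An illustrative shape check: a one-second signal cut into 100 ms frames with a 50 ms hop:
import torch

a = torch.randn(2, 16000)
frames = unfold(a, kernel_size=1600, stride=800)
print(frames.shape)        # torch.Size([2, 20, 1600]); F = ceil(16000 / 800) = 20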
Center trim `tensor` with respect to `reference`, along the last dimension.
`reference` can also be a number, representing the length to trim to.
If the size difference != 0 mod 2, the extra sample is removed on the right side. | def center_trim(tensor: torch.Tensor, reference: tp.Union[torch.Tensor, int]):
"""
Center trim `tensor` with respect to `reference`, along the last dimension.
`reference` can also be a number, representing the length to trim to.
If the size difference != 0 mod 2, the extra sample is removed on the right side.
"""
ref_size: int
if isinstance(reference, torch.Tensor):
ref_size = reference.size(-1)
else:
ref_size = reference
delta = tensor.size(-1) - ref_size
if delta < 0:
raise ValueError("tensor must be larger than reference. " f"Delta is {delta}.")
if delta:
tensor = tensor[..., delta // 2:-(delta - delta // 2)]
return tensor |
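An illustrative check: trimming a model output back to the reference length, with the extra sample of an odd difference removed on the right:
import torch

out = torch.randn(1, 4, 2, 44105)
trimmed = center_trim(out, 44100)       # trims 2 samples on the left, 3 on the right
print(trimmed.shape)                    # torch.Size([1, 4, 2, 44100])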
Exponential Moving Average callback.
Returns a single function that can be called to repeatedly update the EMA
with a dict of metrics. The callback will return
the new averaged dict of metrics.
Note that for `beta=1`, this is just plain averaging. | def EMA(beta: float = 1):
"""
Exponential Moving Average callback.
Returns a single function that can be called to repeatedly update the EMA
with a dict of metrics. The callback will return
the new averaged dict of metrics.
Note that for `beta=1`, this is just plain averaging.
"""
fix: tp.Dict[str, float] = defaultdict(float)
total: tp.Dict[str, float] = defaultdict(float)
def _update(metrics: dict, weight: float = 1) -> dict:
nonlocal total, fix
for key, value in metrics.items():
total[key] = total[key] * beta + weight * float(value)
fix[key] = fix[key] * beta + weight
return {key: tot / fix[key] for key, tot in total.items()}
return _update |
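An illustrative use of the returned callback; with `beta=1` it reduces to a plain (weighted) running mean:
ema = EMA(beta=1)
print(ema({'loss': 2.0}))               # {'loss': 2.0}
print(ema({'loss': 1.0}))               # {'loss': 1.5}
print(ema({'loss': 1.0}, weight=2))     # {'loss': 1.25}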
Given `num` bytes, return human readable size.
Taken from https://stackoverflow.com/a/1094933 | def sizeof_fmt(num: float, suffix: str = 'B'):
"""
Given `num` bytes, return human readable size.
Taken from https://stackoverflow.com/a/1094933
"""
for unit in ['', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi']:
if abs(num) < 1024.0:
return "%3.1f%s%s" % (num, unit, suffix)
num /= 1024.0
return "%.1f%s%s" % (num, 'Yi', suffix) |
Average `metric` which should be a float across all hosts. `count` should be
the weight for this particular host (i.e. number of examples). | def average_metric(metric, count=1.):
"""
Average `metric` which should be a float across all hosts. `count` should be
the weight for this particular host (i.e. number of examples).
"""
metric = th.tensor([count, count * metric], dtype=th.float32, device='cuda')
distributed.all_reduce(metric, op=distributed.ReduceOp.SUM)
return metric[1].item() / metric[0].item() |
Return a port number that is most likely free.
This could suffer from a race condition although
it should be quite rare. | def free_port(host='', low=20000, high=40000):
"""
Return a port number that is most likely free.
This could suffer from a race condition although
it should be quite rare.
"""
sock = socket.socket()
while True:
port = random.randint(low, high)
try:
sock.bind((host, port))
except OSError as error:
if error.errno == errno.EADDRINUSE:
continue
raise
return port |
Given `num` bytes, return human readable size.
Taken from https://stackoverflow.com/a/1094933 | def sizeof_fmt(num, suffix='B'):
"""
Given `num` bytes, return human readable size.
Taken from https://stackoverflow.com/a/1094933
"""
for unit in ['', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi']:
if abs(num) < 1024.0:
return "%3.1f%s%s" % (num, unit, suffix)
num /= 1024.0
return "%.1f%s%s" % (num, 'Yi', suffix) |
Given `seconds` seconds, return human readable duration. | def human_seconds(seconds, display='.2f'):
"""
Given `seconds` seconds, return human readable duration.
"""
value = seconds * 1e6
ratios = [1e3, 1e3, 60, 60, 24]
names = ['us', 'ms', 's', 'min', 'hrs', 'days']
last = names.pop(0)
for name, ratio in zip(names, ratios):
if value / ratio < 0.3:
break
value /= ratio
last = name
return f"{format(value, display)} {last}" |
Apply model to a given mixture.
Args:
shifts (int): if > 0, will shift in time `mix` by a random amount between 0 and 0.5 sec
and apply the opposite shift to the output. This is repeated `shifts` times and
all predictions are averaged. This effectively makes the model time equivariant
and improves SDR by up to 0.2 points.
split (bool): if True, the input will be broken down into 8-second extracts
and predictions will be performed individually on each and concatenated.
Useful for models with a large memory footprint like Tasnet.
progress (bool): if True, show a progress bar (requires split=True) | def apply_model_v1(model, mix, shifts=None, split=False, progress=False, set_progress_bar=None):
"""
Apply model to a given mixture.
Args:
shifts (int): if > 0, will shift in time `mix` by a random amount between 0 and 0.5 sec
and apply the opposite shift to the output. This is repeated `shifts` times and
all predictions are averaged. This effectively makes the model time equivariant
and improves SDR by up to 0.2 points.
split (bool): if True, the input will be broken down into 8-second extracts
and predictions will be performed individually on each and concatenated.
Useful for models with a large memory footprint like Tasnet.
progress (bool): if True, show a progress bar (requires split=True)
"""
channels, length = mix.size()
device = mix.device
progress_value = 0
if split:
out = th.zeros(4, channels, length, device=device)
shift = model.samplerate * 10
offsets = range(0, length, shift)
scale = 10
if progress:
offsets = tqdm.tqdm(offsets, unit_scale=scale, ncols=120, unit='seconds')
for offset in offsets:
chunk = mix[..., offset:offset + shift]
if set_progress_bar:
progress_value += 1
set_progress_bar(0.1, (0.8/len(offsets)*progress_value))
chunk_out = apply_model_v1(model, chunk, shifts=shifts, set_progress_bar=set_progress_bar)
else:
chunk_out = apply_model_v1(model, chunk, shifts=shifts)
out[..., offset:offset + shift] = chunk_out
offset += shift
return out
elif shifts:
max_shift = int(model.samplerate / 2)
mix = F.pad(mix, (max_shift, max_shift))
offsets = list(range(max_shift))
random.shuffle(offsets)
out = 0
for offset in offsets[:shifts]:
shifted = mix[..., offset:offset + length + max_shift]
if set_progress_bar:
shifted_out = apply_model_v1(model, shifted, set_progress_bar=set_progress_bar)
else:
shifted_out = apply_model_v1(model, shifted)
out += shifted_out[..., max_shift - offset:max_shift - offset + length]
out /= shifts
return out
else:
valid_length = model.valid_length(length)
delta = valid_length - length
padded = F.pad(mix, (delta // 2, delta - delta // 2))
with th.no_grad():
out = model(padded.unsqueeze(0))[0]
return center_trim(out, mix) |
Apply model to a given mixture.
Args:
shifts (int): if > 0, will shift in time `mix` by a random amount between 0 and 0.5 sec
and apply the opposite shift to the output. This is repeated `shifts` times and
all predictions are averaged. This effectively makes the model time equivariant
and improves SDR by up to 0.2 points.
split (bool): if True, the input will be broken down into 8-second extracts
and predictions will be performed individually on each and concatenated.
Useful for models with a large memory footprint like Tasnet.
progress (bool): if True, show a progress bar (requires split=True) | def apply_model_v2(model, mix, shifts=None, split=False,
overlap=0.25, transition_power=1., progress=False, set_progress_bar=None):
"""
Apply model to a given mixture.
Args:
shifts (int): if > 0, will shift in time `mix` by a random amount between 0 and 0.5 sec
and apply the opposite shift to the output. This is repeated `shifts` times and
all predictions are averaged. This effectively makes the model time equivariant
and improves SDR by up to 0.2 points.
split (bool): if True, the input will be broken down into 8-second extracts
and predictions will be performed individually on each and concatenated.
Useful for models with a large memory footprint like Tasnet.
progress (bool): if True, show a progress bar (requires split=True)
"""
assert transition_power >= 1, "transition_power < 1 leads to weird behavior."
device = mix.device
channels, length = mix.shape
progress_value = 0
if split:
out = th.zeros(len(model.sources), channels, length, device=device)
sum_weight = th.zeros(length, device=device)
segment = model.segment_length
stride = int((1 - overlap) * segment)
offsets = range(0, length, stride)
scale = stride / model.samplerate
if progress:
offsets = tqdm.tqdm(offsets, unit_scale=scale, ncols=120, unit='seconds')
# We start from a triangle shaped weight, with maximal weight in the middle
# of the segment. Then we normalize and take to the power `transition_power`.
# Large values of transition power will lead to sharper transitions.
weight = th.cat([th.arange(1, segment // 2 + 1),
th.arange(segment - segment // 2, 0, -1)]).to(device)
assert len(weight) == segment
# If the overlap < 50%, this will translate to linear transition when
# transition_power is 1.
weight = (weight / weight.max())**transition_power
for offset in offsets:
chunk = TensorChunk(mix, offset, segment)
if set_progress_bar:
progress_value += 1
set_progress_bar(0.1, (0.8/len(offsets)*progress_value))
chunk_out = apply_model_v2(model, chunk, shifts=shifts, set_progress_bar=set_progress_bar)
else:
chunk_out = apply_model_v2(model, chunk, shifts=shifts)
chunk_length = chunk_out.shape[-1]
out[..., offset:offset + segment] += weight[:chunk_length] * chunk_out
sum_weight[offset:offset + segment] += weight[:chunk_length]
offset += segment
assert sum_weight.min() > 0
out /= sum_weight
return out
elif shifts:
max_shift = int(0.5 * model.samplerate)
mix = tensor_chunk(mix)
padded_mix = mix.padded(length + 2 * max_shift)
out = 0
for _ in range(shifts):
offset = random.randint(0, max_shift)
shifted = TensorChunk(padded_mix, offset, length + max_shift - offset)
if set_progress_bar:
progress_value += 1
shifted_out = apply_model_v2(model, shifted, set_progress_bar=set_progress_bar)
else:
shifted_out = apply_model_v2(model, shifted)
out += shifted_out[..., max_shift - offset:]
out /= shifts
return out
else:
valid_length = model.valid_length(length)
mix = tensor_chunk(mix)
padded_mix = mix.padded(valid_length)
with th.no_grad():
out = model(padded_mix.unsqueeze(0))[0]
return center_trim(out, length) |
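The split branch above reconstructs the full-length output by overlap-adding triangle-weighted chunks and dividing by the accumulated weights. The following NumPy sketch is not part of the original code; it uses an identity "model" and made-up segment/stride sizes purely to show that the weighting scheme reconstructs the input exactly.
# Minimal overlap-add sketch (illustrative sizes; the real code derives segment/stride from the model).
import numpy as np

length, segment, stride = 20, 8, 6  # stride = int((1 - overlap) * segment) in the real code
signal = np.random.randn(length)
out = np.zeros(length)
sum_weight = np.zeros(length)
# Triangle-shaped weight, maximal in the middle of the segment, normalized so its peak is 1.
weight = np.concatenate([np.arange(1, segment // 2 + 1),
                         np.arange(segment - segment // 2, 0, -1)]).astype(float)
weight /= weight.max()
for offset in range(0, length, stride):
    chunk = signal[offset:offset + segment]  # stands in for the model output on this chunk
    out[offset:offset + segment] += weight[:len(chunk)] * chunk
    sum_weight[offset:offset + segment] += weight[:len(chunk)]
out /= sum_weight
assert np.allclose(out, signal)  # identity "model" => exact reconstruction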
Determines secondary stem | def secondary_stem(stem:str):
"""Determines secondary stem"""
stem = stem if stem else NO_STEM
if stem in STEM_PAIR_MAPPER.keys():
for key, value in STEM_PAIR_MAPPER.items():
if stem in key:
secondary_stem = value
else:
secondary_stem = stem.replace(NO_STEM, "") if NO_STEM in stem else f"{NO_STEM}{stem}"
return secondary_stem |
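A quick usage sketch, assuming purely for illustration that NO_STEM is "No " and STEM_PAIR_MAPPER maps "Vocals" to "Instrumental" and back (the real constants are defined elsewhere in the module):
# secondary_stem("Vocals")    -> "Instrumental"   (paired stems come from STEM_PAIR_MAPPER)
# secondary_stem("Drums")     -> "No Drums"       (unpaired stems get the NO_STEM prefix)
# secondary_stem("No Drums")  -> "Drums"          (the prefix is stripped on the way back)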
Internal function. | def _require(tkroot):
'''Internal function.'''
global TkdndVersion
try:
import os.path
import platform
if platform.system()=="Darwin":
tkdnd_platform_rep = "osx_arm" if platform.processor() == ARM or ARM in platform.platform() else "osx64"
elif platform.system()=="Linux":
tkdnd_platform_rep = "linux64"
elif platform.system()=="Windows":
tkdnd_platform_rep = "win64"
else:
raise RuntimeError('Platform not supported.')
module_path = os.path.join(os.path.dirname(__file__), 'tkdnd', tkdnd_platform_rep)
tkroot.tk.call('lappend', 'auto_path', module_path)
TkdndVersion = tkroot.tk.call('package', 'require', 'tkdnd')
except tkinter.TclError:
raise RuntimeError('Unable to load tkdnd library.')
return TkdndVersion |
Normalize audio | def normalize(wave, is_normalize=False):
"""Normalize audio"""
maxv = np.abs(wave).max()
if maxv > 1.0:
if is_normalize:
print("Above clipping threshold.")
wave /= maxv
return wave |
Ensure that the audio array is in the (channels, samples) format.
Parameters:
audio_array (ndarray): Input audio array.
Returns:
ndarray: Transposed audio array if necessary. | def auto_transpose(audio_array:np.ndarray):
"""
Ensure that the audio array is in the (channels, samples) format.
Parameters:
audio_array (ndarray): Input audio array.
Returns:
ndarray: Transposed audio array if necessary.
"""
# If the second dimension is 2 (indicating stereo channels), transpose the array
if audio_array.shape[1] == 2:
return audio_array.T
return audio_array |
Detect silence at the beginning of an audio signal.
:param audio: np.array, audio signal
:param sr: int, sample rate
:param silence_threshold: float, magnitude threshold below which is considered silence
:param frame_length: int, the number of samples to consider for each check
:return: float, duration of the leading silence in milliseconds | def detect_leading_silence(audio, sr, silence_threshold=0.007, frame_length=1024):
"""
Detect silence at the beginning of an audio signal.
:param audio: np.array, audio signal
:param sr: int, sample rate
:param silence_threshold: float, magnitude threshold below which is considered silence
:param frame_length: int, the number of samples to consider for each check
:return: float, duration of the leading silence in milliseconds
"""
if len(audio.shape) == 2:
# If stereo, pick the channel with more energy to determine the silence
channel = np.argmax(np.sum(np.abs(audio), axis=1))
audio = audio[channel]
for i in range(0, len(audio), frame_length):
if np.max(np.abs(audio[i:i+frame_length])) > silence_threshold:
return (i / sr) * 1000
return (len(audio) / sr) * 1000 |
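A small usage sketch (values assumed): half a second of silence followed by a 440 Hz tone at 44.1 kHz should report roughly 500 ms, quantized to the frame boundary (frame_length=1024 samples is about 23 ms).
import numpy as np

sr = 44100
silence = np.zeros(int(0.5 * sr))
tone = 0.5 * np.sin(2 * np.pi * 440 * np.arange(sr) / sr)
audio = np.concatenate([silence, tone])
print(detect_leading_silence(audio, sr))  # ~487.6 ms (nearest frame boundary below 500 ms)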
Adjust the leading silence of the target_audio to match the leading silence of the reference_audio.
:param target_audio: np.array, audio signal that will have its silence adjusted
:param reference_audio: np.array, audio signal used as a reference
:param sr: int, sample rate
:param silence_threshold: float, magnitude threshold below which is considered silence
:param frame_length: int, the number of samples to consider for each check
:return: np.array, target_audio adjusted to have the same leading silence as reference_audio | def adjust_leading_silence(target_audio, reference_audio, silence_threshold=0.01, frame_length=1024):
"""
Adjust the leading silence of the target_audio to match the leading silence of the reference_audio.
:param target_audio: np.array, audio signal that will have its silence adjusted
:param reference_audio: np.array, audio signal used as a reference
:param sr: int, sample rate
:param silence_threshold: float, magnitude threshold below which is considered silence
:param frame_length: int, the number of samples to consider for each check
:return: np.array, target_audio adjusted to have the same leading silence as reference_audio
"""
def find_silence_end(audio):
if len(audio.shape) == 2:
# If stereo, pick the channel with more energy to determine the silence
channel = np.argmax(np.sum(np.abs(audio), axis=1))
audio_mono = audio[channel]
else:
audio_mono = audio
for i in range(0, len(audio_mono), frame_length):
if np.max(np.abs(audio_mono[i:i+frame_length])) > silence_threshold:
return i
return len(audio_mono)
ref_silence_end = find_silence_end(reference_audio)
target_silence_end = find_silence_end(target_audio)
silence_difference = ref_silence_end - target_silence_end
try:
ref_silence_end_p = (ref_silence_end / 44100) * 1000
target_silence_end_p = (target_silence_end / 44100) * 1000
silence_difference_p = ref_silence_end_p - target_silence_end_p
print("silence_difference: ", silence_difference_p)
except Exception as e:
pass
if silence_difference > 0: # Add silence to target_audio
if len(target_audio.shape) == 2: # stereo
silence_to_add = np.zeros((target_audio.shape[0], silence_difference))
else: # mono
silence_to_add = np.zeros(silence_difference)
return np.hstack((silence_to_add, target_audio))
elif silence_difference < 0: # Remove silence from target_audio
if len(target_audio.shape) == 2: # stereo
return target_audio[:, -silence_difference:]
else: # mono
return target_audio[-silence_difference:]
else: # No adjustment needed
return target_audio |
This fixture creates a directory structure to enable reload parameter tests
The fixture has the following structure:
root
├── [app, app_first, app_second, app_third]
│ ├── css
│ │ └── main.css
│ ├── js
│ │ └── main.js
│ ├── src
│ │ └── main.py
│ └── sub
│ └── sub.py
├── ext
│ └── ext.jpg
├── .dotted
├── .dotted_dir
│ └── file.txt
└── main.py | def reload_directory_structure(tmp_path_factory: pytest.TempPathFactory):
"""
This fixture creates a directory structure to enable reload parameter tests
The fixture has the following structure:
root
├── [app, app_first, app_second, app_third]
│ ├── css
│ │ └── main.css
│ ├── js
│ │ └── main.js
│ ├── src
│ │ └── main.py
│ └── sub
│ └── sub.py
├── ext
│ └── ext.jpg
├── .dotted
├── .dotted_dir
│ └── file.txt
└── main.py
"""
root = tmp_path_factory.mktemp("reload_directory")
apps = ["app", "app_first", "app_second", "app_third"]
root_file = root / "main.py"
root_file.touch()
dotted_file = root / ".dotted"
dotted_file.touch()
dotted_dir = root / ".dotted_dir"
dotted_dir.mkdir()
dotted_dir_file = dotted_dir / "file.txt"
dotted_dir_file.touch()
for app in apps:
app_path = root / app
app_path.mkdir()
dir_files = [
("src", ["main.py"]),
("js", ["main.js"]),
("css", ["main.css"]),
("sub", ["sub.py"]),
]
for directory, files in dir_files:
directory_path = app_path / directory
directory_path.mkdir()
for file in files:
file_path = directory_path / file
file_path.touch()
ext_dir = root / "ext"
ext_dir.mkdir()
ext_file = ext_dir / "ext.jpg"
ext_file.touch()
yield root |
Find an unused localhost port from 1024-65535 and return it. | def _unused_port(socket_type: int) -> int:
"""Find an unused localhost port from 1024-65535 and return it."""
with contextlib.closing(socket.socket(type=socket_type)) as sock:
sock.bind(("127.0.0.1", 0))
return sock.getsockname()[1] |
Test that one can specify the use_colors option when using the default logging
config. | def test_log_config_default(
mocked_logging_config_module: MagicMock,
use_colors: bool | None,
expected: bool | None,
logging_config: dict[str, Any],
) -> None:
"""
Test that one can specify the use_colors option when using the default logging
config.
"""
config = Config(app=asgi_app, use_colors=use_colors, log_config=logging_config)
config.load()
mocked_logging_config_module.dictConfig.assert_called_once_with(logging_config)
(provided_dict_config,), _ = mocked_logging_config_module.dictConfig.call_args
assert provided_dict_config["formatters"]["default"]["use_colors"] == expected |
Test that one can load a json config from disk. | def test_log_config_json(
mocked_logging_config_module: MagicMock,
logging_config: dict[str, Any],
json_logging_config: str,
mocker: MockerFixture,
) -> None:
"""
Test that one can load a json config from disk.
"""
mocked_open = mocker.patch("uvicorn.config.open", mocker.mock_open(read_data=json_logging_config))
config = Config(app=asgi_app, log_config="log_config.json")
config.load()
mocked_open.assert_called_once_with("log_config.json")
mocked_logging_config_module.dictConfig.assert_called_once_with(logging_config) |
Test that one can load a yaml config from disk. | def test_log_config_yaml(
mocked_logging_config_module: MagicMock,
logging_config: dict[str, Any],
yaml_logging_config: str,
mocker: MockerFixture,
config_filename: str,
) -> None:
"""
Test that one can load a yaml config from disk.
"""
mocked_open = mocker.patch("uvicorn.config.open", mocker.mock_open(read_data=yaml_logging_config))
config = Config(app=asgi_app, log_config=config_filename)
config.load()
mocked_open.assert_called_once_with(config_filename)
mocked_logging_config_module.dictConfig.assert_called_once_with(logging_config) |
Test that one can load a configparser config from disk. | def test_log_config_file(
mocked_logging_config_module: MagicMock,
config_file: str | configparser.RawConfigParser | typing.IO[Any],
) -> None:
"""
Test that one can load a configparser config from disk.
"""
config = Config(app=asgi_app, log_config=config_file)
config.load()
mocked_logging_config_module.fileConfig.assert_called_once_with(config_file, disable_existing_loggers=False) |
Test that one can load environment variables using an env file. | def test_env_file(
web_concurrency: int,
forwarded_allow_ips: str,
caplog: pytest.LogCaptureFixture,
tmp_path: Path,
) -> None:
"""
Test that one can load environment variables using an env file.
"""
fp = tmp_path / ".env"
content = f"WEB_CONCURRENCY={web_concurrency}\n" f"FORWARDED_ALLOW_IPS={forwarded_allow_ips}\n"
fp.write_text(content)
with caplog.at_level(logging.INFO):
config = Config(app=asgi_app, env_file=fp)
config.load()
assert config.workers == int(str(os.getenv("WEB_CONCURRENCY")))
assert config.forwarded_allow_ips == os.getenv("FORWARDED_ALLOW_IPS")
assert len(caplog.records) == 1
assert f"Loading environment from '{fp}'" in caplog.records[0].message |
Replace the `sig` handler with one that records the signal number via `signal.signal`. | def capture_signal_sync(sig: signal.Signals) -> Generator[list[int], None, None]:
"""Replace the `sig` handler with one that records the signal number via `signal.signal`."""
witness: list[int] = []
original_handler = signal.signal(sig, lambda signum, frame: witness.append(signum))
yield witness
signal.signal(sig, original_handler) |
Replace the `sig` handler with one that records the signal number via the running asyncio loop. | def capture_signal_async(sig: signal.Signals) -> Generator[list[int], None, None]: # pragma: py-win32
"""Replace the `sig` handler with one that records the signal number via the running asyncio loop."""
witness: list[int] = []
original_handler = signal.getsignal(sig)
asyncio.get_running_loop().add_signal_handler(sig, witness.append, sig)
yield witness
signal.signal(sig, original_handler) |
Changes working directory and returns to previous on exit. | def as_cwd(path: Path):
"""Changes working directory and returns to previous on exit."""
prev_cwd = Path.cwd()
os.chdir(path)
try:
yield
finally:
os.chdir(prev_cwd) |
A basic sanity check.
Simply run the supervisor against a no-op server, and signal for it to
quit immediately. | def test_multiprocess_run() -> None:
"""
A basic sanity check.
Simply run the supervisor against a no-op server, and signal for it to
quit immediately.
"""
config = Config(app=app, workers=2)
supervisor = Multiprocess(config, target=run, sockets=[])
supervisor.signal_handler(sig=signal.SIGINT, frame=None)
supervisor.run() |
Called in the parent process, to instantiate a new child process instance.
The child is not yet started at this point.
* config - The Uvicorn configuration instance.
* target - A callable that accepts a list of sockets. In practice this will
be the `Server.run()` method.
* sockets - A list of sockets to pass to the server. Sockets are bound once
by the parent process, and then passed to the child processes. | def get_subprocess(
config: Config,
target: Callable[..., None],
sockets: list[socket],
) -> SpawnProcess:
"""
Called in the parent process, to instantiate a new child process instance.
The child is not yet started at this point.
* config - The Uvicorn configuration instance.
* target - A callable that accepts a list of sockets. In practice this will
be the `Server.run()` method.
* sockets - A list of sockets to pass to the server. Sockets are bound once
by the parent process, and then passed to the child processes.
"""
# We pass across the stdin fileno, and reopen it in the child process.
# This is required for some debugging environments.
try:
stdin_fileno = sys.stdin.fileno()
# The `sys.stdin` can be `None`, see https://docs.python.org/3/library/sys.html#sys.__stdin__.
except (AttributeError, OSError):
stdin_fileno = None
kwargs = {
"config": config,
"target": target,
"sockets": sockets,
"stdin_fileno": stdin_fileno,
}
return spawn.Process(target=subprocess_started, kwargs=kwargs) |
Called when the child process starts.
* config - The Uvicorn configuration instance.
* target - A callable that accepts a list of sockets. In practice this will
be the `Server.run()` method.
* sockets - A list of sockets to pass to the server. Sockets are bound once
by the parent process, and then passed to the child processes.
* stdin_fileno - The file number of sys.stdin, so that it can be reattached
to the child process. | def subprocess_started(
config: Config,
target: Callable[..., None],
sockets: list[socket],
stdin_fileno: int | None,
) -> None:
"""
Called when the child process starts.
* config - The Uvicorn configuration instance.
* target - A callable that accepts a list of sockets. In practice this will
be the `Server.run()` method.
* sockets - A list of sockets to pass to the server. Sockets are bound once
by the parent process, and then passed to the child processes.
* stdin_fileno - The file number of sys.stdin, so that it can be reattached
to the child process.
"""
# Re-open stdin.
if stdin_fileno is not None:
sys.stdin = os.fdopen(stdin_fileno)
# Logging needs to be setup again for each child.
config.configure_logging()
# Now we can call into `Server.run(sockets=sockets)`
target(sockets=sockets) |
Return an ASGI message, with any body-type content omitted and replaced
with a placeholder. | def message_with_placeholders(message: Any) -> Any:
"""
Return an ASGI message, with any body-type content omitted and replaced
with a placeholder.
"""
new_message = message.copy()
for attr in PLACEHOLDER_FORMAT.keys():
if message.get(attr) is not None:
content = message[attr]
placeholder = PLACEHOLDER_FORMAT[attr].format(length=len(content))
new_message[attr] = placeholder
return new_message |
Builds a scope and request message into a WSGI environ object. | def build_environ(scope: HTTPScope, message: ASGIReceiveEvent, body: io.BytesIO) -> Environ:
"""
Builds a scope and request message into a WSGI environ object.
"""
script_name = scope.get("root_path", "").encode("utf8").decode("latin1")
path_info = scope["path"].encode("utf8").decode("latin1")
if path_info.startswith(script_name):
path_info = path_info[len(script_name) :]
environ = {
"REQUEST_METHOD": scope["method"],
"SCRIPT_NAME": script_name,
"PATH_INFO": path_info,
"QUERY_STRING": scope["query_string"].decode("ascii"),
"SERVER_PROTOCOL": "HTTP/%s" % scope["http_version"],
"wsgi.version": (1, 0),
"wsgi.url_scheme": scope.get("scheme", "http"),
"wsgi.input": body,
"wsgi.errors": sys.stdout,
"wsgi.multithread": True,
"wsgi.multiprocess": True,
"wsgi.run_once": False,
}
# Get server name and port - required in WSGI, not in ASGI
server = scope.get("server")
if server is None:
server = ("localhost", 80)
environ["SERVER_NAME"] = server[0]
environ["SERVER_PORT"] = str(server[1])  # WSGI requires environ values to be strings
# Get client IP address
client = scope.get("client")
if client is not None:
environ["REMOTE_ADDR"] = client[0]
# Go through headers and make them into environ entries
for name, value in scope.get("headers", []):
name_str: str = name.decode("latin1")
if name_str == "content-length":
corrected_name = "CONTENT_LENGTH"
elif name_str == "content-type":
corrected_name = "CONTENT_TYPE"
else:
corrected_name = "HTTP_%s" % name_str.upper().replace("-", "_")
# HTTPbis says only ASCII chars are allowed in headers, but we decode as latin1
# just in case
value_str: str = value.decode("latin1")
if corrected_name in environ:
corrected_name_environ = environ[corrected_name]
assert isinstance(corrected_name_environ, str)
value_str = corrected_name_environ + "," + value_str
environ[corrected_name] = value_str
return environ |
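A hedged usage sketch of build_environ with a hand-written ASGI scope; all field values below are illustrative.
import io

scope = {
    "type": "http",
    "http_version": "1.1",
    "method": "GET",
    "scheme": "http",
    "root_path": "/app",
    "path": "/app/item",
    "query_string": b"q=1",
    "headers": [(b"host", b"example.com"), (b"content-type", b"text/plain")],
    "client": ("127.0.0.1", 1234),
    "server": ("example.com", 80),
}
environ = build_environ(scope, {"type": "http.request"}, io.BytesIO(b""))
# SCRIPT_NAME == "/app", PATH_INFO == "/item" (root_path stripped),
# HTTP_HOST == "example.com", CONTENT_TYPE == "text/plain", REMOTE_ADDR == "127.0.0.1"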
Load a config file and merge it into the default options. | def cfg_from_file(filename):
"""Load a config file and merge it into the default options."""
import yaml
with open(filename, 'r') as fopen:
yaml_config = AttrDict(yaml.load(fopen))
merge_dicts(yaml_config, __C) |
Set config keys via list (e.g., from command line). | def cfg_from_list(args_list):
"""Set config keys via list (e.g., from command line)."""
from ast import literal_eval
assert len(args_list) % 2 == 0, 'Specify values or keys for args'
for key, value in zip(args_list[0::2], args_list[1::2]):
key_list = key.split('.')
cfg = __C
for subkey in key_list[:-1]:
assert subkey in cfg, 'Config key {} not found'.format(subkey)
cfg = cfg[subkey]
subkey = key_list[-1]
assert subkey in cfg, 'Config key {} not found'.format(subkey)
try:
# handle the case when v is a string literal
val = literal_eval(value)
except BaseException:
val = value
assert isinstance(val, type(cfg[subkey])) or cfg[subkey] is None, \
'type {} does not match original type {}'.format(
type(val), type(cfg[subkey]))
cfg[subkey] = val |
case 1: CHECKPOINT.RESUME = False and TRAIN.PARAMS_FILE is not none:
load params_file
case 2: CHECKPOINT.RESUME = True and TRAIN.PARAMS_FILE is not none:
case 2a: if checkpoint exist: use checkpoint
case 2b: if checkpoint not exist: use params_file
case 3: CHECKPOINT.RESUME = True and TRAIN.PARAMS_FILE is none:
case 3a: if checkpoint exist: use checkpoint
case 3b: if checkpoint not exist: set start_model_iter = 0 | def load_model_from_params_file(model):
"""
case 1: CHECKPOINT.RESUME = False and TRAIN.PARAMS_FILE is not none:
load params_file
case 2: CHECKPOINT.RESUME = True and TRAIN.PARAMS_FILE is not none:
case 2a: if checkpoint exist: use checkpoint
case 2b: if checkpoint not exist: use params_file
case 3: CHECKPOINT.RESUME = True and TRAIN.PARAMS_FILE is none:
case 3a: if checkpoint exist: use checkpoint
case 3b: if checkpoint not exist: set start_model_iter = 0
"""
use_checkpoint = cfg.CHECKPOINT.RESUME and find_checkpoint()
if cfg.TRAIN.PARAMS_FILE and not use_checkpoint:
logger.info('Initializing from pre-trained file...')
start_model_iter, prev_lr = initialize_params_from_file(
model=model, weights_file=cfg.TRAIN.PARAMS_FILE,
load_momentum=False, # not load momentum if it is pretrained
)
logger.info(('Loaded: start_model_iter: {}; prev_lr: {:.8f}').format(
start_model_iter, prev_lr))
model.current_lr = prev_lr
# correct start_model_iter if pretraining uses a different batch size
# (mainly used for 1-node warmup)
if cfg.TRAIN.RESUME_FROM_BATCH_SIZE > 0:
start_model_iter = misc.resume_from(start_model_iter)
# if we only want the weights
if cfg.TRAIN.RESET_START_ITER:
start_model_iter = 0
elif use_checkpoint:
logger.info('Initializing from checkpoints...')
start_model_iter, prev_lr = initialize_params_from_file(
model=model, weights_file=get_checkpoint_resume_file())
logger.info(('Loaded: start_model_iter: {}; prev_lr: {:.8f}').format(
start_model_iter, prev_lr))
model.current_lr = prev_lr
else: # no checkpoint, no params_file
# Do nothing and return 0
start_model_iter = 0
logger.info('No checkpoint found; training from scratch...')
return start_model_iter |
Get the learning rate at iteration it according to the cfg.SOLVER
settings. | def get_lr_at_iter(it):
"""Get the learning rate at iteration it according to the cfg.SOLVER
settings.
"""
lr = get_lr_func()(it)
lr = np.float32(lr)
"""
Warmup hacks (gradual linear):
Example:
cfg.SOLVER.WARMUP.WARMUP_START_LR: 0.1
cfg.SOLVER.WARMUP.WARMUP_END_ITER: 5005 * 5
get_lr_func()(5005 * 5) = 3.2
for cur_iter in [0, 5005 * 5) linearly from [0.1, 3.2)
"""
last_it = cfg.SOLVER.WARMUP.WARMUP_END_ITER
if cfg.SOLVER.WARMUP.WARMUP_ON and it < last_it:
lr_start = np.float32(cfg.SOLVER.WARMUP.WARMUP_START_LR)
lr_end = np.float32(get_lr_func()(last_it))
lr = it * (lr_end - lr_start) / (last_it - 1) + lr_start
return np.float32(lr) |
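Working through the warmup interpolation above with the numbers from the docstring (WARMUP_START_LR = 0.1, WARMUP_END_ITER = 5005 * 5 = 25025, get_lr_func()(25025) = 3.2):
#   it = 0      -> lr = 0     * 3.1 / 25024 + 0.1 = 0.1
#   it = 12512  -> lr = 12512 * 3.1 / 25024 + 0.1 = 1.65
#   it = 25024  -> lr = 25024 * 3.1 / 25024 + 0.1 = 3.2
# From it = 25025 onwards the warmup branch is skipped and get_lr_func()(it) is used directly.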
For cfg.SOLVER.LR_POLICY = 'steps_with_lrs'
Change the learning rate to specified values at specified iterations.
Example:
cfg.SOLVER.MAX_ITER: 90
cfg.SOLVER.STEPS: [0, 60, 80]
cfg.SOLVER.LRS: [0.02, 0.002, 0.0002]
for cur_iter in [0, 59] use 0.02
in [60, 79] use 0.002
in [80, inf] use 0.0002 | def lr_func_steps_with_lrs(cur_iter):
"""
For cfg.SOLVER.LR_POLICY = 'steps_with_lrs'
Change the learning rate to specified values at specified iterations.
Example:
cfg.SOLVER.MAX_ITER: 90
cfg.SOLVER.STEPS: [0, 60, 80]
cfg.SOLVER.LRS: [0.02, 0.002, 0.0002]
for cur_iter in [0, 59] use 0.02
in [60, 79] use 0.002
in [80, inf] use 0.0002
"""
ind = get_step_index(cur_iter)
return cfg.SOLVER.LRS[ind] |
For cfg.SOLVER.LR_POLICY = 'steps_with_relative_lrs'
Change the learning rate to specified values at specified iterations.
Example:
cfg.SOLVER.MAX_ITER: 90
cfg.SOLVER.STEPS: [0, 60, 80]
cfg.SOLVER.BASE_LR: 0.02
cfg.SOLVER.LRS: [1, 0.1, 0.01]
for cur_iter in [0, 59] use 0.02
in [60, 79] use 0.002
in [80, inf] use 0.0002 | def lr_func_steps_with_relative_lrs(cur_iter):
"""
For cfg.SOLVER.LR_POLICY = 'steps_with_relative_lrs'
Change the learning rate to specified values at specified iterations.
Example:
cfg.SOLVER.MAX_ITER: 90
cfg.SOLVER.STEPS: [0, 60, 80]
cfg.SOLVER.BASE_LR: 0.02
cfg.SOLVER.LRS: [1, 0.1, 0.01]
for cur_iter in [0, 59] use 0.02
in [60, 79] use 0.002
in [80, inf] use 0.0002
"""
ind = get_step_index(cur_iter)
return cfg.SOLVER.LRS[ind] * cfg.SOLVER.BASE_LR |
For cfg.SOLVER.LR_POLICY = 'steps_with_decay'
Change the learning rate specified iterations based on the formula
lr = base_lr * gamma ** lr_step_count.
Example:
cfg.SOLVER.MAX_ITER: 90
cfg.SOLVER.STEPS: [0, 60, 80]
cfg.SOLVER.BASE_LR: 0.02
cfg.SOLVER.GAMMA: 0.1
for cur_iter in [0, 59] use 0.02 = 0.02 * 0.1 ** 0
in [60, 79] use 0.002 = 0.02 * 0.1 ** 1
in [80, inf] use 0.0002 = 0.02 * 0.1 ** 2 | def lr_func_steps_with_decay(cur_iter):
"""
For cfg.SOLVER.LR_POLICY = 'steps_with_decay'
Change the learning rate specified iterations based on the formula
lr = base_lr * gamma ** lr_step_count.
Example:
cfg.SOLVER.MAX_ITER: 90
cfg.SOLVER.STEPS: [0, 60, 80]
cfg.SOLVER.BASE_LR: 0.02
cfg.SOLVER.GAMMA: 0.1
for cur_iter in [0, 59] use 0.02 = 0.02 * 0.1 ** 0
in [60, 79] use 0.002 = 0.02 * 0.1 ** 1
in [80, inf] use 0.0002 = 0.02 * 0.1 ** 2
"""
ind = get_step_index(cur_iter)
return cfg.SOLVER.BASE_LR * cfg.SOLVER.GAMMA ** ind |
For cfg.SOLVER.LR_POLICY = 'step' | def lr_func_step(cur_iter):
"""
For cfg.SOLVER.LR_POLICY = 'step'
"""
return (
cfg.SOLVER.BASE_LR *
cfg.SOLVER.GAMMA ** (cur_iter // cfg.SOLVER.STEP_SIZE)) |
Given an iteration, find which learning rate step we're at. | def get_step_index(cur_iter):
"""Given an iteration, find which learning rate step we're at."""
assert cfg.SOLVER.STEPS[0] == 0, 'The first step should always start at 0.'
steps = cfg.SOLVER.STEPS + [cfg.SOLVER.MAX_ITER]
for ind, step in enumerate(steps): # NoQA
if cur_iter < step:
break
return ind - 1 |
Compute the number of correct hits | def compute_topk_correct_hits(top_k, preds, labels):
'''Compute the number of correct hits'''
batch_size = preds.shape[0]
top_k_preds = np.zeros((batch_size, top_k), dtype=np.float32)
for i in range(batch_size):
top_k_preds[i, :] = np.argsort(-preds[i, :])[:top_k]
correctness = np.zeros(batch_size, dtype=np.int32)
for i in range(batch_size):
if labels[i] in top_k_preds[i, :].astype(np.int32).tolist():
correctness[i] = 1
correct_hits = sum(correctness)
return correct_hits |
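A tiny usage sketch with made-up softmax scores for a three-class problem:
import numpy as np

preds = np.array([[0.1, 0.7, 0.2],
                  [0.6, 0.3, 0.1]], dtype=np.float32)
labels = np.array([2, 0])
print(compute_topk_correct_hits(1, preds, labels))  # 1 (only the second sample is a top-1 hit)
print(compute_topk_correct_hits(2, preds, labels))  # 2 (both labels appear in the top-2)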
Summed values of a blob on each gpu | def sum_multi_gpu_blob(blob_name):
"""Summed values of a blob on each gpu"""
value = 0
num_gpus = cfg.NUM_GPUS
root_gpu_id = cfg.ROOT_GPU_ID
for idx in range(root_gpu_id, root_gpu_id + num_gpus):
value += workspace.FetchBlob('gpu_{}/{}'.format(idx, blob_name))
return value |
Summed values of batch size on each gpu | def get_batch_size_from_workspace():
"""Summed values of batch size on each gpu"""
value = 0
num_gpus = cfg.NUM_GPUS
root_gpu_id = cfg.ROOT_GPU_ID
for idx in range(root_gpu_id, root_gpu_id + num_gpus):
value += workspace.FetchBlob('gpu_{}/{}'.format(idx, 'pred')).shape[0]
return value |
To save test-time memory, we perform multi-clip test in multiple "sections":
e.g., 10-clip test can be done in 2 sections of 5-clip test | def test_net_one_section():
"""
To save test-time memory, we perform multi-clip test in multiple "sections":
e.g., 10-clip test can be done in 2 sections of 5-clip test
"""
timer = Timer()
results = []
seen_inds = defaultdict(int)
logger.warning('Testing started...') # for monitoring cluster jobs
test_model = model_builder_video.ModelBuilder(
name='{}_test'.format(cfg.MODEL.MODEL_NAME), train=False,
use_cudnn=True, cudnn_exhaustive_search=True,
split=cfg.TEST.DATA_TYPE)
test_model.build_model()
if cfg.PROF_DAG:
test_model.net.Proto().type = 'prof_dag'
else:
test_model.net.Proto().type = 'dag'
workspace.RunNetOnce(test_model.param_init_net)
workspace.CreateNet(test_model.net)
misc.save_net_proto(test_model.net)
misc.save_net_proto(test_model.param_init_net)
total_test_net_iters = int(
math.ceil(float(cfg.TEST.DATASET_SIZE * cfg.TEST.NUM_TEST_CLIPS) / cfg.TEST.BATCH_SIZE))
if cfg.TEST.PARAMS_FILE:
checkpoints.load_model_from_params_file_for_test(
test_model, cfg.TEST.PARAMS_FILE)
else:
raise Exception('No params files specified for testing model.')
for test_iter in range(total_test_net_iters):
timer.tic()
workspace.RunNet(test_model.net.Proto().name)
timer.toc()
if test_iter == 0:
misc.print_net(test_model)
os.system('nvidia-smi')
test_debug = False
if test_debug is True:
save_path = 'temp_save/'
data_blob = workspace.FetchBlob('gpu_0/data')
label_blob = workspace.FetchBlob('gpu_0/labels')
print(label_blob)
data_blob = data_blob * cfg.MODEL.STD + cfg.MODEL.MEAN
for i in range(data_blob.shape[0]):
for j in range(4):
temp_img = data_blob[i, :, j, :, :]
temp_img = temp_img.transpose([1, 2, 0])
temp_img = temp_img.astype(np.uint8)
fname = save_path + 'ori_' + str(test_iter) \
+ '_' + str(i) + '_' + str(j) + '.jpg'
cv2.imwrite(fname, temp_img)
video_ids_list = [] # for logging
for gpu_id in range(cfg.NUM_GPUS):
prefix = 'gpu_{}/'.format(gpu_id)
softmax_gpu = workspace.FetchBlob(prefix + cfg.TEST.OUTPUT_NAME)
softmax_gpu = softmax_gpu.reshape((softmax_gpu.shape[0], -1))
# This is the index of the video for recording results, not the actual class label for the video
video_id_gpu = workspace.FetchBlob(prefix + 'labels')
for i in range(len(video_id_gpu)):
seen_inds[video_id_gpu[i]] += 1
video_ids_list.append(video_id_gpu[0])
# print(video_id_gpu)
# collect results
for i in range(softmax_gpu.shape[0]):
probs = softmax_gpu[i].tolist()
vid = video_id_gpu[i]
if seen_inds[vid] > cfg.TEST.NUM_TEST_CLIPS:
logger.warning('Video id {} has already been seen. Skip.'.format(
vid,))
continue
save_pairs = [vid, probs]
results.append(save_pairs)
# ---- log
eta = timer.average_time * (total_test_net_iters - test_iter - 1)
eta = str(datetime.timedelta(seconds=int(eta)))
logger.info(('{}/{} iter ({}/{} videos):' +
' Time: {:.3f} (ETA: {}). ID: {}').format(
test_iter, total_test_net_iters,
len(seen_inds), cfg.TEST.DATASET_SIZE,
timer.diff, eta,
video_ids_list,))
return results |
A simple wrapper that creates the elements for train/test models | def create_wrapper(is_train):
"""
A simple wrapper that creates the elements for train/test models
"""
if is_train:
suffix = '_train'
split = cfg.TRAIN.DATA_TYPE
use_mem_cache = cfg.TRAIN.MEM_CACHE
else: # is test
suffix = '_test'
split = cfg.TEST.DATA_TYPE
use_mem_cache = True # we always cache for test
model = model_builder_video.ModelBuilder(
name=cfg.MODEL.MODEL_NAME + suffix,
train=is_train,
use_cudnn=True,
cudnn_exhaustive_search=True,
ws_nbytes_limit=(cfg.CUDNN_WORKSPACE_LIMIT * 1024 * 1024),
split=split,
use_mem_cache=use_mem_cache,
)
model.build_model()
if cfg.PROF_DAG:
model.net.Proto().type = 'prof_dag'
else:
model.net.Proto().type = 'dag'
workspace.RunNetOnce(model.param_init_net)
workspace.CreateNet(model.net)
# model.start_data_loader()
timer = Timer()
meter = metrics.MetricsCalculator(model=model, split=split)
misc.save_net_proto(model.net)
misc.save_net_proto(model.param_init_net)
return model, timer, meter |
Bernstein polynomial. | def bernstein(n, k):
"""Bernstein polynomial."""
coeff = binom(n, k)
def _bpoly(x):
return coeff * x**k * (1 - x) ** (n - k)
return _bpoly |
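A short check of the basis produced above: the degree-n Bernstein polynomials sum to 1 everywhere on [0, 1], by the binomial theorem.
import numpy as np

x = np.linspace(0.0, 1.0, 5)
n = 3
basis_sum = sum(bernstein(n, k)(x) for k in range(n + 1))
print(np.allclose(basis_sum, 1.0))  # True: (x + (1 - x)) ** n == 1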
Build Bézier curve from points. | def bezier(points, at):
"""Build Bézier curve from points."""
warnings.warn(
message="Deprecated. Catmull-Clark builds nicer splines.",
category=FutureWarning,
stacklevel=1,
)
at = np.asarray(at)
at_flat = at.ravel()
n = len(points)
curve = np.zeros((at_flat.shape[0], 2))
for ii in range(n):
curve += np.outer(bernstein(n - 1, ii)(at_flat), points[ii])
return curve.reshape((*at.shape, 2)) |
Parses a line of a requirements.txt file. | def _strip_comments_from_line(s: str) -> str:
"""Parses a line of a requirements.txt file."""
requirement, *_ = s.split('#')
return requirement.strip() |
Returns a list of dependencies for setup() from requirements.txt. | def _parse_requirements(requirements_txt_path: str) -> list[str]:
"""Returns a list of dependencies for setup() from requirements.txt."""
# Currently a requirements.txt is being used to specify dependencies. In order
# to avoid specifying it in two places, we're going to use that file as the
# source of truth.
with open(requirements_txt_path) as fp:
# Parse comments.
lines = [_strip_comments_from_line(line) for line in fp.read().splitlines()]
# Remove empty lines and direct github repos (not allowed in PyPI setups)
return [l for l in lines if (l and 'github.com' not in l)] |
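Illustration with a hypothetical requirements.txt (file contents assumed):
#   numpy>=1.21   # numeric backend
#   attrs
#   mylib @ git+https://github.com/example/mylib.git
# _parse_requirements("requirements.txt") -> ['numpy>=1.21', 'attrs']; the github.com line is dropped.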
Dummy evaluator used as an example. | def evaluate_trial(trial: vz.Trial) -> vz.Measurement:
"""Dummy evaluator used as an example."""
learning_rate = trial.parameters.get_value('learning_rate')
num_layers = trial.parameters.get_value('num_layers')
m = vz.Measurement()
m.metrics = {'accuracy': learning_rate * num_layers} # dummy accuracy
if FLAGS.multiobjective:
m.metrics['latency'] = 0.5 * num_layers
return m |
Default optimizer and random restarts that work okay for most cases. | def default_optimizer(maxiter: int = 50) -> Optimizer:
"""Default optimizer and random restarts that work okay for most cases."""
# NOTE: Production algorithms are recommended to stay away from using this.
return JaxoptScipyLbfgsB(LbfgsBOptions(maxiter=maxiter, best_n=None)) |
Converts a dict of (..., D_i) arrays to a (..., \sum_i D_i) array. | def dict_to_array(array_dict: Mapping[Any, np.ndarray]) -> np.ndarray:
r"""Converts a dict of (..., D_i) arrays to a (..., \sum_i D_i) array."""
return np.concatenate(list(array_dict.values()), axis=-1) |
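A quick illustration: two feature blocks of widths 2 and 3 concatenate into a single width-5 array.
import numpy as np

features = {'a': np.zeros((4, 2)), 'b': np.ones((4, 3))}
print(dict_to_array(features).shape)  # (4, 5)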
Create a default getter for the given parameter config. | def _create_default_getter(
pconfig: pyvizier.ParameterConfig,
) -> Callable[[pyvizier.TrialSuggestion], Any]:
"""Create a default getter for the given parameter config."""
def getter(trial, pconfig=pconfig):
if pconfig.name not in trial.parameters:
return None
pvalue = trial.parameters[pconfig.name]
if pconfig.type == pyvizier.ParameterType.DOUBLE:
return pvalue.as_float
elif pconfig.type == pyvizier.ParameterType.DISCRETE:
return pvalue.as_float
elif pconfig.type == pyvizier.ParameterType.INTEGER:
return pvalue.as_int
else:
return pvalue.as_str
return getter |
Compute the Kumaraswamy CDF.
Arguments:
x: values in [0,1]. shape: (num_samples, num_features)
a: positive value.
b: positive value.
Returns:
The CDF(x). shape: (num_samples, num_cdfs). | def kumaraswamy_cdf(x: np.ndarray, a: float, b: float) -> np.ndarray:
"""Compute the Kumaraswamy CDF.
Arguments:
x: values in [0,1]. shape: (num_samples, num_features)
a: positive value.
b: positive value.
Returns:
The CDF(x). shape: (num_samples, num_cdfs).
"""
return 1 - (1 - x**a) ** b |
Compute the inverse of the Kumaraswamy CDF.
Arguments:
f: values in [0,1]. shape: (num_samples, num_cdfs)
a: positive value.
b: positive value.
Returns:
The Inv_CDF(x). shape: (num_samples, num_features). | def kumaraswamy_inv_cdf(f: np.ndarray, a: float, b: float) -> np.ndarray:
"""Compute the inverse of the Kumaraswamy CDF.
Arguments:
f: values in [0,1]. shape: (num_samples, num_cdfs)
a: positive value.
b: positive value.
Returns:
The Inv_CDF(x). shape: (num_samples, num_features).
"""
return (1 - (1 - f) ** (1 / b)) ** (1 / a) |
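A round-trip sanity check with illustrative shape parameters: the inverse CDF applied to the CDF recovers the input.
import numpy as np

x = np.linspace(0.01, 0.99, 7).reshape(1, -1)
a, b = 2.0, 5.0
print(np.allclose(kumaraswamy_inv_cdf(kumaraswamy_cdf(x, a, b), a, b), x))  # True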
Returns the padded shape according to `padding_types`. | def _padded_dimensions(
dims: Sequence[int], padding_types: Sequence[PaddingType]
) -> tuple[int, ...]:
"""Returns the padded shape according to `padding_types`."""
new_dims = []
for dim, padding_type in zip(dims, padding_types):
if padding_type == PaddingType.NONE:
new_dims.append(dim)
elif padding_type == PaddingType.MULTIPLES_OF_10:
new_dims.append(int(math.ceil(dim / 10.0)) * 10)
elif padding_type == PaddingType.POWERS_OF_2:
if dim == 0:
new_dims.append(0)
else:
new_dims.append(int(2 ** (math.ceil(math.log(dim, 2)))))
else:
raise ValueError(f'{padding_type} unexpected.')
return tuple(new_dims) |
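An illustrative call, assuming PaddingType is the enum referenced above:
dims = (7, 13, 0)
types = (PaddingType.NONE, PaddingType.MULTIPLES_OF_10, PaddingType.POWERS_OF_2)
print(_padded_dimensions(dims, types))  # (7, 20, 0)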
Assertion function for comparing two (nested) dictionaries. | def assert_arraytree_allclose(
d1: Mapping[str, Any], d2: Mapping[str, Any], **kwargs
) -> None:
"""Assertion function for comparing two (nested) dictionaries."""
np.testing.assert_equal(d1.keys(), d2.keys())
for k, v in d1.items():
if isinstance(v, dict):
assert_arraytree_allclose(v, d2[k], **kwargs)
else:
try:
np.testing.assert_allclose(v, d2[k], **kwargs)
except TypeError as e:
np.testing.assert_equal(
v, d2[k], err_msg=f'Using assert_equal due to typeerror: {e}.'
) |
Search space with float parameter types. | def flat_continuous_space_with_scaling() -> vz.SearchSpace:
"""Search space with float parameter types."""
space = vz.SearchSpace()
root = space.root
root.add_float_param('lineardouble', -1., 2.)
root.add_float_param('logdouble', 1e-4, 1e2, scale_type=vz.ScaleType.LOG)
return space |
Trials of search space with float parameter types. | def flat_continuous_space_with_scaling_trials(
count: int = 1,
) -> list[vz.TrialSuggestion]:
"""Trials of search space with float parameter types."""
trials = []
for _ in range(count):
trials.append(
vz.Trial({
'lineardouble': np.random.uniform(low=-1.0, high=2.0),
'logdouble': np.random.uniform(low=1e-4, high=1e2),
})
)
return trials |
Search space with all parameter types. | def flat_space_with_all_types() -> vz.SearchSpace:
"""Search space with all parameter types."""
space = vz.SearchSpace()
root = space.root
root.add_float_param('lineardouble', -1., 2.)
root.add_float_param('logdouble', 1e-4, 1e2, scale_type=vz.ScaleType.LOG)
root.add_int_param('integer', -2, 2)
root.add_categorical_param('categorical', ['a', 'aa', 'aaa'])
root.add_bool_param('boolean')
root.add_discrete_param('discrete_double', [-.5, 1.0, 1.2])
root.add_discrete_param(
'discrete_logdouble', [1e-5, 1e-2, 1e-1], scale_type=vz.ScaleType.LOG)
root.add_discrete_param('discrete_int', [-1, 1, 2])
return space |
Conditional space for a simple AutoML task. | def conditional_automl_space() -> vz.SearchSpace:
"""Conditional space for a simple AutoML task."""
space = vz.SearchSpace()
root = space.select_root()
root.add_categorical_param(
'model_type', ['linear', 'dnn'], default_value='dnn'
)
dnn = root.select('model_type', ['dnn'])
dnn.add_float_param(
'learning_rate',
0.0001,
1.0,
default_value=0.001,
scale_type=vz.ScaleType.LOG,
)
linear = root.select('model_type', ['linear'])
linear.add_float_param(
'learning_rate', 0.1, 1.0, default_value=0.1, scale_type=vz.ScaleType.LOG
)
_ = dnn.add_categorical_param('optimizer_type', ['adam', 'evolution'])
# Chained select() calls, path length of 1.
root.select('model_type', ['dnn']).select(
'optimizer_type', ['adam']
).add_float_param(
'learning_rate', 0.1, 1.0, default_value=0.1, scale_type=vz.ScaleType.LOG
)
# Chained select() calls, path length of 2.
ko = (
root.select('model_type', ['dnn'])
.select('optimizer_type', ['adam'])
.add_bool_param('use_special_logic', default_value=False)
)
ko2 = ko.select_values(['True'])
_ = ko2.add_float_param(
'special_logic_parameter', 1.0, 3.0, default_value=2.1
)
return space |
Creates a shape validator for attrs.
For example, shape_equals(lambda s: [3, None]) validates that the shape has
length 2 and its first element is 3.
Code Example:
@attrs.define
class TestAttr:
x = attrs.field(validator=attrs_utils.shape_equals(lambda v: (3, v.d)))
d = attrs.field()
TestAttr(np.zeros([3, 2]), 2) # OK
TestAttr(np.zeros([3, 5]), None) # OK
TestAttr(np.zeros([3, 2]), 4) # Raises ValueError
Args:
instance_to_shape: Takes instance as input and returns the desired shape for
the instance. `None` is treated as "any number".
Returns:
A validator that can be passed into attrs.ib or attrs.field. | def shape_equals(instance_to_shape: Callable[[Any], Collection[Optional[int]]]):
"""Creates a shape validator for attrs.
For example, shape_equals(lambda s: [3, None]) validates that the shape has
length 2 and its first element is 3.
Code Example:
@attrs.define
class TestAttr:
x = attrs.field(validator=attrs_utils.shape_equals(lambda v: (3, v.d)))
d = attrs.field()
TestAttr(np.zeros([3, 2]), 2) # OK
TestAttr(np.zeros([3, 5]), None) # OK
TestAttr(np.zeros([3, 2]), 4) # Raises ValueError
Args:
instance_to_shape: Takes instance as input and returns the desired shape for
the instance. `None` is treated as "any number".
Returns:
A validator that can be passed into attrs.ib or attrs.field.
"""
def validator(instance, attribute, value) -> None:
shape = instance_to_shape(instance)
def _validator_boolean():
if len(value.shape) != len(shape):
return False
for s1, s2 in zip(value.shape, shape):
if (s2 is not None) and (s1 != s2):
return False
return True
if not _validator_boolean():
raise ValueError(f'{attribute.name} has shape {value.shape} '
f'which does not match the expected shape {shape}')
return validator |
Example: json.loads(..., object_hook=numpy_hook). | def numpy_hook(obj: Any) -> Any:
"""Example: json.loads(..., object_hook=numpy_hook)."""
if 'dtype' not in obj:
return obj
if 'shape' not in obj:
return obj
return np.array(obj['value'], dtype=obj['dtype']).reshape(obj['shape']) |
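A hedged usage sketch; the serialized form is assumed to carry 'value', 'dtype' and 'shape' keys, matching what the hook reads.
import json
import numpy as np

payload = '{"value": [1, 2, 3, 4], "dtype": "float32", "shape": [2, 2]}'
arr = json.loads(payload, object_hook=numpy_hook)
print(arr.dtype, arr.shape)  # float32 (2, 2)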
Context manager for turning on the profiler. | def collect_events() -> Generator[List[ProfileEvent], None, None]:
"""Context manager for turning on the profiler."""
try:
if _GLOBAL_SOTRAGE.active:
raise RuntimeError(
'There can be only one `collect_events()` context manager active at'
' the same time.'
)
_GLOBAL_SOTRAGE.active = True
yield _GLOBAL_SOTRAGE.events
finally:
_GLOBAL_SOTRAGE.active = False
# It's important to create a new instance here as opposed to resetting
# the same object, so the yielded `events` persist outside the context.
_GLOBAL_SOTRAGE.events = list() |
Context manager for measuring the timing.
Example:
```
with timeit('scope_name') as duration:
...
duration() # returns the duration.
```
Also see: record_runtime, which is the decorator equivalent of this.
Args:
name:
also_log: If True, also create a log.
Yields:
A callable with zero input arguments. Returns the elapsed time until call,
or the end of the context, whichever comes earlier. | def timeit(
name: str, also_log: bool = False
) -> Generator[Callable[[], datetime.timedelta], None, None]:
"""Context manager for measuring the timing.
Example:
```
with timeit('scope_name') as duration:
...
duration() # returns the duration.
```
Also see: record_runtime, which is the decorator equivalent of this.
Args:
name:
also_log: If True, also create a log.
Yields:
A callable with zero input arguments. Returns the elapsed time until call,
or the end of the context, whichever comes earlier.
"""
start = datetime.datetime.now()
duration = None
try:
with _enter_profile_scope(name):
def func() -> datetime.timedelta:
if duration is None:
return datetime.datetime.now() - start
return duration
yield func
finally:
duration = datetime.datetime.now() - start
event = _GLOBAL_SOTRAGE.add(EventType.TIMER, (name, duration))
if also_log:
logging.info(
'Timed %s: took %s seconds. Full event string: %s',
name,
duration.total_seconds(),
event,
) |
Decorates the function to record the runtime.
Also see: timeit(), which is the context manager equivalent of this.
Args:
func: Function being decorated.
name_prefix: A prefix to add to the function name.
name: The name to record. Defaults to func.__qualname__.
also_log: Whether to also logging.info the runtime duration.
block_until_ready: If True and running on an accelerator, wait for the async
dispatch results from this function, if any. See
https://jax.readthedocs.io/en/latest/async_dispatch.html
Returns:
Decorated function, or decorator. | def record_runtime(
func: Optional[Callable[..., Any]] = None,
*,
name_prefix: str = '',
name: str = '',
also_log: bool = False,
block_until_ready: bool = False,
) -> Any:
"""Decorates the function to record the runtime.
Also see: timeit(), which is the context manager equivalent of this.
Args:
func: Function being decorated.
name_prefix: A prefix to add to the function name.
name: The name to record. Defaults to func.__qualname__.
also_log: Whether to also logging.info the runtime duration.
block_until_ready: If True and running on an accelerator, wait for the async
dispatch results from this function, if any. See
https://jax.readthedocs.io/en/latest/async_dispatch.html
Returns:
Decorated function, or decorator.
"""
# This is required for the decorator to work both with and without
# optional arguments.
if func is None:
return functools.partial(
record_runtime,
name_prefix=name_prefix,
name=name,
also_log=also_log,
block_until_ready=block_until_ready,
)
name = name or func.__qualname__
full_name = name
if name_prefix:
full_name = f'{name_prefix}.{name}'
@functools.wraps(func)
def wrapper(*args, **kwargs):
"""Calculates runtime of the given function."""
with timeit(name=full_name, also_log=also_log):
result = func(*args, **kwargs)
if block_until_ready:
result = jax.block_until_ready(result)
return result
return wrapper |
Decorates the function to record the runtime of functions.
Args:
func: Function being decorated.
name: The name to record. Defaults to func.__qualname__.
also_log: Whether to also logging.info the runtime duration.
Returns:
Decorated function, or decorator. | def record_tracing(
func: Optional[Callable[..., Any]] = None,
*,
name: str = '',
also_log: bool = True,
) -> Any:
"""Decorates the function to record the runtime of functions.
Args:
func: Function being decorated.
name: The name to record. Defaults to func.__qualname__.
also_log: Whether to also logging.info the runtime duration.
Returns:
Decorated function, or decorator.
"""
# This is required for the decorator to work both with and without
# optional arguments.
if func is None:
return functools.partial(
record_tracing,
name=name,
also_log=also_log,
)
name = name or func.__qualname__
@functools.wraps(func)
def wrapper(*args, **kwargs):
if config.include_args_in_trace_records():
data = f'{name} with args={args} kwargs={kwargs}'
else:
data = name
event = _GLOBAL_SOTRAGE.add(EventType.JIT_TRACING, data)
if also_log:
logging.info('Tracing %s. Full event string: %s', name, event)
with _enter_profile_scope(name):
result = func(*args, **kwargs)
return result
return wrapper |