code | docstring | func_name | language | repo | path | url | license |
---|---|---|---|---|---|---|---|
def partition(self, tensor):
"""Partition tensor into blocks."""
assert tensor.shape == self._shape
tensors = [tensor]
for i, indices in self._splits:
tensors_local = []
for t in tensors:
tensors_local.extend(jnp.split(t, indices_or_sections=indices, axis=i))
tensors = tensors_local
return tensors | Partition tensor into blocks. | partition | python | borisdayma/dalle-mini | tools/train/scalable_shampoo/distributed_shampoo.py | https://github.com/borisdayma/dalle-mini/blob/master/tools/train/scalable_shampoo/distributed_shampoo.py | Apache-2.0 |
def merge_partitions(self, partitions):
"""Merge partitions back to original shape."""
for i, indices in reversed(self._splits):
n = len(indices) + 1
partial_merged_tensors = []
ind = 0
while ind < len(partitions):
partial_merged_tensors.append(
jnp.concatenate(partitions[ind : ind + n], axis=i)
)
ind += n
partitions = partial_merged_tensors
assert len(partitions) == 1
return partitions[0] | Merge partitions back to original shape. | merge_partitions | python | borisdayma/dalle-mini | tools/train/scalable_shampoo/distributed_shampoo.py | https://github.com/borisdayma/dalle-mini/blob/master/tools/train/scalable_shampoo/distributed_shampoo.py | Apache-2.0 |
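As an illustration of the split/merge round trip implemented by `partition` and `merge_partitions` above, here is a minimal standalone sketch with toy shapes (it does not use the library's `BlockPartitioner` API):

```python
# Minimal sketch of the partition/merge round trip along one axis.
import jax.numpy as jnp

x = jnp.arange(12.0).reshape(4, 3)
# Split rows at index 2 (split indices mark interior block boundaries).
blocks = jnp.split(x, indices_or_sections=[2], axis=0)
assert len(blocks) == 2 and blocks[0].shape == (2, 3)

# Merging concatenates the blocks back along the same axis.
merged = jnp.concatenate(blocks, axis=0)
assert jnp.array_equal(merged, x)
```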
def gram_weighted_update(old_stats, g, axis, w1, w2, precision=None):
"""Updated statistics via weighted average with new Gram matrix.
Returns w₁ R + w₂ Gᵀ G where R is `old_stats` and G is the matrix whose
columns are the flattened slices of the tensor `g` along the given `axis`.
(So, `old_stats` and the returned matrix have dimensions n x n where
n = `g.shape[axis]`).
Args:
old_stats: Old statistics.
g: Gradient tensor.
axis: Axis along which to slice `g`.
w1: Scalar weight for old statistics.
w2: Scalar weight for new Gram matrix.
precision: Optional precision XLA related flag, the available options are:
a) lax.Precision.DEFAULT (better step time, but not precise)
b) lax.Precision.HIGH (increased precision, slower)
c) lax.Precision.HIGHEST (best possible precision, slowest)
Returns:
Weighted average of old and new statistics.
"""
axes = [i for i in range(g.ndim) if i != axis]
gram_matrix = jnp.tensordot(g, g, axes=(axes, axes), precision=precision)
return w1 * old_stats + w2 * gram_matrix | Updated statistics via weighted average with new Gram matrix.
Returns w₁ R + w₂ Gᵀ G where R is `old_stats` and G is the matrix whose
columns are the flattened slices of the tensor `g` along the given `axis`.
(So, `old_stats` and the returned matrix have dimensions n x n where
n = `g.shape[axis]`).
Args:
old_stats: Old statistics.
g: Gradient tensor.
axis: Axis along which to slice `g`.
w1: Scalar weight for old statistics.
w2: Scalar weight for new Gram matrix.
precision: Optional precision XLA related flag, the available options are:
a) lax.Precision.DEFAULT (better step time, but not precise)
b) lax.Precision.HIGH (increased precision, slower)
c) lax.Precision.HIGHEST (best possible precision, slowest)
Returns:
Weighted average of old and new statistics. | gram_weighted_update | python | borisdayma/dalle-mini | tools/train/scalable_shampoo/distributed_shampoo.py | https://github.com/borisdayma/dalle-mini/blob/master/tools/train/scalable_shampoo/distributed_shampoo.py | Apache-2.0 |
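The tensordot call above is the Gram matrix of the gradient slices; the following sketch (toy tensor, hypothetical shapes) checks the equivalence stated in the docstring:

```python
# Contracting g with itself over every axis except `axis` yields G^T G,
# where each row of G below is a flattened slice of g along `axis`.
import jax.numpy as jnp

g = jnp.arange(24.0).reshape(2, 3, 4)
axis = 1
axes = [i for i in range(g.ndim) if i != axis]
gram = jnp.tensordot(g, g, axes=(axes, axes))            # shape (3, 3)

G = jnp.moveaxis(g, axis, 0).reshape(g.shape[axis], -1)  # (3, 8)
assert jnp.allclose(gram, G @ G.T)
# The new statistic is then w1 * old_stats + w2 * gram.
```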
def __init__(
self,
param,
block_size,
merge_small_dims_block_size,
best_effort_shape_interpretation,
preconditioner_type=PreconditionerType.ALL,
):
"""Initializes the preconditioner.
Args:
param: parameter to precondition.
block_size: Block size used to split param.
merge_small_dims_block_size: Block size for merging dims.
best_effort_shape_interpretation: Whether to collapse/merge dims together.
preconditioner_type: Type of preconditioner to use.
"""
self._original_shape = param.shape
self._transformed_shape = param.shape
if best_effort_shape_interpretation:
self._transformed_shape = merge_small_dims(
self._original_shape, merge_small_dims_block_size
)
reshaped_param = jnp.reshape(param, self._transformed_shape)
self._partitioner = BlockPartitioner(reshaped_param, block_size)
self._preconditioner_type = preconditioner_type | Initializes the preconditioner.
Args:
param: parameter to precondition.
block_size: Block size used to split param.
merge_small_dims_block_size: Block size for merging dims.
best_effort_shape_interpretation: Whether to collapse/merge dims together.
preconditioner_type: Type of preconditioner to use. | __init__ | python | borisdayma/dalle-mini | tools/train/scalable_shampoo/distributed_shampoo.py | https://github.com/borisdayma/dalle-mini/blob/master/tools/train/scalable_shampoo/distributed_shampoo.py | Apache-2.0 |
def updated_statistics_from_grad(
self,
stats,
grad,
w1,
w2,
to_float=None,
from_float=None,
precision=None,
):
"""Update statistics from gradients.
Args:
stats: Old statistics or its Cholesky factor if `cholesky` is True.
grad: Gradient to compute statistics from.
w1: Weight for old statistics.
w2: Weight for new statistics.
to_float: Optional function for converting stats to floating point.
from_float: Optional function for converting from floating point.
precision: Optional precision XLA related flag, the available options are:
a) lax.Precision.DEFAULT (better step time, but not precise)
b) lax.Precision.HIGH (increased precision, slower)
c) lax.Precision.HIGHEST (best possible precision, slowest)
Returns:
A list of updated gradient statistics for each partition.
"""
to_float = to_float if to_float is not None else (lambda x: x)
from_float = from_float if from_float is not None else (lambda x: x)
update = functools.partial(gram_weighted_update, precision=precision)
reshaped_grad = jnp.reshape(grad, self._transformed_shape)
partitioned_grads = self._partitioner.partition(reshaped_grad)
new_stats = []
index = 0
for g in partitioned_grads:
should_preconditioned_dims = self.should_precondition_dims()
num_preconditioners = sum(should_preconditioned_dims)
for axis in range(num_preconditioners):
new_stat = update(to_float(stats[index]), g, axis, w1, w2)
new_stats.append(from_float(new_stat))
index += 1
return new_stats | Update statistics from gradients.
Args:
stats: Old statistics or its Cholesky factor if `cholesky` is True.
grad: Gradient to compute statistics from.
w1: Weight for old statistics.
w2: Weight for new statistics.
to_float: Optional function for converting stats to floating point.
from_float: Optional function for converting from floating point.
precision: Optional precision XLA related flag, the available options are:
a) lax.Precision.DEFAULT (better step time, but not precise)
b) lax.Precision.HIGH (increased precision, slower)
c) lax.Precision.HIGHEST (best possible precision, slowest)
Returns:
A list of updated gradient statistics for each partition. | updated_statistics_from_grad | python | borisdayma/dalle-mini | tools/train/scalable_shampoo/distributed_shampoo.py | https://github.com/borisdayma/dalle-mini/blob/master/tools/train/scalable_shampoo/distributed_shampoo.py | Apache-2.0 |
def should_precondition_dims(self):
"""A vector containing indicator indicating if the dim is preconditioned."""
split_sizes = self._partitioner.split_sizes()
rank = len(split_sizes)
if self._preconditioner_type == PreconditionerType.ALL or rank <= 1:
return [True] * rank
else:
return [True] * (rank - 1) + [False] | A vector of indicators for whether each dim is preconditioned. | should_precondition_dims | python | borisdayma/dalle-mini | tools/train/scalable_shampoo/distributed_shampoo.py | https://github.com/borisdayma/dalle-mini/blob/master/tools/train/scalable_shampoo/distributed_shampoo.py | Apache-2.0 |
def shapes_for_preconditioners(self):
"""Returns shape from statistics."""
split_sizes = self._partitioner.split_sizes()
rank = len(split_sizes)
# We ignore preconditioner types if rank == 1
preconditioner_shapes = []
for t in itertools.product(*split_sizes):
if self._preconditioner_type == PreconditionerType.ALL or rank <= 1:
preconditioner_shapes.extend([[d, d] for d in t])
else:
preconditioner_shapes.extend([[d, d] for d in t[:-1]])
return preconditioner_shapes | Returns shape from statistics. | shapes_for_preconditioners | python | borisdayma/dalle-mini | tools/train/scalable_shampoo/distributed_shampoo.py | https://github.com/borisdayma/dalle-mini/blob/master/tools/train/scalable_shampoo/distributed_shampoo.py | Apache-2.0 |
def exponent_for_preconditioner(self):
"""Returns exponent to use for inverse-pth root M^{-1/p}."""
should_preconditioned_dims = self.should_precondition_dims()
num_preconditioners = sum(should_preconditioned_dims)
return 2 * num_preconditioners | Returns exponent to use for inverse-pth root M^{-1/p}. | exponent_for_preconditioner | python | borisdayma/dalle-mini | tools/train/scalable_shampoo/distributed_shampoo.py | https://github.com/borisdayma/dalle-mini/blob/master/tools/train/scalable_shampoo/distributed_shampoo.py | Apache-2.0 |
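As a worked example of this exponent rule: a matrix-shaped block with both dimensions preconditioned has `num_preconditioners = 2`, so the exponent is 4 and each Kronecker factor enters at power -1/4, which is the usual Shampoo update (a sketch of the standard formula, not code from this file):

$$
P_t = L_t^{-1/4}\, G_t\, R_t^{-1/4}, \qquad
L_t = \epsilon I + \sum_{s \le t} G_s G_s^\top, \qquad
R_t = \epsilon I + \sum_{s \le t} G_s^\top G_s .
$$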
def preconditioned_grad(self, grad, preconditioners):
"""Precondition the gradient.
Args:
grad: A gradient tensor to precondition.
preconditioners: A list of preconditioners to apply.
Returns:
A preconditioned gradient.
"""
reshaped_grad = jnp.reshape(grad, self._transformed_shape)
partitioned_grads = self._partitioner.partition(reshaped_grad)
preconditioned_partitioned_grads = []
for i, g in enumerate(partitioned_grads):
should_preconditioned_dims = self.should_precondition_dims()
num_preconditioners = sum(should_preconditioned_dims)
preconditioners_for_grad = preconditioners[
i * num_preconditioners : (i + 1) * num_preconditioners
]
precond_g = g
rank = len(g.shape)
for j, precondition in enumerate(should_preconditioned_dims):
if precondition:
precond_g = jnp.tensordot(
precond_g, preconditioners_for_grad[j], axes=[[0], [0]]
)
else:
precond_g = jnp.transpose(precond_g, axes=(*range(1, rank), 0))
preconditioned_partitioned_grads.append(precond_g)
merged_grad = self._partitioner.merge_partitions(
preconditioned_partitioned_grads
)
return jnp.reshape(merged_grad, self._original_shape) | Precondition the gradient.
Args:
grad: A gradient tensor to precondition.
preconditioners: A list of preconditioners to apply.
Returns:
A preconditioned gradient. | preconditioned_grad | python | borisdayma/dalle-mini | tools/train/scalable_shampoo/distributed_shampoo.py | https://github.com/borisdayma/dalle-mini/blob/master/tools/train/scalable_shampoo/distributed_shampoo.py | Apache-2.0 |
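The loop above relies on an axis-cycling property of `jnp.tensordot`: contracting axis 0 with a preconditioner moves the preconditioned mode to the back, so after one pass every dimension has been preconditioned once. A toy check (stand-in diagonal preconditioners, not the library API):

```python
# For a matrix block, applying the two preconditioners via tensordot over
# axis 0 is the same as computing L^T g R.
import jax.numpy as jnp

g = jnp.arange(6.0).reshape(2, 3)
L = 2.0 * jnp.eye(2)   # stand-in left preconditioner
R = 3.0 * jnp.eye(3)   # stand-in right preconditioner

out = g
for p in (L, R):
    out = jnp.tensordot(out, p, axes=[[0], [0]])

assert jnp.allclose(out, L.T @ g @ R)
```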
def _convert_to_parameter_stats(global_stats, local_stat, convert_statistics=True):
"""Creates parameter stats from sharded stats."""
index_start = int(local_stat.index_start)
index_end = int(len(local_stat.sizes)) + index_start
statistics = global_stats.statistics[index_start:index_end, :, :]
preconditioners = global_stats.preconditioners[index_start:index_end, :, :]
new_statistics = []
new_preconditioners = []
for i, size in enumerate(local_stat.sizes):
new_statistics.append(statistics[i][:size, :size])
new_preconditioners.append(preconditioners[i][:size, :size])
if not convert_statistics:
new_statistics = None
return ParameterStats(
local_stat.diagonal_statistics,
new_statistics,
new_preconditioners,
local_stat.diagonal_momentum,
local_stat.momentum,
local_stat.training_metrics,
) | Creates parameter stats from sharded stats. | _convert_to_parameter_stats | python | borisdayma/dalle-mini | tools/train/scalable_shampoo/distributed_shampoo.py | https://github.com/borisdayma/dalle-mini/blob/master/tools/train/scalable_shampoo/distributed_shampoo.py | Apache-2.0 |
def _convert_from_parameter_stats(parameter_stats, local_stats):
"""Creates sharded stats from paramter stats."""
return LocalShardedParameterStats(
parameter_stats.diagonal_statistics,
parameter_stats.diagonal_momentum,
parameter_stats.momentum,
parameter_stats.training_metrics,
local_stats.index_start,
local_stats.sizes,
) | Creates sharded stats from parameter stats. | _convert_from_parameter_stats | python | borisdayma/dalle-mini | tools/train/scalable_shampoo/distributed_shampoo.py | https://github.com/borisdayma/dalle-mini/blob/master/tools/train/scalable_shampoo/distributed_shampoo.py | Apache-2.0 |
def _add_error_into_local_stats(local_stats, errors, inverse_failure_threshold):
"""Adds errors back into local statistics."""
new_local_stats = []
for local_stat in local_stats:
if local_stat.sizes:
index_start = int(local_stat.index_start)
index_end = int(len(local_stat.sizes)) + index_start
per_stat_error = errors[index_start:index_end]
else:
per_stat_error = jnp.array(0, jnp.float32)
if local_stat.sizes:
per_stat_error = jnp.where(
jnp.logical_and(
per_stat_error > 0.0, per_stat_error != inverse_failure_threshold
),
per_stat_error,
local_stat.training_metrics.inverse_pth_root_errors,
)
new_local_stats.append(
LocalShardedParameterStats(
local_stat.diagonal_statistics,
local_stat.diagonal_momentum,
local_stat.momentum,
TrainingMetrics(per_stat_error),
local_stat.index_start,
local_stat.sizes,
)
)
return new_local_stats | Adds errors back into local statistics. | _add_error_into_local_stats | python | borisdayma/dalle-mini | tools/train/scalable_shampoo/distributed_shampoo.py | https://github.com/borisdayma/dalle-mini/blob/master/tools/train/scalable_shampoo/distributed_shampoo.py | Apache-2.0 |
def batch(x, num_devices):
"""Batch `x` so that so that leading axis is num_devices."""
n = len(x)
b = int(n / num_devices)
return jnp.stack([jnp.stack(x[idx : idx + b]) for idx in range(0, n, b)]) | Batch `x` so that the leading axis is num_devices. | batch | python | borisdayma/dalle-mini | tools/train/scalable_shampoo/distributed_shampoo.py | https://github.com/borisdayma/dalle-mini/blob/master/tools/train/scalable_shampoo/distributed_shampoo.py | Apache-2.0 |
def unbatch(batched_values):
"""Unbatch values across leading axis and return a list of elements."""
b1, b2 = batched_values.shape[0], batched_values.shape[1]
results = []
for v_array in jnp.split(batched_values, indices_or_sections=b1, axis=0):
v_array = jnp.squeeze(v_array)
# b2 = batches (number of preconditioner computation) per core.
if b2 > 1:
for v in jnp.split(v_array, indices_or_sections=b2, axis=0):
results.append(jnp.squeeze(v))
else:
results.append(v_array)
return results | Unbatch values across leading axis and return a list of elements. | unbatch | python | borisdayma/dalle-mini | tools/train/scalable_shampoo/distributed_shampoo.py | https://github.com/borisdayma/dalle-mini/blob/master/tools/train/scalable_shampoo/distributed_shampoo.py | Apache-2.0 |
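`batch` and `unbatch` are inverses of each other; the sketch below (toy matrices, hypothetical device count) shows the shapes involved:

```python
# Six 4x4 statistics on 3 "devices" become a (3, 2, 4, 4) array; unbatching
# splits and squeezes it back into a list of six 4x4 matrices.
import jax.numpy as jnp

mats = [float(i) * jnp.eye(4) for i in range(6)]
num_devices = 3
b = len(mats) // num_devices
batched = jnp.stack([jnp.stack(mats[i:i + b]) for i in range(0, len(mats), b)])
assert batched.shape == (3, 2, 4, 4)
```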
def _graft_type_has_diagonal_statistics():
"""Returns True if using diagonal firt order method for grafting."""
return graft_type != GraftingType.SGD and graft_type != GraftingType.SQRT_N | Returns True if using diagonal firt order method for grafting. | distributed_shampoo._graft_type_has_diagonal_statistics | python | borisdayma/dalle-mini | tools/train/scalable_shampoo/distributed_shampoo.py | https://github.com/borisdayma/dalle-mini/blob/master/tools/train/scalable_shampoo/distributed_shampoo.py | Apache-2.0 |
def preconditioner_from_params(param):
"""Returns a Preconditioner object for given param."""
return Preconditioner(
param,
block_size,
merge_small_dims_block_size,
best_effort_shape_interpretation,
precondtioner_type,
) | Returns a Preconditioner object for given param. | distributed_shampoo.preconditioner_from_params | python | borisdayma/dalle-mini | tools/train/scalable_shampoo/distributed_shampoo.py | https://github.com/borisdayma/dalle-mini/blob/master/tools/train/scalable_shampoo/distributed_shampoo.py | Apache-2.0 |
def sharded_init_fn(params):
"""Returns optimizer state (for PJIT mode).
Args:
params: the parameters that should be updated.
"""
params_flat, treedef = jax.tree_flatten(params)
# Find max size to pad to.
max_size = 0
for param in params_flat:
preconditioner = preconditioner_from_params(param)
if not _skip_preconditioning(param):
shapes = preconditioner.shapes_for_preconditioners()
sizes = [s[0] for s in shapes]
max_size = max(max(sizes), max_size)
padded_statistics = []
padded_preconditioners = []
local_stats_flat = []
exponents = []
for param in params_flat:
preconditioner = preconditioner_from_params(param)
shapes = preconditioner.shapes_for_preconditioners()
sizes = []
statistics = []
preconditioners = []
index_start = len(padded_statistics)
if not _skip_preconditioning(param):
sizes = [s[0] for s in shapes]
shapes = preconditioner.shapes_for_preconditioners()
statistics = [
matrix_epsilon * jnp.eye(max_size, dtype=jnp.float32)
for s in shapes
]
preconditioners = [jnp.eye(max_size, dtype=jnp.float32) for s in shapes]
padded_statistics.extend(statistics)
padded_preconditioners.extend(preconditioners)
exponent = (
preconditioner.exponent_for_preconditioner()
if exponent_override == 0
else exponent_override
)
exponents.extend([exponent] * len(shapes))
diagonal_statistics = _quantize_diagonal_statistics(jnp.zeros_like(param))
diagonal_momentum = _quantize_momentum(jnp.zeros_like(param))
momentum = _quantize_momentum(jnp.zeros_like(param))
local_stats_flat.append(
LocalShardedParameterStats(
diagonal_statistics,
diagonal_momentum,
momentum,
init_training_metrics(len(sizes)),
index_start,
sizes,
)
)
local_stats = jax.tree_unflatten(treedef, local_stats_flat)
to_pad = -len(padded_statistics) % num_devices_for_pjit
if max_size == 0:
to_pad = num_devices_for_pjit
max_size = block_size
stat_dtype = jnp.float32
else:
stat_dtype = padded_statistics[0].dtype
# Pad the statistics and preconditioner matrices to be a multiple of
# num devices.
# TODO(rohananil): Relax to only the size of the mesh axis where the dim
# is split on.
padded_statistics.extend(
[jnp.eye(max_size, dtype=stat_dtype) for _ in range(to_pad)]
)
padded_preconditioners.extend(
[jnp.eye(max_size, dtype=stat_dtype) for _ in range(to_pad)]
)
exponents.extend([1 for _ in range(to_pad)])
global_stats = GlobalShardedParameterStats(
jnp.stack(padded_statistics),
jnp.stack(padded_preconditioners),
jnp.stack(exponents),
)
return ShampooState(
count=jnp.zeros([], jnp.int32),
stats=ShardedShampooStats(global_stats, local_stats),
) | Returns optimizer state (for PJIT mode).
Args:
params: the parameters that should be updated. | distributed_shampoo.sharded_init_fn | python | borisdayma/dalle-mini | tools/train/scalable_shampoo/distributed_shampoo.py | https://github.com/borisdayma/dalle-mini/blob/master/tools/train/scalable_shampoo/distributed_shampoo.py | Apache-2.0 |
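The padding logic above rounds the number of statistics up to a multiple of the device count using Python's modulo of a negative number; a small worked example with hypothetical counts:

```python
# -n % k gives how many identity matrices to append so that the stacked
# statistics divide evenly across k devices.
for n, k in [(7, 4), (8, 4), (9, 8)]:
    print(n, k, -n % k)   # -> 1, 0, 7
```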
def _remove_leading_sharding_annotation(pspec):
"""Mapping from N-d to (N-1)-d, used for quantization, factoring etc."""
# None and PSpec(None) are valid PSpecs.
if pspec and len(pspec) > 1:
return pjit.PartitionSpec(*pspec[1:])
else:
return [] | Mapping from N-d to (N-1)-d, used for quantization, factoring etc. | distributed_shampoo._remove_leading_sharding_annotation | python | borisdayma/dalle-mini | tools/train/scalable_shampoo/distributed_shampoo.py | https://github.com/borisdayma/dalle-mini/blob/master/tools/train/scalable_shampoo/distributed_shampoo.py | Apache-2.0 |
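For example, with the `pjit.PartitionSpec` type this file uses (a hypothetical spec; newer JAX exposes the same class as `jax.sharding.PartitionSpec`):

```python
from jax.experimental import pjit

pspec = pjit.PartitionSpec("data", "model")
# Dropping the leading sharding axis gives the spec for the (N-1)-d buffers
# (e.g. per-row quantization scales) derived from an N-d parameter.
reduced = pjit.PartitionSpec(*pspec[1:])   # PartitionSpec("model",)
```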
def sharded_init_partition_spec_fn(
params, params_partition_spec, partition_spec_for_statistics
):
"""Returns a parallel state tree with PartitionSpec associated with state.
Args:
params: A pytree with params.
params_partition_spec: A pytree with PartitionSpec for params.
partition_spec_for_statistics: PartitionSpec for the statistics.
"""
# Parallel lists of spec, and params.
param_pspec_flat, _ = jax.tree_flatten(
params_partition_spec, is_leaf=lambda x: x is None
)
params_flat, treedef = jax.tree_flatten(params)
assert param_pspec_flat
assert params_flat
# Step is replicated across cores.
# None means cores.
local_stats_flat = []
num_statistics = 0
for param, param_pspec in zip(params_flat, param_pspec_flat):
param_clone = jnp.zeros(param.shape, dtype=param.dtype)
preconditioner = preconditioner_from_params(param_clone)
shapes = preconditioner.shapes_for_preconditioners()
sizes = []
index_start = num_statistics
if not _skip_preconditioning(param):
sizes = [s[0] for s in shapes]
shapes = preconditioner.shapes_for_preconditioners()
num_statistics += len(shapes)
qdtype = quantized_dtype_for_momentum_buffers(param)
m1_pspec = param_pspec
m2_pspec = param_pspec
m1_scale_pspec = []
m2_scale_pspec = []
if qdtype != jnp.float32:
m1_scale_pspec = _remove_leading_sharding_annotation(m1_pspec)
m2_scale_pspec = _remove_leading_sharding_annotation(m2_pspec)
local_stats_flat.append(
LocalShardedParameterStats(
QuantizedValue(
param_pspec, [], [], jnp.float32, False, list(param.shape)
),
QuantizedValue(
m1_pspec, [], m1_scale_pspec, qdtype, False, list(param.shape)
),
QuantizedValue(
m2_pspec, [], m2_scale_pspec, qdtype, False, list(param.shape)
),
init_training_metrics_pspec(),
index_start,
sizes,
)
)
local_stats = jax.tree_unflatten(treedef, local_stats_flat)
global_stats = GlobalShardedParameterStats(
partition_spec_for_statistics,
partition_spec_for_statistics,
pjit.PartitionSpec(),
)
count_pspec = pjit.PartitionSpec()
return ShampooState(
count=count_pspec, stats=ShardedShampooStats(global_stats, local_stats)
) | Returns a parallel state tree with PartitionSpec associated with state.
Args:
params: A pytree with params.
params_partition_spec: A pytree with PartitionSpec for params.
partition_spec_for_statistics: PartitionSpec for the statistics. | distributed_shampoo.sharded_init_partition_spec_fn | python | borisdayma/dalle-mini | tools/train/scalable_shampoo/distributed_shampoo.py | https://github.com/borisdayma/dalle-mini/blob/master/tools/train/scalable_shampoo/distributed_shampoo.py | Apache-2.0 |
def sharded_init_shape_and_dtype_fn(params):
"""Returns a parallel state tree with shape, dtype associated with state.
Args:
params: A pytree with params.
"""
# Parallel lists of spec, and params.
params_flat, treedef = jax.tree_flatten(params)
assert params_flat
# Step is replicated across cores.
# None means cores.
local_stats_flat = []
num_statistics = 0
for param in params_flat:
param_clone = jnp.zeros(param.shape, dtype=param.dtype)
preconditioner = preconditioner_from_params(param_clone)
shapes = preconditioner.shapes_for_preconditioners()
sizes = []
index_start = num_statistics
if not _skip_preconditioning(param):
sizes = [s[0] for s in shapes]
shapes = preconditioner.shapes_for_preconditioners()
num_statistics += len(shapes)
qdtype = quantized_dtype_for_momentum_buffers(param)
m1_shape_and_dtype = [list(param.shape), param.dtype]
m2_shape_and_dtype = [list(param.shape), param.dtype]
m1_scale_shape_and_dtype = []
m2_scale_shape_and_dtype = []
if qdtype != jnp.float32:
m1_scale_shape_and_dtype = [list(param.shape)[1:], qdtype]
m2_scale_shape_and_dtype = [list(param.shape)[1:], qdtype]
diagonal_statistics_shape_and_dtype = [list(param.shape), param.dtype]
local_stats_flat.append(
LocalShardedParameterStats(
QuantizedValue(
diagonal_statistics_shape_and_dtype,
[],
[],
jnp.float32,
False,
list(param.shape),
),
QuantizedValue(
m1_shape_and_dtype,
[],
m1_scale_shape_and_dtype,
qdtype,
False,
list(param.shape),
),
QuantizedValue(
m2_shape_and_dtype,
[],
m2_scale_shape_and_dtype,
qdtype,
False,
list(param.shape),
),
init_training_metrics_shapes(len(sizes)),
index_start,
sizes,
)
)
local_stats = jax.tree_unflatten(treedef, local_stats_flat)
max_statistics_size = _max_statistics_size_from_params(params_flat)
to_pad = -num_statistics % num_devices_for_pjit
num_statistics += to_pad
if num_statistics == 0:
num_statistics = num_devices_for_pjit
max_statistics_size = block_size
statistics_shape = [num_statistics, max_statistics_size, max_statistics_size]
global_stats = GlobalShardedParameterStats(
[statistics_shape, jnp.float32],
[statistics_shape, jnp.float32],
[[num_statistics], jnp.int32],
)
return ShampooState(
count=[[], jnp.float32],
stats=ShardedShampooStats(global_stats, local_stats),
) | Returns a parallel state tree with shape, dtype associated with state.
Args:
params: A pytree with params. | distributed_shampoo.sharded_init_shape_and_dtype_fn | python | borisdayma/dalle-mini | tools/train/scalable_shampoo/distributed_shampoo.py | https://github.com/borisdayma/dalle-mini/blob/master/tools/train/scalable_shampoo/distributed_shampoo.py | Apache-2.0 |
def sharded_update_fn(grads, state, params):
"""Transform the input gradient and update all statistics in sharded mode.
Args:
grads: the gradient tensors for the parameters.
state: a named tuple containing the state of the optimizer
params: the parameters that should be updated.
Returns:
A tuple containing the new parameters and the new optimizer state.
"""
params_flat, treedef = jax.tree_flatten(params)
grads_flat = treedef.flatten_up_to(grads)
global_stats = state.stats.global_stats
local_stats_flat = treedef.flatten_up_to(state.stats.local_stats)
stats_flat = [
_convert_to_parameter_stats(global_stats, local_stat)
for local_stat in local_stats_flat
]
new_stats_flat = jax.tree_map(
lambda g, s, p: _compute_stats(g, s, p, state.count),
grads_flat,
stats_flat,
params_flat,
)
outputs = jax.tree_map(
lambda g, s, p: _transform_grad(g, s, p, state.count),
grads_flat,
new_stats_flat,
params_flat,
)
updates_flat, new_stats_flat = list(zip(*outputs)) if outputs else ((), ())
updates = jax.tree_unflatten(treedef, updates_flat)
# Create new local_stats
new_local_stats_flat = [
_convert_from_parameter_stats(new_stat, local_stat)
for new_stat, local_stat in zip(new_stats_flat, local_stats_flat)
]
max_size = global_stats.statistics.shape[1]
new_padded_statistics = []
for stat in new_stats_flat:
new_padded_statistics.extend(
[pad_square_matrix(stat, max_size) for stat in stat.statistics]
)
# Create global stats
# TODO(rohananil): Preconditioner is not updated every step, so cost of
# stack/pad can be obviated away.
# Pad the statistics and preconditioner matrices to be a multiple of
# num devices.
# TODO(rohananil): Relax to only the size of the mesh axis where the dim
# is split on.
to_pad = -len(new_padded_statistics) % num_devices_for_pjit
if not new_padded_statistics:
to_pad = num_devices_for_pjit
stat_dtype = jnp.float32
else:
stat_dtype = new_padded_statistics[0].dtype
new_padded_statistics.extend(
[jnp.eye(max_size, dtype=stat_dtype) for _ in range(to_pad)]
)
new_stacked_padded_statistics = jnp.stack(new_padded_statistics)
new_stacked_padded_statistics = pjit.with_sharding_constraint(
new_stacked_padded_statistics, statistics_partition_spec
)
def _internal_inverse_pth_root_all():
preconditioners, errors = _matrix_inverse_pth_root_pjit(
new_stacked_padded_statistics,
global_stats.exponents,
statistics_partition_spec,
)
return preconditioners, errors
if preconditioning_compute_steps == 1:
new_preconditioners, errors = _internal_inverse_pth_root_all()
else:
# Passing statistics instead of preconditioners as they are similarly
# shaped tensors. Note statistics will be ignored as we are passing in
# a large init value for error.
preconditioners_init = new_stacked_padded_statistics
n = new_stacked_padded_statistics.shape[0]
errors_init = jnp.ones([n], jnp.float32) * inverse_failure_threshold
init_state = [preconditioners_init, errors_init]
perform_step = state.count % preconditioning_compute_steps == 0
new_preconditioners, errors = efficient_cond(
perform_step, _internal_inverse_pth_root_all, init_state
)
new_local_stats_flat = _add_error_into_local_stats(
new_local_stats_flat, errors, inverse_failure_threshold
)
new_local_stats = jax.tree_unflatten(treedef, new_local_stats_flat)
errors = errors.reshape((-1, 1, 1))
predicate = jnp.logical_or(
jnp.isnan(errors), errors >= inverse_failure_threshold
).astype(new_preconditioners.dtype)
# TODO(rohananil): Check for numerical instabilities.
new_conditional_preconditioners = (
predicate * global_stats.preconditioners
+ (1.0 - predicate) * new_preconditioners
)
new_global_stats = GlobalShardedParameterStats(
new_stacked_padded_statistics,
new_conditional_preconditioners,
global_stats.exponents,
)
new_shampoo_state = ShampooState(
count=state.count + 1,
stats=ShardedShampooStats(new_global_stats, new_local_stats),
)
return updates, new_shampoo_state | Transform the input gradient and update all statistics in sharded mode.
Args:
grads: the gradient tensors for the parameters.
state: a named tuple containing the state of the optimizer
params: the parameters that should be updated.
Returns:
A tuple containing the new parameters and the new optimizer state. | distributed_shampoo.sharded_update_fn | python | borisdayma/dalle-mini | tools/train/scalable_shampoo/distributed_shampoo.py | https://github.com/borisdayma/dalle-mini/blob/master/tools/train/scalable_shampoo/distributed_shampoo.py | Apache-2.0 |
def init_fn(params):
"""Initialise the optimiser's state."""
def _init(param):
preconditioner = preconditioner_from_params(param)
statistics = []
preconditioners = []
if not _skip_preconditioning(param):
shapes = preconditioner.shapes_for_preconditioners()
statistics = [
matrix_epsilon * jnp.eye(s[0], dtype=jnp.float32) for s in shapes
]
preconditioners = [jnp.eye(s[0], dtype=jnp.float32) for s in shapes]
diagonal_statistics = []
if _graft_type_has_diagonal_statistics():
diagonal_statistics = jnp.zeros_like(param)
diagonal_momentum = _quantize_momentum(jnp.zeros_like(param))
momentum = _quantize_momentum(jnp.zeros_like(param))
return ParameterStats(
_quantize_diagonal_statistics(diagonal_statistics),
_maybe_quantize_statistics(statistics),
_maybe_quantize_preconditioners(preconditioners),
diagonal_momentum,
momentum,
init_training_metrics(len(statistics)),
)
return ShampooState(
count=jnp.zeros([], jnp.int32), stats=jax.tree_map(_init, params)
) | Initialise the optimiser's state. | distributed_shampoo.init_fn | python | borisdayma/dalle-mini | tools/train/scalable_shampoo/distributed_shampoo.py | https://github.com/borisdayma/dalle-mini/blob/master/tools/train/scalable_shampoo/distributed_shampoo.py | Apache-2.0 |
def _compute_stats(grad, state, param, step):
"""Compute per-parameter statistics."""
preconditioner = preconditioner_from_params(param)
new_statistics = [[]] * len(state.statistics)
w1 = beta2
w2 = beta2 if beta2 == 1.0 else (1.0 - beta2)
if not _skip_preconditioning(param):
def compute_updated_statistics():
return preconditioner.updated_statistics_from_grad(
state.statistics,
grad,
w1=w1,
w2=w2,
to_float=_to_float,
from_float=lambda x: _maybe_quantize_statistics([x])[0],
precision=tensordot_precision,
)
if statistics_compute_steps > 1:
perform_step = step % statistics_compute_steps == 0
init_state = state.statistics
new_statistics = list(
efficient_cond(perform_step, compute_updated_statistics, init_state)
)
else:
new_statistics = compute_updated_statistics()
return ParameterStats(
state.diagonal_statistics,
new_statistics,
state.preconditioners,
state.diagonal_momentum,
state.momentum,
state.training_metrics,
) | Compute per-parameter statistics. | distributed_shampoo._compute_stats | python | borisdayma/dalle-mini | tools/train/scalable_shampoo/distributed_shampoo.py | https://github.com/borisdayma/dalle-mini/blob/master/tools/train/scalable_shampoo/distributed_shampoo.py | Apache-2.0 |
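In equation form, the statistics update computed above is (writing $\beta_2$ for `beta2`; one statistic per preconditioned dimension $i$ of each block):

$$
S_t^{(i)} = w_1\, S_{t-1}^{(i)} + w_2\, G_t^{(i)\top} G_t^{(i)},
\qquad (w_1, w_2) =
\begin{cases}
(1,\ 1) & \beta_2 = 1 \ \text{(plain accumulation)}\\
(\beta_2,\ 1-\beta_2) & \text{otherwise (exponential moving average),}
\end{cases}
$$

where $G_t^{(i)}$ flattens the gradient block along every axis except $i$.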
def _pmap_compute_preconditioners(
states,
step,
statistics,
num_statistics_per_state,
original_shapes,
exponents,
max_size,
prev_preconditioners,
):
"""Computes preconditioners for given statistics in states in PMAP mode.
Args:
states: A list of optimizer states.
step: Current step number
statistics: A list of statistics for all variables (for every dim)
num_statistics_per_state: Number of statistics per state to reconstruct
output states.
original_shapes: A list of shapes of the statistics.
exponents: Exponent power to use for inverse-pth roots.
max_size: Maximum dim of the statistics to pad.
prev_preconditioners: Previously available preconditioner.
Returns:
New optimizer states after computing the preconditioner.
"""
if batch_axis_name:
num_devices = lax.psum(1, batch_axis_name)
else:
num_devices = 1
num_statistics = len(statistics)
# Pad statistics and exponents to next multiple of num_devices.
packed_statistics = [pad_square_matrix(stat, max_size) for stat in statistics]
to_pad = -num_statistics % num_devices
packed_statistics.extend(
[jnp.eye(max_size, dtype=packed_statistics[0].dtype) for _ in range(to_pad)]
)
exponents.extend([1 for _ in range(to_pad)])
if not packed_statistics:
return states
all_statistics = batch(packed_statistics, num_devices)
all_exponents = batch(exponents, num_devices)
def _internal_inverse_pth_root_all():
if batch_axis_name:
current_replica = lax.axis_index(batch_axis_name)
preconditioners, errors = _matrix_inverse_pth_root_vmap(
all_statistics[current_replica], all_exponents[current_replica]
)
preconditioners = jax.lax.all_gather(preconditioners, batch_axis_name)
errors = jax.lax.all_gather(errors, batch_axis_name)
preconditioners_flat = unbatch(preconditioners)
errors_flat = unbatch(errors)
else:
preconditioners, errors = _matrix_inverse_pth_root_vmap(
all_statistics[0], all_exponents[0]
)
preconditioners_flat = unbatch(jnp.stack([preconditioners]))
errors_flat = unbatch(jnp.stack([errors]))
return preconditioners_flat, errors_flat
if preconditioning_compute_steps == 1:
preconditioners_flat, errors_flat = _internal_inverse_pth_root_all()
else:
# Passing statistics instead of preconditioners as they are similarly
# shaped tensors. Note statistics will be ignored as we are passing in
# a large init value for error.
preconditioners_init = packed_statistics
errors_init = [inverse_failure_threshold] * len(packed_statistics)
init_state = [preconditioners_init, errors_init]
perform_step = step % preconditioning_compute_steps == 0
preconditioners_flat, errors_flat = efficient_cond(
perform_step, _internal_inverse_pth_root_all, init_state
)
def _skip(error):
condition = jnp.logical_or(
jnp.isnan(error), error >= inverse_failure_threshold
)
return condition.astype(error.dtype)
def _select_preconditioner(error, new_p, old_p):
return lax.cond(
_skip(error), lambda _: old_p, lambda _: new_p, operand=None
)
new_preconditioners_flat = []
new_errors_flat = []
for p, shape, prev_p, error in zip(
preconditioners_flat, original_shapes, prev_preconditioners, errors_flat
):
new_preconditioners_flat.append(
_select_preconditioner(error, p[: shape[0], : shape[1]], prev_p)
)
new_errors_flat.append(error)
assert len(states) == len(num_statistics_per_state)
assert len(new_preconditioners_flat) == num_statistics
assert len(new_errors_flat) == num_statistics
# Add back empty preconditioners so that we can set the optimizer state.
preconditioners_for_states = []
idx = 0
errors_for_states = []
for num_statistics, state in zip(num_statistics_per_state, states):
if num_statistics == 0:
preconditioners_for_states.append([])
errors_for_states.append(jnp.array(0, jnp.float32))
else:
preconditioners_for_state = new_preconditioners_flat[
idx : idx + num_statistics
]
assert len(state.statistics) == len(preconditioners_for_state)
preconditioners_for_states.append(preconditioners_for_state)
errors_for_state = jnp.stack(
new_errors_flat[idx : idx + num_statistics]
)
assert len(state.statistics) == len(errors_for_state)
errors_for_states.append(errors_for_state)
idx += num_statistics
new_states = []
for state, new_preconditioners, new_errors in zip(
states, preconditioners_for_states, errors_for_states
):
if state.statistics:
new_errors = jnp.where(
jnp.logical_and(
new_errors > 0.0, new_errors != inverse_failure_threshold
),
new_errors,
state.training_metrics.inverse_pth_root_errors,
)
new_training_metrics = TrainingMetrics(new_errors)
new_states.append(
ParameterStats(
state.diagonal_statistics,
state.statistics,
new_preconditioners,
state.diagonal_momentum,
state.momentum,
new_training_metrics,
)
)
return new_states | Computes preconditioners for given statistics in states in PMAP mode.
Args:
states: A list of optimizer states.
step: Current step number
statistics: A list of statistics for all variables (for every dim)
num_statistics_per_state: Number of statistics per state to reconstruct
output states.
original_shapes: A list of shapes of the statistics.
exponents: Exponent power to use for inverse-pth roots.
max_size: Maximum dim of the statistics to pad.
prev_preconditioners: Previously available preconditioner.
Returns:
New optimizer states after computing the preconditioner. | distributed_shampoo._pmap_compute_preconditioners | python | borisdayma/dalle-mini | tools/train/scalable_shampoo/distributed_shampoo.py | https://github.com/borisdayma/dalle-mini/blob/master/tools/train/scalable_shampoo/distributed_shampoo.py | Apache-2.0 |
def _pmap_quantized_compute_preconditioners(
states,
step,
statistics,
num_statistics_per_state,
original_shapes,
exponents,
max_size,
prev_preconditioners,
):
"""Computes preconditioners for given statistics in states in PMAP mode.
For quantization, each statistic is represented by three values:
quantized matrix, diagonal, and bucket sizes; we run inverse pth-roots
without ever recreating the original matrix in f32.
Args:
states: A list of optimizer states.
step: Current step number
statistics: A list of statistics for all variables (for every dim)
num_statistics_per_state: Number of statistics per state to reconstruct
output states.
original_shapes: A list of shapes of the statistics.
exponents: Exponent power to use for inverse-pth roots.
max_size: Maximum dim of the statistics to pad.
prev_preconditioners: Previously available preconditioner.
Returns:
New optimizer states after computing the preconditioner.
"""
num_devices = lax.psum(1, batch_axis_name)
num_statistics = len(statistics)
quantized_dtype = quantized_dtype_for_second_moment_statistics_buffers()
# Complexity here is around: shapes needing to be statically shaped,
# our custom quantization type requires a different type of packing.
# Parallel tensors:
# quantized [dxd]
# diagonals [d] f32
# bucket_sizes [d] f32
packed_quantized_statistics = [
pad_square_matrix(stat.quantized, max_size) for stat in statistics
]
packed_quantized_diagonals = [
pad_vector(stat.diagonal, max_size) for stat in statistics
]
packed_quantized_bucket_sizes = [
pad_vector(stat.bucket_size, max_size) for stat in statistics
]
to_pad = -num_statistics % num_devices
padded_eye = jnp.eye(max_size, dtype=jnp.float32)
quantized_eye = QuantizedValue.from_float_value(
padded_eye, quantized_dtype, True
)
packed_quantized_statistics.extend(
[quantized_eye.quantized for _ in range(to_pad)]
)
packed_quantized_diagonals.extend(
[quantized_eye.diagonal for _ in range(to_pad)]
)
packed_quantized_bucket_sizes.extend(
[quantized_eye.bucket_size for _ in range(to_pad)]
)
exponents.extend([1 for _ in range(to_pad)])
if not packed_quantized_statistics:
return states
all_quantized_statistics = batch(packed_quantized_statistics, num_devices)
all_quantized_diagonals = batch(packed_quantized_diagonals, num_devices)
all_quantized_bucket_sizes = batch(packed_quantized_bucket_sizes, num_devices)
all_exponents = batch(exponents, num_devices)
def _internal_inverse_pth_root_all():
current_replica = lax.axis_index(batch_axis_name)
(
quantized_preconditioners,
quantized_diagonals,
quantized_bucket_sizes,
errors,
) = _quantized_matrix_inverse_pth_root_vmap(
all_quantized_statistics[current_replica],
all_quantized_diagonals[current_replica],
all_quantized_bucket_sizes[current_replica],
all_exponents[current_replica],
)
quantized_preconditioners = jax.lax.all_gather(
quantized_preconditioners, batch_axis_name
)
quantized_diagonals = jax.lax.all_gather(
quantized_diagonals, batch_axis_name
)
quantized_bucket_sizes = jax.lax.all_gather(
quantized_bucket_sizes, batch_axis_name
)
errors = jax.lax.all_gather(errors, batch_axis_name)
quantized_preconditioners_flat = unbatch(quantized_preconditioners)
quantized_diagonals_flat = unbatch(quantized_diagonals)
quantized_bucket_sizes_flat = unbatch(quantized_bucket_sizes)
errors_flat = unbatch(errors)
return (
quantized_preconditioners_flat,
quantized_diagonals_flat,
quantized_bucket_sizes_flat,
errors_flat,
)
if preconditioning_compute_steps == 1:
(
quantized_preconditioners_flat,
quantized_diagonals_flat,
quantized_bucket_sizes_flat,
errors_flat,
) = _internal_inverse_pth_root_all()
else:
# Passing statistics instead of preconditioners as they are similarly
# shaped tensors. Note statistics will be ignored as we are passing in
# a large init value for error.
quantized_preconditioners_init = packed_quantized_statistics
quantized_diagonals_init = packed_quantized_diagonals
quantized_bucket_sizes_init = packed_quantized_bucket_sizes
errors_init = [inverse_failure_threshold] * len(
quantized_preconditioners_init
)
init_state = [
quantized_preconditioners_init,
quantized_diagonals_init,
quantized_bucket_sizes_init,
errors_init,
]
perform_step = step % preconditioning_compute_steps == 0
(
quantized_preconditioners_flat,
quantized_diagonals_flat,
quantized_bucket_sizes_flat,
errors_flat,
) = efficient_cond(perform_step, _internal_inverse_pth_root_all, init_state)
def _skip(error):
condition = jnp.logical_or(
jnp.isnan(error), error >= inverse_failure_threshold
)
return condition.astype(error.dtype)
def _select_preconditioner(error, new_p, old_p):
return lax.cond(
_skip(error), lambda _: old_p, lambda _: new_p, operand=None
)
new_quantized_preconditioners_flat = []
new_quantized_diagonals_flat = []
new_quantized_bucket_sizes_flat = []
new_errors_flat = []
for p, d, b, shape, prev_p, error in zip(
quantized_preconditioners_flat,
quantized_diagonals_flat,
quantized_bucket_sizes_flat,
original_shapes,
prev_preconditioners,
errors_flat,
):
new_quantized_preconditioners_flat.append(
_select_preconditioner(
error, p[: shape[0], : shape[1]], prev_p.quantized
)
)
new_quantized_diagonals_flat.append(
_select_preconditioner(error, d[: shape[0]], prev_p.diagonal)
)
new_quantized_bucket_sizes_flat.append(
_select_preconditioner(error, b[: shape[0]], prev_p.bucket_size)
)
new_errors_flat.append(error)
assert len(states) == len(num_statistics_per_state)
assert len(new_quantized_preconditioners_flat) == num_statistics
assert len(new_quantized_diagonals_flat) == num_statistics
assert len(new_quantized_bucket_sizes_flat) == num_statistics
# Add back empty preconditioners so that we can set the optimizer state.
preconditioners_for_states = []
errors_for_states = []
idx = 0
for num_statistics, state in zip(num_statistics_per_state, states):
if num_statistics == 0:
preconditioners_for_states.append([])
errors_for_states.append(jnp.array(0, jnp.float32))
else:
quantized_preconditioners_for_state = (
new_quantized_preconditioners_flat[idx : idx + num_statistics]
)
quantized_diagonals_for_state = new_quantized_diagonals_flat[
idx : idx + num_statistics
]
quantized_bucket_sizes_for_state = new_quantized_bucket_sizes_flat[
idx : idx + num_statistics
]
errors_for_state = jnp.stack(
new_errors_flat[idx : idx + num_statistics]
)
assert len(state.statistics) == len(quantized_preconditioners_for_state)
assert len(state.statistics) == len(quantized_diagonals_for_state)
assert len(state.statistics) == len(quantized_bucket_sizes_for_state)
assert len(state.statistics) == len(errors_for_state)
quantized_preconditioners = []
for qv, qd, qb in zip(
quantized_preconditioners_for_state,
quantized_diagonals_for_state,
quantized_bucket_sizes_for_state,
):
quantized_preconditioners.append(
QuantizedValue(qv, qd, qb, qv.dtype, True, list(qv.shape))
)
preconditioners_for_states.append(quantized_preconditioners)
errors_for_states.append(errors_for_state)
idx += num_statistics
new_states = []
for state, new_preconditioners, new_errors in zip(
states, preconditioners_for_states, errors_for_states
):
if state.statistics:
new_errors = jnp.where(
jnp.logical_and(
new_errors > 0.0, new_errors != inverse_failure_threshold
),
new_errors,
state.training_metrics.inverse_pth_root_errors,
)
new_training_metrics = TrainingMetrics(new_errors)
new_states.append(
ParameterStats(
state.diagonal_statistics,
state.statistics,
new_preconditioners,
state.diagonal_momentum,
state.momentum,
new_training_metrics,
)
)
return new_states | Computes preconditioners for given statistics in states in PMAP mode.
For quantization, each statistic is represented by three values:
quantized matrix, diagonal, and bucket sizes; we run inverse pth-roots
without ever recreating the original matrix in f32.
Args:
states: A list of optimizer states.
step: Current step number
statistics: A list of statistics for all variables (for every dim)
num_statistics_per_state: Number of statistics per state to reconstruct
output states.
original_shapes: A list of shapes of the statistics.
exponents: Exponent power to use for inverse-pth roots.
max_size: Maximum dim of the statistics to pad.
prev_preconditioners: Previously available preconditioner.
Returns:
New optimizer states after computing the preconditioner. | distributed_shampoo._pmap_quantized_compute_preconditioners | python | borisdayma/dalle-mini | tools/train/scalable_shampoo/distributed_shampoo.py | https://github.com/borisdayma/dalle-mini/blob/master/tools/train/scalable_shampoo/distributed_shampoo.py | Apache-2.0 |
def _pjit_compute_preconditioners(
states,
step,
statistics,
num_statistics_per_state,
original_shapes,
exponents,
max_size,
prev_preconditioners,
):
"""Computes preconditioners for given statistics in states in PJIT mode.
Args:
states: A list of optimizer states.
step: Current step number
statistics: A list of statistics for all variables (for every dim)
num_statistics_per_state: Number of statistics per state to reconstruct
output states.
original_shapes: A list of shapes of the statistics.
exponents: Exponent power to use for inverse-pth roots.
max_size: Maximum dim of the statistics to pad.
prev_preconditioners: Previously available preconditioner.
Returns:
New optimizer states after computing the preconditioner.
"""
num_statistics = len(statistics)
to_pad = -num_statistics % num_devices_for_pjit
padded_statistics = [pad_square_matrix(stat, max_size) for stat in statistics]
padded_statistics.extend(
[jnp.eye(max_size, dtype=padded_statistics[0].dtype) for _ in range(to_pad)]
)
exponents.extend([1 for _ in range(to_pad)])
all_statistics = jnp.stack(padded_statistics)
all_exponents = jnp.stack(exponents)
def _internal_inverse_pth_root_all():
preconditioners, errors = _matrix_inverse_pth_root_pjit(
all_statistics, all_exponents
)
b1 = preconditioners.shape[0]
def split(batched_values):
return [
jnp.squeeze(v)
for v in jnp.split(batched_values, indices_or_sections=b1, axis=0)
]
return split(preconditioners), split(errors)
if preconditioning_compute_steps == 1:
preconditioners_flat, errors_flat = _internal_inverse_pth_root_all()
else:
# Passing statistics instead of preconditioners as they are similarly
# shaped tensors. Note statistics will be ignored as we are passing in
# a large init value for error.
preconditioners_init = padded_statistics
errors_init = [inverse_failure_threshold] * len(padded_statistics)
init_state = [preconditioners_init, errors_init]
perform_step = step % preconditioning_compute_steps == 0
preconditioners_flat, errors_flat = efficient_cond(
perform_step, _internal_inverse_pth_root_all, init_state
)
def _skip(error):
condition = jnp.logical_or(
jnp.isnan(error), error >= inverse_failure_threshold
)
return condition.astype(error.dtype)
def _select_preconditioner(error, new_p, old_p):
return lax.cond(
_skip(error), lambda _: old_p, lambda _: new_p, operand=None
)
new_preconditioners_flat = []
new_errors_flat = []
for p, shape, prev_p, error in zip(
preconditioners_flat, original_shapes, prev_preconditioners, errors_flat
):
new_preconditioners_flat.append(
_select_preconditioner(error, p[: shape[0], : shape[1]], prev_p)
)
new_errors_flat.append(error)
assert len(states) == len(num_statistics_per_state)
assert len(new_preconditioners_flat) == num_statistics
# Add back empty preconditioners so that we can set the optimizer state.
preconditioners_for_states = []
errors_for_states = []
idx = 0
for num_statistics, state in zip(num_statistics_per_state, states):
if num_statistics == 0:
preconditioners_for_states.append([])
errors_for_states.append(jnp.array(0, jnp.float32))
else:
preconditioners_for_state = new_preconditioners_flat[
idx : idx + num_statistics
]
assert len(state.statistics) == len(preconditioners_for_state)
preconditioners_for_states.append(preconditioners_for_state)
errors_for_state = jnp.stack(
new_errors_flat[idx : idx + num_statistics]
)
assert len(state.statistics) == len(errors_for_state)
errors_for_states.append(errors_for_state)
idx += num_statistics
new_states = []
for state, new_preconditioners, new_errors in zip(
states, preconditioners_for_states, errors_for_states
):
if state.statistics:
new_errors = jnp.where(
jnp.logical_and(
new_errors > 0.0, new_errors != inverse_failure_threshold
),
new_errors,
state.training_metrics.inverse_pth_root_errors,
)
new_training_metrics = TrainingMetrics(new_errors)
new_states.append(
ParameterStats(
state.diagonal_statistics,
state.statistics,
new_preconditioners,
state.diagonal_momentum,
state.momentum,
new_training_metrics,
)
)
return new_states | Computes preconditioners for given statistics in states in PJIT mode.
Args:
states: A list of optimizer states.
step: Current step number
statistics: A list of statistics for all variables (for every dim)
num_statistics_per_state: Number of statistics per state to reconstruct
output states.
original_shapes: A list of shapes of the statistics.
exponents: Exponent power to use for inverse-pth roots.
max_size: Maximum dim of the statistics to pad.
prev_preconditioners: Previously available preconditioner.
Returns:
New optimizer states after computing the preconditioner. | distributed_shampoo._pjit_compute_preconditioners | python | borisdayma/dalle-mini | tools/train/scalable_shampoo/distributed_shampoo.py | https://github.com/borisdayma/dalle-mini/blob/master/tools/train/scalable_shampoo/distributed_shampoo.py | Apache-2.0 |
def _compute_preconditioners(states, params, step):
"""Computes preconditioners for given statistics in states.
Args:
states: A list of optimizer states.
params: A list of params.
step: Current step number
Returns:
New optimizer states after computing the preconditioner.
"""
statistics = []
num_statistics_per_state = []
original_shapes = []
exponents = []
max_size = 0
prev_preconditioners = []
for state, param in zip(states, params):
num_statistics = len(state.statistics)
num_statistics_per_state.append(num_statistics)
original_shapes_for_state = []
if num_statistics > 0:
preconditioner = preconditioner_from_params(param)
for statistic in state.statistics:
exponents.append(
preconditioner.exponent_for_preconditioner()
if exponent_override == 0
else exponent_override
)
original_shapes_for_state.append(statistic.shape)
max_size = max(max_size, statistic.shape[0])
statistics.extend(state.statistics)
prev_preconditioners.extend(state.preconditioners)
original_shapes.extend(original_shapes_for_state)
if not shard_optimizer_states:
# Quantization is only enabled if batch_axis_name is not set.
quantized_dtype = quantized_dtype_for_second_moment_statistics_buffers()
if quantized_dtype == jnp.float32:
return _pmap_compute_preconditioners(
states,
step,
statistics,
num_statistics_per_state,
original_shapes,
exponents,
max_size,
prev_preconditioners,
)
else:
return _pmap_quantized_compute_preconditioners(
states,
step,
statistics,
num_statistics_per_state,
original_shapes,
exponents,
max_size,
prev_preconditioners,
)
else:
return _pjit_compute_preconditioners(
states,
step,
statistics,
num_statistics_per_state,
original_shapes,
exponents,
max_size,
prev_preconditioners,
) | Computes preconditioners for given statistics in states.
Args:
states: A list of optimizer states.
params: A list of params.
step: Current step number
Returns:
New optimizer states after computing the preconditioner. | distributed_shampoo._compute_preconditioners | python | borisdayma/dalle-mini | tools/train/scalable_shampoo/distributed_shampoo.py | https://github.com/borisdayma/dalle-mini/blob/master/tools/train/scalable_shampoo/distributed_shampoo.py | Apache-2.0 |
def _transform_grad(grad, state, param, step):
"""Transform per-parameter gradients."""
preconditioner = preconditioner_from_params(param)
sgd_update = grad
new_diagonal_statistics = state.diagonal_statistics.to_float()
if (
graft_type == GraftingType.ADAGRAD
or graft_type == GraftingType.ADAGRAD_NORMALIZED
):
scaled_grad = grad
if graft_type == GraftingType.ADAGRAD_NORMALIZED:
scaled_grad = grad / (jnp.linalg.norm(grad) + 1e-16)
new_diagonal_statistics = state.diagonal_statistics.to_float() + jnp.square(
scaled_grad
)
adagrad_update = scaled_grad / (
jnp.sqrt(new_diagonal_statistics) + diagonal_epsilon
)
grafting_update = adagrad_update
elif (
graft_type == GraftingType.RMSPROP
or graft_type == GraftingType.RMSPROP_NORMALIZED
):
scaled_grad = grad
if graft_type == GraftingType.RMSPROP_NORMALIZED:
scaled_grad = grad / (jnp.linalg.norm(grad) + 1e-16)
w1 = beta2
w2 = beta2 if beta2 == 1.0 else (1.0 - beta2)
new_diagonal_statistics = (
w1 * state.diagonal_statistics.to_float() + w2 * jnp.square(scaled_grad)
)
rmsprop_update = scaled_grad / (
jnp.sqrt(new_diagonal_statistics) + diagonal_epsilon
)
if clip_by_scaled_gradient_norm:
scaled_grad_norm = jnp.linalg.norm(rmsprop_update) / (
jnp.sqrt(float(rmsprop_update.size))
)
clipping_denom = jnp.maximum(
1.0, scaled_grad_norm / clip_by_scaled_gradient_norm
)
rmsprop_update /= clipping_denom
grafting_update = rmsprop_update
elif graft_type == GraftingType.SGD:
grafting_update = sgd_update
else:
grafting_update = jnp.ones_like(sgd_update) * jnp.sign(sgd_update)
lr = learning_rate
if callable(learning_rate):
lr = learning_rate(step)
preconditioner_multiplier = lr if not decoupled_learning_rate else 1.0
grafting_update = grafting_update * preconditioner_multiplier
precond_grad = grad
if not _skip_preconditioning(param):
precond_grad = preconditioner.preconditioned_grad(
precond_grad, _maybe_dequantize_preconditioners(state.preconditioners)
)
else:
precond_grad = grafting_update
grafting_update_norm = jnp.linalg.norm(grafting_update)
precond_grad_norm = jnp.linalg.norm(precond_grad)
multiplier = grafting_update_norm / (precond_grad_norm + 1e-16)
shampoo_update = precond_grad * multiplier
shampoo_update_with_wd = shampoo_update
grafting_update_with_wd = grafting_update
if weight_decay != 0 and not decoupled_weight_decay:
shampoo_update_with_wd = shampoo_update + weight_decay * param
grafting_update_with_wd = grafting_update + weight_decay * param
w = (1.0 - beta1) if moving_average_for_momentum else 1.0
shampoo_update_with_wd_momentum = (
state.momentum.to_float() * beta1 + w * shampoo_update_with_wd
)
grafting_update_with_wd_momentum = (
state.diagonal_momentum.to_float() * beta1 + w * grafting_update_with_wd
)
run_shampoo = (step >= start_preconditioning_step).astype(
grafting_update_with_wd_momentum.dtype
)
momentum_update = (
run_shampoo * shampoo_update_with_wd_momentum
+ (1.0 - run_shampoo) * grafting_update_with_wd_momentum
)
wd_update = (
run_shampoo * shampoo_update_with_wd
+ (1.0 - run_shampoo) * grafting_update_with_wd
)
nesterov_momentum_update = momentum_update
if nesterov:
nesterov_momentum_update = w * wd_update + beta1 * momentum_update
if weight_decay != 0 and decoupled_weight_decay:
nesterov_momentum_update = (
nesterov_momentum_update + lr * weight_decay * param
)
momentum_multiplier = lr if decoupled_learning_rate else 1.0
transformed_update = -1.0 * momentum_multiplier * nesterov_momentum_update
new_diagonal_momentum = grafting_update_with_wd_momentum
new_momentum = shampoo_update_with_wd_momentum
param_stats = ParameterStats(
_quantize_diagonal_statistics(new_diagonal_statistics),
state.statistics,
state.preconditioners,
_quantize_momentum(new_diagonal_momentum),
_quantize_momentum(new_momentum),
state.training_metrics,
)
return transformed_update, param_stats | Transform per-parameter gradients. | distributed_shampoo._transform_grad | python | borisdayma/dalle-mini | tools/train/scalable_shampoo/distributed_shampoo.py | https://github.com/borisdayma/dalle-mini/blob/master/tools/train/scalable_shampoo/distributed_shampoo.py | Apache-2.0 |
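The core of the grafting logic above is norm matching: the Shampoo direction is rescaled to the magnitude of the grafted (SGD/AdaGrad/RMSProp) update before weight decay and momentum are applied. In equation form:

$$
u_t \;=\; \frac{\lVert d_t \rVert_2}{\lVert \bar g_t \rVert_2 + 10^{-16}}\; \bar g_t ,
$$

where $\bar g_t$ is the Shampoo-preconditioned gradient and $d_t$ is the grafting update, so the preconditioned direction inherits the grafting update's step size.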
def update_fn(grads, state, params):
"""Transform the input gradient and update all statistics.
Args:
grads: the gradient tensors for the parameters
and any custom gradients for preconditioners.
state: a named tuple containing the state of the optimizer
params: the parameters that should be updated.
Returns:
A tuple containing the new parameters and the new optimizer state.
"""
params_flat, treedef = jax.tree_flatten(params)
stats_flat = treedef.flatten_up_to(state.stats)
grads_flat = treedef.flatten_up_to(grads)
stats_grads = grads_flat
new_stats_flat = jax.tree_map(
lambda g, s, p: _compute_stats(g, s, p, state.count),
stats_grads,
stats_flat,
params_flat,
)
new_stats_flat = _compute_preconditioners(
new_stats_flat, params_flat, state.count
)
outputs = jax.tree_map(
lambda g, s, p: _transform_grad(g, s, p, state.count),
grads_flat,
new_stats_flat,
params_flat,
)
updates_flat, new_stats_flat = list(zip(*outputs)) if outputs else ((), ())
updates = jax.tree_unflatten(treedef, updates_flat)
new_stats = jax.tree_unflatten(treedef, new_stats_flat)
new_state = ShampooState(count=state.count + 1, stats=new_stats)
return updates, new_state | Transform the input gradient and update all statistics.
Args:
grads: the gradient tensors for the parameters
and any custom gradients for preconditioners.
state: a named tuple containing the state of the optimizer
params: the parameters that should be updated.
Returns:
A tuple containing the new parameters and the new optimizer state. | distributed_shampoo.update_fn | python | borisdayma/dalle-mini | tools/train/scalable_shampoo/distributed_shampoo.py | https://github.com/borisdayma/dalle-mini/blob/master/tools/train/scalable_shampoo/distributed_shampoo.py | Apache-2.0 |
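update_fn above follows the standard optax pattern of flatten, per-leaf transform, unflatten. A minimal sketch of that pattern in isolation; scale_update is a hypothetical stand-in for the real per-parameter transform:
import jax
import jax.numpy as jnp
def scale_update(grad, param):
    # Stand-in for _transform_grad: a plain fixed-step SGD update.
    del param
    return -0.01 * grad
params = {"w": jnp.ones((2, 2)), "b": jnp.zeros((2,))}
grads = jax.tree_util.tree_map(jnp.ones_like, params)
params_flat, treedef = jax.tree_util.tree_flatten(params)
grads_flat = treedef.flatten_up_to(grads)
updates_flat = [scale_update(g, p) for g, p in zip(grads_flat, params_flat)]
updates = jax.tree_util.tree_unflatten(treedef, updates_flat)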
def distributed_shampoo(
learning_rate,
block_size,
beta1=0.9,
beta2=0.999,
diagonal_epsilon=1e-10,
matrix_epsilon=1e-6,
weight_decay=0.0,
start_preconditioning_step=5,
preconditioning_compute_steps=1,
statistics_compute_steps=1,
best_effort_shape_interpretation=True,
graft_type=GraftingType.SGD,
nesterov=True,
exponent_override=0,
# Pass pmap 'batch axis name' in pmap mode.
batch_axis_name=None,
### Only set following 3 params in pjit/spmd mode.
### WARNING: Experimental
statistics_partition_spec=None,
preconditioner_partition_spec=None,
num_devices_for_pjit=None,
shard_optimizer_states=False,
###
### Experimental memory reduction mode
best_effort_memory_usage_reduction=False,
###
inverse_failure_threshold=0.1,
moving_average_for_momentum=False,
skip_preconditioning_dim_size_gt=4096,
clip_by_scaled_gradient_norm=None,
precision=lax.Precision.HIGHEST,
tensordot_precision=None,
relative_matrix_epsilon=True,
merge_small_dims_block_size=4096,
lobpcg_topk_precondition=0,
lobpcg_max_iter=0,
precondtioner_type=PreconditionerType.ALL,
skip_preconditioning_rank_lt=1,
decoupled_learning_rate=True,
decoupled_weight_decay=False,
):
"""Distributed Shampoo optimizer.
Distributed Shampoo is a second-order preconditioned method (concretely, a
variant of full-matrix Adagrad), that provides significant convergence and
wall-clock time improvements compared to conventional first-order methods,
and that has been shown to scale to large state-of-the-art deep learning
models.
References:
Scalable Second Order Optimization for Deep Learning,
Rohan Anil, Vineet Gupta, Tomer Koren, Kevin Regan, Yoram Singer
Preprint: https://arxiv.org/abs/2002.09018
Args:
learning_rate: the step size used to update the parameters.
block_size: Block size for large layers (if > 0). Preconditioning compute
operation is cubic in the dimension of the tensor. Block size allows us to
chunk the layers into sub-layers of maximal dimension dictated by this
value. Use 128 as default (increase if you have compute budget).
beta1: momentum parameter.
beta2: second moment averaging parameter.
diagonal_epsilon: epsilon for diagonal adagrad (only if layerwise grafting
to AdaGrad is enabled).
matrix_epsilon: epsilon to add to statistics before computing inverse pth
root. If you are running in f32 precision for inverse pth root
(recommended today) this can go up to 1e-6. If you have the latest hardware
with native f64 precision, set this up to 1e-12.
weight_decay: Weight decay for regularization.
start_preconditioning_step: When to start the Shampoo update, before which the
diagonal update is used. This is because we don't have enough information
to do a stable inverse.
preconditioning_compute_steps: How often to compute preconditioner.
Performance tuning params for controlling memory and compute requirements.
Ideally set this and statistics_compute_steps params to 1.
statistics_compute_steps: How often to compute statistics.
best_effort_shape_interpretation: If there are some small dimensions,
collapse them e.g. [1, 2, 512, 1, 2048, 1, 3, 4] --> [1024, 2048, 12] if
block = 1024, [1, 2, 768, 1, 2048] --> [2, 768, 2048]
graft_type: Grafting is a technique to fix the layerwise scale of the Shampoo
optimizer. This allows us to plug the Shampoo optimizer into settings
where SGD/AdaGrad is already well tuned.
nesterov: Nesterov momentum.
exponent_override: Override the exponent used in matrix inverse.
batch_axis_name: labeled axis over pmap for the data-parallel training the
optimizer is used for.
statistics_partition_spec: PartitionSpec to be used in sharded mode.
preconditioner_partition_spec: PartitionSpec to be used in sharded mode.
num_devices_for_pjit: Number of devices to parallelize over when using pjit.
shard_optimizer_states: Shard optimizer states to save memory in model
parallel training.
best_effort_memory_usage_reduction: Best effort memory usage reduction:
- diagonal_statistics -> jnp.bfloat16
- momentum buffers (2x) -> jnp.int8
- statistics, preconditioners -> jnp.int16 + diagonals
inverse_failure_threshold: numerics are hard and inverses fail sometimes; we
determine that using this threshold.
moving_average_for_momentum: Whether to use moving average for momentum
instead of exponential moving average.
skip_preconditioning_dim_size_gt: Skip if preconditioning dim size is
greater than this value.
clip_by_scaled_gradient_norm: Clip by scaled gradient norm (only useful when
using RMSProp Grafting).
precision: precision XLA related flag, the available options are: a)
lax.Precision.DEFAULT (better step time, but not precise) b)
lax.Precision.HIGH (increased precision, slower) c) lax.Precision.HIGHEST
(best possible precision, slowest)
tensordot_precision: Optional precision to use for the tensordot operation
when computing statistics (e.g., G Gᵀ). Same options as `precision` above.
relative_matrix_epsilon: Whether to use an epsilon relative to the max
eigenvalue when computing the inverse-pth root.
merge_small_dims_block_size: Used as the maximum block size
to merge the shapes.
lobpcg_topk_precondition: If nonzero, specifies the number of top
eigenvectors to subtract out before performing LOBPCG. Note this makes
relative_matrix_epsilon essentially free.
lobpcg_max_iter: Number of LOBPCG iterations, if zero defaults to
`lobpcg_topk_precondition`.
precondtioner_type: Preconditioner type to select all, left only or right
only preconditioners.
skip_preconditioning_rank_lt: Skips preconditioning for parameters with
rank less than this value.
decoupled_learning_rate: If True, use decoupled learning rate, otherwise
couple it with preconditioned gradient computation. (Default True)
decoupled_weight_decay: If True, use decoupled weight decay, otherwise
couple with weight decay. (Default False)
Returns:
a GradientTransformation.
"""
def _graft_type_has_diagonal_statistics():
"""Returns True if using diagonal firt order method for grafting."""
return graft_type != GraftingType.SGD and graft_type != GraftingType.SQRT_N
def quantized_dtype_for_momentum_buffers(var):
return (
jnp.int8
if best_effort_memory_usage_reduction and len(var.shape) > 1
else jnp.float32
)
# Preconditioner and statistics are both stored as int16 in this mode.
# We take out the diagonal to make quantization easier.
def quantized_dtype_for_second_moment_statistics_buffers():
return (
jnp.int16
if best_effort_memory_usage_reduction and batch_axis_name
else jnp.float32
)
# Preconditioner and statistics are both stored as int16 in this mode.
# We take out the diagonal to make quantization easier.
def quantized_dtype_for_second_moment_preconditioner_buffers():
return (
jnp.int16
if best_effort_memory_usage_reduction and batch_axis_name
else jnp.float32
)
def _to_float(maybe_quantized):
if isinstance(maybe_quantized, QuantizedValue):
return maybe_quantized.to_float()
else:
return maybe_quantized
def _maybe_quantize_statistics(statistics_list):
return _maybe_quantize_matrices_with_dtype(
statistics_list, quantized_dtype_for_second_moment_statistics_buffers()
)
def _maybe_quantize_preconditioners(statistics_list):
return _maybe_quantize_matrices_with_dtype(
statistics_list, quantized_dtype_for_second_moment_preconditioner_buffers()
)
def _maybe_quantize_matrices_with_dtype(statistics_list, quantized_dtype):
if quantized_dtype != jnp.float32:
return [
QuantizedValue.from_float_value(
s, quantized_dtype, extract_diagonal=True
)
for s in statistics_list
]
else:
return statistics_list
def _maybe_dequantize_preconditioners(preconditioner_list):
return _maybe_dequantize_matrices_with_dtype(
preconditioner_list,
quantized_dtype_for_second_moment_preconditioner_buffers(),
)
def _maybe_dequantize_matrices_with_dtype(statistics_list, quantized_dtype):
if quantized_dtype != jnp.float32:
return [s.to_float() for s in statistics_list]
else:
return statistics_list
def _quantize_diagonal_statistics(diagonal_statistics):
return QuantizedValue.from_float_value(diagonal_statistics, jnp.float32)
def _quantize_momentum(momentum_statistics):
return QuantizedValue.from_float_value(
momentum_statistics,
quantized_dtype_for_momentum_buffers(momentum_statistics),
)
def preconditioner_from_params(param):
"""Returns a Preconditioner object for given param."""
return Preconditioner(
param,
block_size,
merge_small_dims_block_size,
best_effort_shape_interpretation,
precondtioner_type,
)
def sharded_init_fn(params):
"""Returns optimizer state (for PJIT mode).
Args:
params: the parameters that should be updated.
"""
params_flat, treedef = jax.tree_flatten(params)
# Find max size to pad to.
max_size = 0
for param in params_flat:
preconditioner = preconditioner_from_params(param)
if not _skip_preconditioning(param):
shapes = preconditioner.shapes_for_preconditioners()
sizes = [s[0] for s in shapes]
max_size = max(max(sizes), max_size)
padded_statistics = []
padded_preconditioners = []
local_stats_flat = []
exponents = []
for param in params_flat:
preconditioner = preconditioner_from_params(param)
shapes = preconditioner.shapes_for_preconditioners()
sizes = []
statistics = []
preconditioners = []
index_start = len(padded_statistics)
if not _skip_preconditioning(param):
sizes = [s[0] for s in shapes]
shapes = preconditioner.shapes_for_preconditioners()
statistics = [
matrix_epsilon * jnp.eye(max_size, dtype=jnp.float32)
for s in shapes
]
preconditioners = [jnp.eye(max_size, dtype=jnp.float32) for s in shapes]
padded_statistics.extend(statistics)
padded_preconditioners.extend(preconditioners)
exponent = (
preconditioner.exponent_for_preconditioner()
if exponent_override == 0
else exponent_override
)
exponents.extend([exponent] * len(shapes))
diagonal_statistics = _quantize_diagonal_statistics(jnp.zeros_like(param))
diagonal_momentum = _quantize_momentum(jnp.zeros_like(param))
momentum = _quantize_momentum(jnp.zeros_like(param))
local_stats_flat.append(
LocalShardedParameterStats(
diagonal_statistics,
diagonal_momentum,
momentum,
init_training_metrics(len(sizes)),
index_start,
sizes,
)
)
local_stats = jax.tree_unflatten(treedef, local_stats_flat)
to_pad = -len(padded_statistics) % num_devices_for_pjit
if max_size == 0:
to_pad = num_devices_for_pjit
max_size = block_size
stat_dtype = jnp.float32
else:
stat_dtype = padded_statistics[0].dtype
# Pad the statistics and preconditioner matrices to be a multiple of
# num devices.
# TODO(rohananil): Relax to only the size of the mesh axis where the dim
# is split on.
padded_statistics.extend(
[jnp.eye(max_size, dtype=stat_dtype) for _ in range(to_pad)]
)
padded_preconditioners.extend(
[jnp.eye(max_size, dtype=stat_dtype) for _ in range(to_pad)]
)
exponents.extend([1 for _ in range(to_pad)])
global_stats = GlobalShardedParameterStats(
jnp.stack(padded_statistics),
jnp.stack(padded_preconditioners),
jnp.stack(exponents),
)
return ShampooState(
count=jnp.zeros([], jnp.int32),
stats=ShardedShampooStats(global_stats, local_stats),
)
def _max_statistics_size_from_params(params):
max_size = 0
for param in params:
param_clone = jnp.zeros(param.shape, dtype=param.dtype)
preconditioner = preconditioner_from_params(param_clone)
if not _skip_preconditioning(param):
shapes = preconditioner.shapes_for_preconditioners()
sizes = [s[0] for s in shapes]
max_size = max(max(sizes), max_size)
return max_size
def _remove_leading_sharding_annotation(pspec):
"""Mapping from N-d to (N-1)-d, used for quantization, factoring etc."""
# None and PSpec(None) are valid PSpecs.
if pspec and len(pspec) > 1:
return pjit.PartitionSpec(*pspec[1:])
else:
return []
def sharded_init_partition_spec_fn(
params, params_partition_spec, partition_spec_for_statistics
):
"""Returns a parallel state tree with PartitionSpec associated with state.
Args:
params: A pytree with params.
params_partition_spec: A pytree with PartitionSpec for params.
partition_spec_for_statistics: PartitionSpec for the statistics.
"""
# Parallel lists of specs and params.
param_pspec_flat, _ = jax.tree_flatten(
params_partition_spec, is_leaf=lambda x: x is None
)
params_flat, treedef = jax.tree_flatten(params)
assert param_pspec_flat
assert params_flat
# Step is replicated across cores.
# A PartitionSpec of None means replicated across cores.
local_stats_flat = []
num_statistics = 0
for param, param_pspec in zip(params_flat, param_pspec_flat):
param_clone = jnp.zeros(param.shape, dtype=param.dtype)
preconditioner = preconditioner_from_params(param_clone)
shapes = preconditioner.shapes_for_preconditioners()
sizes = []
index_start = num_statistics
if not _skip_preconditioning(param):
sizes = [s[0] for s in shapes]
shapes = preconditioner.shapes_for_preconditioners()
num_statistics += len(shapes)
qdtype = quantized_dtype_for_momentum_buffers(param)
m1_pspec = param_pspec
m2_pspec = param_pspec
m1_scale_pspec = []
m2_scale_pspec = []
if qdtype != jnp.float32:
m1_scale_pspec = _remove_leading_sharding_annotation(m1_pspec)
m2_scale_pspec = _remove_leading_sharding_annotation(m2_pspec)
local_stats_flat.append(
LocalShardedParameterStats(
QuantizedValue(
param_pspec, [], [], jnp.float32, False, list(param.shape)
),
QuantizedValue(
m1_pspec, [], m1_scale_pspec, qdtype, False, list(param.shape)
),
QuantizedValue(
m2_pspec, [], m2_scale_pspec, qdtype, False, list(param.shape)
),
init_training_metrics_pspec(),
index_start,
sizes,
)
)
local_stats = jax.tree_unflatten(treedef, local_stats_flat)
global_stats = GlobalShardedParameterStats(
partition_spec_for_statistics,
partition_spec_for_statistics,
pjit.PartitionSpec(),
)
count_pspec = pjit.PartitionSpec()
return ShampooState(
count=count_pspec, stats=ShardedShampooStats(global_stats, local_stats)
)
def sharded_init_shape_and_dtype_fn(params):
"""Returns a parallel state tree with shape, dtype associated with state.
Args:
params: A pytree with params.
"""
# Parallel list of params.
params_flat, treedef = jax.tree_flatten(params)
assert params_flat
# Step is replicated across cores.
# A PartitionSpec of None means replicated across cores.
local_stats_flat = []
num_statistics = 0
for param in params_flat:
param_clone = jnp.zeros(param.shape, dtype=param.dtype)
preconditioner = preconditioner_from_params(param_clone)
shapes = preconditioner.shapes_for_preconditioners()
sizes = []
index_start = num_statistics
if not _skip_preconditioning(param):
sizes = [s[0] for s in shapes]
shapes = preconditioner.shapes_for_preconditioners()
num_statistics += len(shapes)
qdtype = quantized_dtype_for_momentum_buffers(param)
m1_shape_and_dtype = [list(param.shape), param.dtype]
m2_shape_and_dtype = [list(param.shape), param.dtype]
m1_scale_shape_and_dtype = []
m2_scale_shape_and_dtype = []
if qdtype != jnp.float32:
m1_scale_shape_and_dtype = [list(param.shape)[1:], qdtype]
m2_scale_shape_and_dtype = [list(param.shape)[1:], qdtype]
diagonal_statistics_shape_and_dtype = [list(param.shape), param.dtype]
local_stats_flat.append(
LocalShardedParameterStats(
QuantizedValue(
diagonal_statistics_shape_and_dtype,
[],
[],
jnp.float32,
False,
list(param.shape),
),
QuantizedValue(
m1_shape_and_dtype,
[],
m1_scale_shape_and_dtype,
qdtype,
False,
list(param.shape),
),
QuantizedValue(
m2_shape_and_dtype,
[],
m2_scale_shape_and_dtype,
qdtype,
False,
list(param.shape),
),
init_training_metrics_shapes(len(sizes)),
index_start,
sizes,
)
)
local_stats = jax.tree_unflatten(treedef, local_stats_flat)
max_statistics_size = _max_statistics_size_from_params(params_flat)
to_pad = -num_statistics % num_devices_for_pjit
num_statistics += to_pad
if num_statistics == 0:
num_statistics = num_devices_for_pjit
max_statistics_size = block_size
statistics_shape = [num_statistics, max_statistics_size, max_statistics_size]
global_stats = GlobalShardedParameterStats(
[statistics_shape, jnp.float32],
[statistics_shape, jnp.float32],
[[num_statistics], jnp.int32],
)
return ShampooState(
count=[[], jnp.float32],
stats=ShardedShampooStats(global_stats, local_stats),
)
def sharded_update_fn(grads, state, params):
"""Transform the input gradient and update all statistics in sharded mode.
Args:
grads: the gradient tensors for the parameters.
state: a named tuple containing the state of the optimizer
params: the parameters that should be updated.
Returns:
A tuple containing the new parameters and the new optimizer state.
"""
params_flat, treedef = jax.tree_flatten(params)
grads_flat = treedef.flatten_up_to(grads)
global_stats = state.stats.global_stats
local_stats_flat = treedef.flatten_up_to(state.stats.local_stats)
stats_flat = [
_convert_to_parameter_stats(global_stats, local_stat)
for local_stat in local_stats_flat
]
new_stats_flat = jax.tree_map(
lambda g, s, p: _compute_stats(g, s, p, state.count),
grads_flat,
stats_flat,
params_flat,
)
outputs = jax.tree_map(
lambda g, s, p: _transform_grad(g, s, p, state.count),
grads_flat,
new_stats_flat,
params_flat,
)
updates_flat, new_stats_flat = list(zip(*outputs)) if outputs else ((), ())
updates = jax.tree_unflatten(treedef, updates_flat)
# Create new local_stats
new_local_stats_flat = [
_convert_from_parameter_stats(new_stat, local_stat)
for new_stat, local_stat in zip(new_stats_flat, local_stats_flat)
]
max_size = global_stats.statistics.shape[1]
new_padded_statistics = []
for stat in new_stats_flat:
new_padded_statistics.extend(
[pad_square_matrix(stat, max_size) for stat in stat.statistics]
)
# Create global stats
# TODO(rohananil): Preconditioner is not updated every step, so cost of
# stack/pad can be obviated away.
# Pad the statistics and preconditioner matrices to be a multiple of
# num devices.
# TODO(rohananil): Relax to only the size of the mesh axis where the dim
# is split on.
to_pad = -len(new_padded_statistics) % num_devices_for_pjit
if not new_padded_statistics:
to_pad = num_devices_for_pjit
stat_dtype = jnp.float32
else:
stat_dtype = new_padded_statistics[0].dtype
new_padded_statistics.extend(
[jnp.eye(max_size, dtype=stat_dtype) for _ in range(to_pad)]
)
new_stacked_padded_statistics = jnp.stack(new_padded_statistics)
new_stacked_padded_statistics = pjit.with_sharding_constraint(
new_stacked_padded_statistics, statistics_partition_spec
)
def _internal_inverse_pth_root_all():
preconditioners, errors = _matrix_inverse_pth_root_pjit(
new_stacked_padded_statistics,
global_stats.exponents,
statistics_partition_spec,
)
return preconditioners, errors
if preconditioning_compute_steps == 1:
new_preconditioners, errors = _internal_inverse_pth_root_all()
else:
# Passing statistics instead of preconditioners as they are similarly
# shaped tensors. Note statistics will be ignored as we are passing in
# a large init value for error.
preconditioners_init = new_stacked_padded_statistics
n = new_stacked_padded_statistics.shape[0]
errors_init = jnp.ones([n], jnp.float32) * inverse_failure_threshold
init_state = [preconditioners_init, errors_init]
perform_step = state.count % preconditioning_compute_steps == 0
new_preconditioners, errors = efficient_cond(
perform_step, _internal_inverse_pth_root_all, init_state
)
new_local_stats_flat = _add_error_into_local_stats(
new_local_stats_flat, errors, inverse_failure_threshold
)
new_local_stats = jax.tree_unflatten(treedef, new_local_stats_flat)
errors = errors.reshape((-1, 1, 1))
predicate = jnp.logical_or(
jnp.isnan(errors), errors >= inverse_failure_threshold
).astype(new_preconditioners.dtype)
# TODO(rohananil): Check for numerical instabilities.
new_conditional_preconditioners = (
predicate * global_stats.preconditioners
+ (1.0 - predicate) * new_preconditioners
)
new_global_stats = GlobalShardedParameterStats(
new_stacked_padded_statistics,
new_conditional_preconditioners,
global_stats.exponents,
)
new_shampoo_state = ShampooState(
count=state.count + 1,
stats=ShardedShampooStats(new_global_stats, new_local_stats),
)
return updates, new_shampoo_state
def init_fn(params):
"""Initialise the optimiser's state."""
def _init(param):
preconditioner = preconditioner_from_params(param)
statistics = []
preconditioners = []
if not _skip_preconditioning(param):
shapes = preconditioner.shapes_for_preconditioners()
statistics = [
matrix_epsilon * jnp.eye(s[0], dtype=jnp.float32) for s in shapes
]
preconditioners = [jnp.eye(s[0], dtype=jnp.float32) for s in shapes]
diagonal_statistics = []
if _graft_type_has_diagonal_statistics():
diagonal_statistics = jnp.zeros_like(param)
diagonal_momentum = _quantize_momentum(jnp.zeros_like(param))
momentum = _quantize_momentum(jnp.zeros_like(param))
return ParameterStats(
_quantize_diagonal_statistics(diagonal_statistics),
_maybe_quantize_statistics(statistics),
_maybe_quantize_preconditioners(preconditioners),
diagonal_momentum,
momentum,
init_training_metrics(len(statistics)),
)
return ShampooState(
count=jnp.zeros([], jnp.int32), stats=jax.tree_map(_init, params)
)
def _skip_preconditioning(param):
return len(param.shape) < skip_preconditioning_rank_lt or any(
[s > skip_preconditioning_dim_size_gt for s in param.shape]
)
def _compute_stats(grad, state, param, step):
"""Compute per-parameter statistics."""
preconditioner = preconditioner_from_params(param)
new_statistics = [[]] * len(state.statistics)
w1 = beta2
w2 = beta2 if beta2 == 1.0 else (1.0 - beta2)
if not _skip_preconditioning(param):
def compute_updated_statistics():
return preconditioner.updated_statistics_from_grad(
state.statistics,
grad,
w1=w1,
w2=w2,
to_float=_to_float,
from_float=lambda x: _maybe_quantize_statistics([x])[0],
precision=tensordot_precision,
)
if statistics_compute_steps > 1:
perform_step = step % statistics_compute_steps == 0
init_state = state.statistics
new_statistics = list(
efficient_cond(perform_step, compute_updated_statistics, init_state)
)
else:
new_statistics = compute_updated_statistics()
return ParameterStats(
state.diagonal_statistics,
new_statistics,
state.preconditioners,
state.diagonal_momentum,
state.momentum,
state.training_metrics,
)
mi_pth_root = functools.partial(
matrix_inverse_pth_root,
ridge_epsilon=matrix_epsilon,
precision=precision,
relative_matrix_epsilon=relative_matrix_epsilon,
lobpcg_topk_precondition=lobpcg_topk_precondition,
lobpcg_max_iter=lobpcg_max_iter,
)
def _matrix_inverse_pth_root_vmap(xs, ps):
return jax.vmap(mi_pth_root)(xs, ps)
def _quantized_matrix_inverse_pth_root_vmap(qxs, qds, qbs, ps):
def _quantized_to_float(qx, qd, qb):
qv = QuantizedValue(qx, qd, qb, qx.dtype, True, list(qx.shape))
return qv.to_float()
def matrix_inverse_pth_root_wrapper(qx, qd, qb, p):
v = _quantized_to_float(qx, qd, qb)
preconditioner, error = mi_pth_root(v, p)
qp = QuantizedValue.from_float_value(preconditioner, qx.dtype, True)
return qp.quantized, qp.diagonal, qp.bucket_size, error
return jax.vmap(matrix_inverse_pth_root_wrapper)(qxs, qds, qbs, ps)
def _matrix_inverse_pth_root_pjit(xs, ps, statistics_partition_spec=None):
# Partition the concatenated statistics matrix across all cores.
pspec_for_partition = preconditioner_partition_spec
partitioned_xs = pjit.with_sharding_constraint(xs, pspec_for_partition)
if preconditioner_partition_spec:
partitioned_ps_spec = pjit.PartitionSpec(preconditioner_partition_spec[0])
else:
partitioned_ps_spec = None
partitioned_ps = pjit.with_sharding_constraint(ps, partitioned_ps_spec)
# Run matrix inverse pth root on each shard.
partitioned_preconditioners, partitioned_errors = _matrix_inverse_pth_root_vmap(
partitioned_xs, partitioned_ps
)
# Reshard output to have the same PSpec as input. This is required to avoid
# vmap seeing the full set of statistics.
partitioned_preconditioners = pjit.with_sharding_constraint(
partitioned_preconditioners, pspec_for_partition
)
# Recombine the outputs at each core.
preconditioners = pjit.with_sharding_constraint(
partitioned_preconditioners, statistics_partition_spec
)
errors = pjit.with_sharding_constraint(partitioned_errors, pjit.PartitionSpec())
return preconditioners, errors
def _pmap_compute_preconditioners(
states,
step,
statistics,
num_statistics_per_state,
original_shapes,
exponents,
max_size,
prev_preconditioners,
):
"""Computes preconditioners for given statistics in states in PMAP mode.
Args:
states: A list of optimizer states.
step: Current step number
statistics: A list of statistics for all variables (for every dim)
num_statistics_per_state: Number of statistics per state to reconstruct
output states.
original_shapes: A list of shapes of the statistics.
exponents: Exponent power to use for inverse-pth roots.
max_size: Maximum dim of the statistics to pad.
prev_preconditioners: Previously available preconditioner.
Returns:
New optimizer states after computing the preconditioner.
"""
if batch_axis_name:
num_devices = lax.psum(1, batch_axis_name)
else:
num_devices = 1
num_statistics = len(statistics)
# Pad statistics and exponents to next multiple of num_devices.
packed_statistics = [pad_square_matrix(stat, max_size) for stat in statistics]
to_pad = -num_statistics % num_devices
packed_statistics.extend(
[jnp.eye(max_size, dtype=packed_statistics[0].dtype) for _ in range(to_pad)]
)
exponents.extend([1 for _ in range(to_pad)])
if not packed_statistics:
return states
all_statistics = batch(packed_statistics, num_devices)
all_exponents = batch(exponents, num_devices)
def _internal_inverse_pth_root_all():
if batch_axis_name:
current_replica = lax.axis_index(batch_axis_name)
preconditioners, errors = _matrix_inverse_pth_root_vmap(
all_statistics[current_replica], all_exponents[current_replica]
)
preconditioners = jax.lax.all_gather(preconditioners, batch_axis_name)
errors = jax.lax.all_gather(errors, batch_axis_name)
preconditioners_flat = unbatch(preconditioners)
errors_flat = unbatch(errors)
else:
preconditioners, errors = _matrix_inverse_pth_root_vmap(
all_statistics[0], all_exponents[0]
)
preconditioners_flat = unbatch(jnp.stack([preconditioners]))
errors_flat = unbatch(jnp.stack([errors]))
return preconditioners_flat, errors_flat
if preconditioning_compute_steps == 1:
preconditioners_flat, errors_flat = _internal_inverse_pth_root_all()
else:
# Passing statistics instead of preconditioners as they are similarly
# shaped tensors. Note statistics will be ignored as we are passing in
# a large init value for error.
preconditioners_init = packed_statistics
errors_init = [inverse_failure_threshold] * len(packed_statistics)
init_state = [preconditioners_init, errors_init]
perform_step = step % preconditioning_compute_steps == 0
preconditioners_flat, errors_flat = efficient_cond(
perform_step, _internal_inverse_pth_root_all, init_state
)
def _skip(error):
condition = jnp.logical_or(
jnp.isnan(error), error >= inverse_failure_threshold
)
return condition.astype(error.dtype)
def _select_preconditioner(error, new_p, old_p):
return lax.cond(
_skip(error), lambda _: old_p, lambda _: new_p, operand=None
)
new_preconditioners_flat = []
new_errors_flat = []
for p, shape, prev_p, error in zip(
preconditioners_flat, original_shapes, prev_preconditioners, errors_flat
):
new_preconditioners_flat.append(
_select_preconditioner(error, p[: shape[0], : shape[1]], prev_p)
)
new_errors_flat.append(error)
assert len(states) == len(num_statistics_per_state)
assert len(new_preconditioners_flat) == num_statistics
assert len(new_errors_flat) == num_statistics
# Add back empty preconditioners so that we can set the optimizer state.
preconditioners_for_states = []
idx = 0
errors_for_states = []
for num_statistics, state in zip(num_statistics_per_state, states):
if num_statistics == 0:
preconditioners_for_states.append([])
errors_for_states.append(jnp.array(0, jnp.float32))
else:
preconditioners_for_state = new_preconditioners_flat[
idx : idx + num_statistics
]
assert len(state.statistics) == len(preconditioners_for_state)
preconditioners_for_states.append(preconditioners_for_state)
errors_for_state = jnp.stack(
new_errors_flat[idx : idx + num_statistics]
)
assert len(state.statistics) == len(errors_for_state)
errors_for_states.append(errors_for_state)
idx += num_statistics
new_states = []
for state, new_preconditioners, new_errors in zip(
states, preconditioners_for_states, errors_for_states
):
if state.statistics:
new_errors = jnp.where(
jnp.logical_and(
new_errors > 0.0, new_errors != inverse_failure_threshold
),
new_errors,
state.training_metrics.inverse_pth_root_errors,
)
new_training_metrics = TrainingMetrics(new_errors)
new_states.append(
ParameterStats(
state.diagonal_statistics,
state.statistics,
new_preconditioners,
state.diagonal_momentum,
state.momentum,
new_training_metrics,
)
)
return new_states
def _pmap_quantized_compute_preconditioners(
states,
step,
statistics,
num_statistics_per_state,
original_shapes,
exponents,
max_size,
prev_preconditioners,
):
"""Computes preconditioners for given statistics in states in PMAP mode.
For quantization, each statistic is represented by three values:
quantized matrix, diagonal, and bucket sizes, we run inverse pth-roots
without ever recreating the original matrix in f32.
Args:
states: A list of optimizer states.
step: Current step number
statistics: A list of statistics for all variables (for every dim)
num_statistics_per_state: Number of statistics per state to reconstruct
output states.
original_shapes: A list of shapes of the statistics.
exponents: Exponent power to use for inverse-pth roots.
max_size: Maximum dim of the statistics to pad.
prev_preconditioners: Previously available preconditioner.
Returns:
New optimizer states after computing the preconditioner.
"""
num_devices = lax.psum(1, batch_axis_name)
num_statistics = len(statistics)
quantized_dtype = quantized_dtype_for_second_moment_statistics_buffers()
# Complexity here comes from shapes needing to be statically shaped and
# our custom quantization type requiring a different type of packing.
# Parallel tensors:
# quantized [dxd]
# diagonals [d] f32
# bucket_sizes [d] f32
packed_quantized_statistics = [
pad_square_matrix(stat.quantized, max_size) for stat in statistics
]
packed_quantized_diagonals = [
pad_vector(stat.diagonal, max_size) for stat in statistics
]
packed_quantized_bucket_sizes = [
pad_vector(stat.bucket_size, max_size) for stat in statistics
]
to_pad = -num_statistics % num_devices
padded_eye = jnp.eye(max_size, dtype=jnp.float32)
quantized_eye = QuantizedValue.from_float_value(
padded_eye, quantized_dtype, True
)
packed_quantized_statistics.extend(
[quantized_eye.quantized for _ in range(to_pad)]
)
packed_quantized_diagonals.extend(
[quantized_eye.diagonal for _ in range(to_pad)]
)
packed_quantized_bucket_sizes.extend(
[quantized_eye.bucket_size for _ in range(to_pad)]
)
exponents.extend([1 for _ in range(to_pad)])
if not packed_quantized_statistics:
return states
all_quantized_statistics = batch(packed_quantized_statistics, num_devices)
all_quantized_diagonals = batch(packed_quantized_diagonals, num_devices)
all_quantized_bucket_sizes = batch(packed_quantized_bucket_sizes, num_devices)
all_exponents = batch(exponents, num_devices)
def _internal_inverse_pth_root_all():
current_replica = lax.axis_index(batch_axis_name)
(
quantized_preconditioners,
quantized_diagonals,
quantized_bucket_sizes,
errors,
) = _quantized_matrix_inverse_pth_root_vmap(
all_quantized_statistics[current_replica],
all_quantized_diagonals[current_replica],
all_quantized_bucket_sizes[current_replica],
all_exponents[current_replica],
)
quantized_preconditioners = jax.lax.all_gather(
quantized_preconditioners, batch_axis_name
)
quantized_diagonals = jax.lax.all_gather(
quantized_diagonals, batch_axis_name
)
quantized_bucket_sizes = jax.lax.all_gather(
quantized_bucket_sizes, batch_axis_name
)
errors = jax.lax.all_gather(errors, batch_axis_name)
quantized_preconditioners_flat = unbatch(quantized_preconditioners)
quantized_diagonals_flat = unbatch(quantized_diagonals)
quantized_bucket_sizes_flat = unbatch(quantized_bucket_sizes)
errors_flat = unbatch(errors)
return (
quantized_preconditioners_flat,
quantized_diagonals_flat,
quantized_bucket_sizes_flat,
errors_flat,
)
if preconditioning_compute_steps == 1:
(
quantized_preconditioners_flat,
quantized_diagonals_flat,
quantized_bucket_sizes_flat,
errors_flat,
) = _internal_inverse_pth_root_all()
else:
# Passing statistics instead of preconditioners as they are similarly
# shaped tensors. Note statistics will be ignored as we are passing in
# a large init value for error.
quantized_preconditioners_init = packed_quantized_statistics
quantized_diagonals_init = packed_quantized_diagonals
quantized_bucket_sizes_init = packed_quantized_bucket_sizes
errors_init = [inverse_failure_threshold] * len(
quantized_preconditioners_init
)
init_state = [
quantized_preconditioners_init,
quantized_diagonals_init,
quantized_bucket_sizes_init,
errors_init,
]
perform_step = step % preconditioning_compute_steps == 0
(
quantized_preconditioners_flat,
quantized_diagonals_flat,
quantized_bucket_sizes_flat,
errors_flat,
) = efficient_cond(perform_step, _internal_inverse_pth_root_all, init_state)
def _skip(error):
condition = jnp.logical_or(
jnp.isnan(error), error >= inverse_failure_threshold
)
return condition.astype(error.dtype)
def _select_preconditioner(error, new_p, old_p):
return lax.cond(
_skip(error), lambda _: old_p, lambda _: new_p, operand=None
)
new_quantized_preconditioners_flat = []
new_quantized_diagonals_flat = []
new_quantized_bucket_sizes_flat = []
new_errors_flat = []
for p, d, b, shape, prev_p, error in zip(
quantized_preconditioners_flat,
quantized_diagonals_flat,
quantized_bucket_sizes_flat,
original_shapes,
prev_preconditioners,
errors_flat,
):
new_quantized_preconditioners_flat.append(
_select_preconditioner(
error, p[: shape[0], : shape[1]], prev_p.quantized
)
)
new_quantized_diagonals_flat.append(
_select_preconditioner(error, d[: shape[0]], prev_p.diagonal)
)
new_quantized_bucket_sizes_flat.append(
_select_preconditioner(error, b[: shape[0]], prev_p.bucket_size)
)
new_errors_flat.append(error)
assert len(states) == len(num_statistics_per_state)
assert len(new_quantized_preconditioners_flat) == num_statistics
assert len(new_quantized_diagonals_flat) == num_statistics
assert len(new_quantized_bucket_sizes_flat) == num_statistics
# Add back empty preconditioners so that we can set the optimizer state.
preconditioners_for_states = []
errors_for_states = []
idx = 0
for num_statistics, state in zip(num_statistics_per_state, states):
if num_statistics == 0:
preconditioners_for_states.append([])
errors_for_states.append(jnp.array(0, jnp.float32))
else:
quantized_preconditioners_for_state = (
new_quantized_preconditioners_flat[idx : idx + num_statistics]
)
quantized_diagonals_for_state = new_quantized_diagonals_flat[
idx : idx + num_statistics
]
quantized_bucket_sizes_for_state = new_quantized_bucket_sizes_flat[
idx : idx + num_statistics
]
errors_for_state = jnp.stack(
new_errors_flat[idx : idx + num_statistics]
)
assert len(state.statistics) == len(quantized_preconditioners_for_state)
assert len(state.statistics) == len(quantized_diagonals_for_state)
assert len(state.statistics) == len(quantized_bucket_sizes_for_state)
assert len(state.statistics) == len(errors_for_state)
quantized_preconditioners = []
for qv, qd, qb in zip(
quantized_preconditioners_for_state,
quantized_diagonals_for_state,
quantized_bucket_sizes_for_state,
):
quantized_preconditioners.append(
QuantizedValue(qv, qd, qb, qv.dtype, True, list(qv.shape))
)
preconditioners_for_states.append(quantized_preconditioners)
errors_for_states.append(errors_for_state)
idx += num_statistics
new_states = []
for state, new_preconditioners, new_errors in zip(
states, preconditioners_for_states, errors_for_states
):
if state.statistics:
new_errors = jnp.where(
jnp.logical_and(
new_errors > 0.0, new_errors != inverse_failure_threshold
),
new_errors,
state.training_metrics.inverse_pth_root_errors,
)
new_training_metrics = TrainingMetrics(new_errors)
new_states.append(
ParameterStats(
state.diagonal_statistics,
state.statistics,
new_preconditioners,
state.diagonal_momentum,
state.momentum,
new_training_metrics,
)
)
return new_states
def _pjit_compute_preconditioners(
states,
step,
statistics,
num_statistics_per_state,
original_shapes,
exponents,
max_size,
prev_preconditioners,
):
"""Computes preconditioners for given statistics in states in PJIT mode.
Args:
states: A list of optimizer states.
step: Current step number
statistics: A list of statistics for all variables (for every dim)
num_statistics_per_state: Number of statistics per state to reconstruct
output states.
original_shapes: A list of shapes of the statistics.
exponents: Exponent power to use for inverse-pth roots.
max_size: Maximum dim of the statistics to pad.
prev_preconditioners: Previously available preconditioner.
Returns:
New optimizer states after computing the preconditioner.
"""
num_statistics = len(statistics)
to_pad = -num_statistics % num_devices_for_pjit
padded_statistics = [pad_square_matrix(stat, max_size) for stat in statistics]
padded_statistics.extend(
[jnp.eye(max_size, dtype=padded_statistics[0].dtype) for _ in range(to_pad)]
)
exponents.extend([1 for _ in range(to_pad)])
all_statistics = jnp.stack(padded_statistics)
all_exponents = jnp.stack(exponents)
def _internal_inverse_pth_root_all():
preconditioners, errors = _matrix_inverse_pth_root_pjit(
all_statistics, all_exponents
)
b1 = preconditioners.shape[0]
def split(batched_values):
return [
jnp.squeeze(v)
for v in jnp.split(batched_values, indices_or_sections=b1, axis=0)
]
return split(preconditioners), split(errors)
if preconditioning_compute_steps == 1:
preconditioners_flat, errors_flat = _internal_inverse_pth_root_all()
else:
# Passing statistics instead of preconditioners as they are similarly
# shaped tensors. Note statistics will be ignored as we are passing in
# a large init value for error.
preconditioners_init = padded_statistics
errors_init = [inverse_failure_threshold] * len(padded_statistics)
init_state = [preconditioners_init, errors_init]
perform_step = step % preconditioning_compute_steps == 0
preconditioners_flat, errors_flat = efficient_cond(
perform_step, _internal_inverse_pth_root_all, init_state
)
def _skip(error):
condition = jnp.logical_or(
jnp.isnan(error), error >= inverse_failure_threshold
)
return condition.astype(error.dtype)
def _select_preconditioner(error, new_p, old_p):
return lax.cond(
_skip(error), lambda _: old_p, lambda _: new_p, operand=None
)
new_preconditioners_flat = []
new_errors_flat = []
for p, shape, prev_p, error in zip(
preconditioners_flat, original_shapes, prev_preconditioners, errors_flat
):
new_preconditioners_flat.append(
_select_preconditioner(error, p[: shape[0], : shape[1]], prev_p)
)
new_errors_flat.append(error)
assert len(states) == len(num_statistics_per_state)
assert len(new_preconditioners_flat) == num_statistics
# Add back empty preconditioners so that we can set the optimizer state.
preconditioners_for_states = []
errors_for_states = []
idx = 0
for num_statistics, state in zip(num_statistics_per_state, states):
if num_statistics == 0:
preconditioners_for_states.append([])
errors_for_states.append(jnp.array(0, jnp.float32))
else:
preconditioners_for_state = new_preconditioners_flat[
idx : idx + num_statistics
]
assert len(state.statistics) == len(preconditioners_for_state)
preconditioners_for_states.append(preconditioners_for_state)
errors_for_state = jnp.stack(
new_errors_flat[idx : idx + num_statistics]
)
assert len(state.statistics) == len(errors_for_state)
errors_for_states.append(errors_for_state)
idx += num_statistics
new_states = []
for state, new_preconditioners, new_errors in zip(
states, preconditioners_for_states, errors_for_states
):
if state.statistics:
new_errors = jnp.where(
jnp.logical_and(
new_errors > 0.0, new_errors != inverse_failure_threshold
),
new_errors,
state.training_metrics.inverse_pth_root_errors,
)
new_training_metrics = TrainingMetrics(new_errors)
new_states.append(
ParameterStats(
state.diagonal_statistics,
state.statistics,
new_preconditioners,
state.diagonal_momentum,
state.momentum,
new_training_metrics,
)
)
return new_states
def _compute_preconditioners(states, params, step):
"""Computes preconditioners for given statistics in states.
Args:
states: A list of optimizer states.
params: A list of params.
step: Current step number
Returns:
New optimizer states after computing the preconditioner.
"""
statistics = []
num_statistics_per_state = []
original_shapes = []
exponents = []
max_size = 0
prev_preconditioners = []
for state, param in zip(states, params):
num_statistics = len(state.statistics)
num_statistics_per_state.append(num_statistics)
original_shapes_for_state = []
if num_statistics > 0:
preconditioner = preconditioner_from_params(param)
for statistic in state.statistics:
exponents.append(
preconditioner.exponent_for_preconditioner()
if exponent_override == 0
else exponent_override
)
original_shapes_for_state.append(statistic.shape)
max_size = max(max_size, statistic.shape[0])
statistics.extend(state.statistics)
prev_preconditioners.extend(state.preconditioners)
original_shapes.extend(original_shapes_for_state)
if not shard_optimizer_states:
# Quantization is only enabled when batch_axis_name is set (pmap mode).
quantized_dtype = quantized_dtype_for_second_moment_statistics_buffers()
if quantized_dtype == jnp.float32:
return _pmap_compute_preconditioners(
states,
step,
statistics,
num_statistics_per_state,
original_shapes,
exponents,
max_size,
prev_preconditioners,
)
else:
return _pmap_quantized_compute_preconditioners(
states,
step,
statistics,
num_statistics_per_state,
original_shapes,
exponents,
max_size,
prev_preconditioners,
)
else:
return _pjit_compute_preconditioners(
states,
step,
statistics,
num_statistics_per_state,
original_shapes,
exponents,
max_size,
prev_preconditioners,
)
def _transform_grad(grad, state, param, step):
"""Transform per-parameter gradients."""
preconditioner = preconditioner_from_params(param)
sgd_update = grad
new_diagonal_statistics = state.diagonal_statistics.to_float()
if (
graft_type == GraftingType.ADAGRAD
or graft_type == GraftingType.ADAGRAD_NORMALIZED
):
scaled_grad = grad
if graft_type == GraftingType.ADAGRAD_NORMALIZED:
scaled_grad = grad / (jnp.linalg.norm(grad) + 1e-16)
new_diagonal_statistics = state.diagonal_statistics.to_float() + jnp.square(
scaled_grad
)
adagrad_update = scaled_grad / (
jnp.sqrt(new_diagonal_statistics) + diagonal_epsilon
)
grafting_update = adagrad_update
elif (
graft_type == GraftingType.RMSPROP
or graft_type == GraftingType.RMSPROP_NORMALIZED
):
scaled_grad = grad
if graft_type == GraftingType.RMSPROP_NORMALIZED:
scaled_grad = grad / (jnp.linalg.norm(grad) + 1e-16)
w1 = beta2
w2 = beta2 if beta2 == 1.0 else (1.0 - beta2)
new_diagonal_statistics = (
w1 * state.diagonal_statistics.to_float() + w2 * jnp.square(scaled_grad)
)
rmsprop_update = scaled_grad / (
jnp.sqrt(new_diagonal_statistics) + diagonal_epsilon
)
if clip_by_scaled_gradient_norm:
scaled_grad_norm = jnp.linalg.norm(rmsprop_update) / (
jnp.sqrt(float(rmsprop_update.size))
)
clipping_denom = jnp.maximum(
1.0, scaled_grad_norm / clip_by_scaled_gradient_norm
)
rmsprop_update /= clipping_denom
grafting_update = rmsprop_update
elif graft_type == GraftingType.SGD:
grafting_update = sgd_update
else:
grafting_update = jnp.ones_like(sgd_update) * jnp.sign(sgd_update)
lr = learning_rate
if callable(learning_rate):
lr = learning_rate(step)
preconditioner_multiplier = lr if not decoupled_learning_rate else 1.0
grafting_update = grafting_update * preconditioner_multiplier
precond_grad = grad
if not _skip_preconditioning(param):
precond_grad = preconditioner.preconditioned_grad(
precond_grad, _maybe_dequantize_preconditioners(state.preconditioners)
)
else:
precond_grad = grafting_update
grafting_update_norm = jnp.linalg.norm(grafting_update)
precond_grad_norm = jnp.linalg.norm(precond_grad)
multiplier = grafting_update_norm / (precond_grad_norm + 1e-16)
shampoo_update = precond_grad * multiplier
shampoo_update_with_wd = shampoo_update
grafting_update_with_wd = grafting_update
if weight_decay != 0 and not decoupled_weight_decay:
shampoo_update_with_wd = shampoo_update + weight_decay * param
grafting_update_with_wd = grafting_update + weight_decay * param
w = (1.0 - beta1) if moving_average_for_momentum else 1.0
shampoo_update_with_wd_momentum = (
state.momentum.to_float() * beta1 + w * shampoo_update_with_wd
)
grafting_update_with_wd_momentum = (
state.diagonal_momentum.to_float() * beta1 + w * grafting_update_with_wd
)
run_shampoo = (step >= start_preconditioning_step).astype(
grafting_update_with_wd_momentum.dtype
)
momentum_update = (
run_shampoo * shampoo_update_with_wd_momentum
+ (1.0 - run_shampoo) * grafting_update_with_wd_momentum
)
wd_update = (
run_shampoo * shampoo_update_with_wd
+ (1.0 - run_shampoo) * grafting_update_with_wd
)
nesterov_momentum_update = momentum_update
if nesterov:
nesterov_momentum_update = w * wd_update + beta1 * momentum_update
if weight_decay != 0 and decoupled_weight_decay:
nesterov_momentum_update = (
nesterov_momentum_update + lr * weight_decay * param
)
momentum_multiplier = lr if decoupled_learning_rate else 1.0
transformed_update = -1.0 * momentum_multiplier * nesterov_momentum_update
new_diagonal_momentum = grafting_update_with_wd_momentum
new_momentum = shampoo_update_with_wd_momentum
param_stats = ParameterStats(
_quantize_diagonal_statistics(new_diagonal_statistics),
state.statistics,
state.preconditioners,
_quantize_momentum(new_diagonal_momentum),
_quantize_momentum(new_momentum),
state.training_metrics,
)
return transformed_update, param_stats
def update_fn(grads, state, params):
"""Transform the input gradient and update all statistics.
Args:
grads: the gradient tensors for the parameters
and any custom gradients for preconditioners.
state: a named tuple containing the state of the optimizer
params: the parameters that should be updated.
Returns:
A tuple containing the new parameters and the new optimizer state.
"""
params_flat, treedef = jax.tree_flatten(params)
stats_flat = treedef.flatten_up_to(state.stats)
grads_flat = treedef.flatten_up_to(grads)
stats_grads = grads_flat
new_stats_flat = jax.tree_map(
lambda g, s, p: _compute_stats(g, s, p, state.count),
stats_grads,
stats_flat,
params_flat,
)
new_stats_flat = _compute_preconditioners(
new_stats_flat, params_flat, state.count
)
outputs = jax.tree_map(
lambda g, s, p: _transform_grad(g, s, p, state.count),
grads_flat,
new_stats_flat,
params_flat,
)
updates_flat, new_stats_flat = list(zip(*outputs)) if outputs else ((), ())
updates = jax.tree_unflatten(treedef, updates_flat)
new_stats = jax.tree_unflatten(treedef, new_stats_flat)
new_state = ShampooState(count=state.count + 1, stats=new_stats)
return updates, new_state
if shard_optimizer_states:
# Hijacks the init_fn signature so we can return an OptState with
# appropriate init_fns.
opt_init_fn = sharded_init_fn
def _init_fns(unused_params):
return InitFnState(
init_fn=opt_init_fn,
pspec_fn=sharded_init_partition_spec_fn,
shape_and_dtype_fn=sharded_init_shape_and_dtype_fn,
)
opt_update_fn = sharded_update_fn
return optax.GradientTransformation(_init_fns, opt_update_fn)
else:
return optax.GradientTransformation(init_fn, update_fn) | Distributed Shampoo optimizer.
Distributed Shampoo is a second-order preconditioned method (concretely, a
variant of full-matrix Adagrad), that provides significant convergence and
wall-clock time improvements compared to conventional first-order methods,
and that has been shown to scale to large state-of-the-art deep learning
models.
References:
Scalable Second Order Optimization for Deep Learning,
Rohan Anil, Vineet Gupta, Tomer Koren, Kevin Regan, Yoram Singer
Preprint: https://arxiv.org/abs/2002.09018
Args:
learning_rate: the step size used to update the parameters.
block_size: Block size for large layers (if > 0). Preconditioning compute
operation is cubic in the dimension of the tensor. Block size allows us to
chunk the layers into sub-layers of maximal dimension dictated by this
value. Use 128 as default (increase if you have compute budget).
beta1: momentum parameter.
beta2: second moment averaging parameter.
diagonal_epsilon: epsilon for diagonal adagrad (only if layerwise grafting
to AdaGrad is enabled).
matrix_epsilon: epsilon to add to statistics before computing inverse pth
root. If you are running in f32 precision for inverse pth root
(recommended today) this can go up to 1e-6. If you have the latest hardware
with native f64 precision, set this up to 1e-12.
weight_decay: Weight decay for regularization.
start_preconditioning_step: When to start the Shampoo update, before which the
diagonal update is used. This is because we don't have enough information
to do a stable inverse.
preconditioning_compute_steps: How often to compute preconditioner.
Performance tuning params for controlling memory and compute requirements.
Ideally set this and statistics_compute_steps params to 1.
statistics_compute_steps: How often to compute statistics.
best_effort_shape_interpretation: If there are some small dimensions,
collapse them e.g. [1, 2, 512, 1, 2048, 1, 3, 4] --> [1024, 2048, 12] if
block = 1024, [1, 2, 768, 1, 2048] --> [2, 768, 2048]
graft_type: Grafting is a technique to fix the layerwise scale of the Shampoo
optimizer. This allows us to plug the Shampoo optimizer into settings
where SGD/AdaGrad is already well tuned.
nesterov: Nesterov momentum.
exponent_override: Override the exponent used in matrix inverse.
batch_axis_name: labeled axis over pmap for the data-parallel training the
optimizer is used for.
statistics_partition_spec: PartitionSpec to be used in sharded mode.
preconditioner_partition_spec: PartitionSpec to be used in sharded mode.
num_devices_for_pjit: Number of devices to parallelize over when using pjit.
shard_optimizer_states: Shard optimizer states to save memory in model
parallel training.
best_effort_memory_usage_reduction: Best effort memory usage reduction:
- diagonal_statistics -> jnp.bfloat16
- momentum buffers (2x) -> jnp.int8
- statistics, preconditioners -> jnp.int16 + diagonals
inverse_failure_threshold: numerics are hard and inverses fail sometimes; we
determine that using this threshold.
moving_average_for_momentum: Whether to use moving average for momentum
instead of exponential moving average.
skip_preconditioning_dim_size_gt: Skip if preconditioning dim size is
greater than this value.
clip_by_scaled_gradient_norm: Clip by scaled gradient norm (only useful when
using RMSProp Grafting).
precision: precision XLA related flag, the available options are: a)
lax.Precision.DEFAULT (better step time, but not precise) b)
lax.Precision.HIGH (increased precision, slower) c) lax.Precision.HIGHEST
(best possible precision, slowest)
tensordot_precision: Optional precision to use for the tensordot operation
when computing statistics (e.g., G Gᵀ). Same options as `precision` above.
relative_matrix_epsilon: Whether to use an epsilon relative to the max
eigenvalue when computing the inverse-pth root.
merge_small_dims_block_size: Used as the maximum block size
to merge the shapes.
lobpcg_topk_precondition: If nonzero, specifies the number of top
eigenvectors to subtract out before performing LOBPCG. Note this makes
relative_matrix_epsilon essentially free.
lobpcg_max_iter: Number of LOBPCG iterations, if zero defaults to
`lobpcg_topk_precondition`.
precondtioner_type: Preconditioner type to select all, left only or right
only preconditioners.
skip_preconditioning_rank_lt: Skips preconditioning for parameters with
rank less than this value.
decoupled_learning_rate: If True, use decoupled learning rate, otherwise
couple it with preconditioned gradient computation. (Default True)
decoupled_weight_decay: If True, use decoupled weight decay, otherwise
couple with weight decay. (Default False)
Returns:
a GradientTransformation. | distributed_shampoo | python | borisdayma/dalle-mini | tools/train/scalable_shampoo/distributed_shampoo.py | https://github.com/borisdayma/dalle-mini/blob/master/tools/train/scalable_shampoo/distributed_shampoo.py | Apache-2.0 |
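A hedged usage sketch of the factory above in single-host mode; the hyperparameters and parameter shapes are illustrative only, and with batch_axis_name set the update call would instead have to run inside pmap:
import jax
import jax.numpy as jnp
import optax
opt = distributed_shampoo(
    learning_rate=1e-3,
    block_size=128,
    beta1=0.9,
    beta2=0.999,
    start_preconditioning_step=5,
    graft_type=GraftingType.SGD,
    batch_axis_name=None,  # no pmap axis in this sketch
)
params = {"kernel": jnp.ones((64, 64)), "bias": jnp.zeros((64,))}
opt_state = opt.init(params)
grads = jax.tree_util.tree_map(lambda p: 0.01 * jnp.ones_like(p), params)
updates, opt_state = opt.update(grads, opt_state, params)
params = optax.apply_updates(params, updates)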
def quantize(cls, fvalue, quantized_dtype, extract_diagonal=False):
"""Returns quantized value and the bucket."""
if quantized_dtype == jnp.float32:
return fvalue, [], []
elif quantized_dtype == jnp.bfloat16:
return fvalue.astype(jnp.bfloat16), [], []
float_dtype = fvalue.dtype
if quantized_dtype == jnp.int8:
# value -128 is not used.
num_buckets = jnp.array(127.0, dtype=float_dtype)
elif quantized_dtype == jnp.int16:
# value -32768 is not used.
num_buckets = jnp.array(32767.0, dtype=float_dtype)
else:
raise ValueError(f"Quantized dtype {quantized_dtype} not supported.")
# max value is mapped to num_buckets
if extract_diagonal and fvalue.ndim != 2:
raise ValueError(
f"Input array {fvalue} must be 2D to work with extract_diagonal."
)
diagonal_fvalue = []
if extract_diagonal:
diagonal_fvalue = jnp.diag(fvalue)
# Remove the diagonal entries.
fvalue = fvalue - jnp.diag(diagonal_fvalue)
# TODO(rohananil): Extend this by making use of information about the blocks
# SM3 style which will be useful for diagonal statistics
# We first decide the scale.
if fvalue.ndim < 1:
raise ValueError(
f"Input array {fvalue} must have a strictly positive number of dimensions."
)
max_abs = jnp.max(jnp.abs(fvalue), axis=0)
bucket_size = max_abs / num_buckets
bs_expanded = bucket_size[jnp.newaxis, Ellipsis]
# To avoid divide by 0.0
bs_nonzero = jnp.where(
bs_expanded > 0.0, bs_expanded, jnp.ones_like(bs_expanded)
)
ratio = fvalue / bs_nonzero
# We use rounding to remove bias.
quantized = jnp.round(ratio)
return quantized.astype(quantized_dtype), diagonal_fvalue, bucket_size | Returns quantized value and the bucket. | quantize | python | borisdayma/dalle-mini | tools/train/scalable_shampoo/quantization_utils.py | https://github.com/borisdayma/dalle-mini/blob/master/tools/train/scalable_shampoo/quantization_utils.py | Apache-2.0 |
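The bucketing in quantize, reduced to its int8 essentials (no diagonal extraction); names and values here are illustrative, not part of the module:
import jax.numpy as jnp
x = jnp.array([[0.5, -1.0], [0.25, 2.0]])
num_buckets = 127.0                              # int8, value -128 unused
max_abs = jnp.max(jnp.abs(x), axis=0)            # per-column scale
bucket_size = max_abs / num_buckets
quantized = jnp.round(x / bucket_size[jnp.newaxis, :]).astype(jnp.int8)
restored = quantized.astype(jnp.float32) * bucket_size[jnp.newaxis, :]
# abs(x - restored) is at most half a bucket in each column.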
def to_float(self):
"""Returns the float value."""
if isinstance(self.quantized, list) and not self.quantized:
return self.quantized
if self.quantized_dtype == jnp.float32:
return self.quantized
if self.quantized_dtype == jnp.bfloat16:
return self.quantized.astype(jnp.float32)
float_dtype = self.bucket_size.dtype
bucket_size = self.bucket_size[jnp.newaxis, Ellipsis]
val = self.quantized.astype(float_dtype) * bucket_size
if self.extract_diagonal:
val += jnp.diag(self.diagonal)
return val | Returns the float value. | to_float | python | borisdayma/dalle-mini | tools/train/scalable_shampoo/quantization_utils.py | https://github.com/borisdayma/dalle-mini/blob/master/tools/train/scalable_shampoo/quantization_utils.py | Apache-2.0 |
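A round trip through the class API, exercising from_float_value and to_float together (assumes the QuantizedValue class above is in scope); the error bound in the comment is an assumption based on the rounding scheme above:
import jax.numpy as jnp
x = jnp.linspace(-1.0, 1.0, 12).reshape(3, 4)
q = QuantizedValue.from_float_value(x, jnp.int8, extract_diagonal=False)
x_restored = q.to_float()
max_err = jnp.max(jnp.abs(x - x_restored))  # bounded by half a bucket per column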
def init_fn(params):
"""Initialise the optimiser's state."""
def _init(param):
accumulators = [jnp.zeros([s]) for s in param.shape]
momentum = _quantize_momentum(jnp.zeros_like(param))
return ParameterStats(accumulators, momentum)
return SM3State(
count=jnp.zeros([], jnp.int32), stats=jax.tree_map(_init, params)
) | Initialise the optimiser's state. | sm3.init_fn | python | borisdayma/dalle-mini | tools/train/scalable_shampoo/sm3.py | https://github.com/borisdayma/dalle-mini/blob/master/tools/train/scalable_shampoo/sm3.py | Apache-2.0 |
def sm3(
learning_rate, beta1=0.9, beta2=0.999, diagonal_epsilon=1e-10, normalize_grads=False
):
"""SM3 optimizer.
Memory-Efficient Adaptive Optimization, Rohan Anil, Vineet Gupta, Tomer Koren,
Yoram Singer
https://arxiv.org/abs/1901.11150
Args:
learning_rate: the step size used to update the parameters.
beta1: momentum parameter.
beta2: second moment averaging parameter.
diagonal_epsilon: epsilon for sm3
normalize_grads: Whether to normalize grads. Author finds it useful when
grads have high variance.
Returns:
a GradientTransformation.
"""
def _quantize_momentum(momentum_statistics):
return QuantizedValue.from_float_value(momentum_statistics, jnp.int8)
def init_fn(params):
"""Initialise the optimiser's state."""
def _init(param):
accumulators = [jnp.zeros([s]) for s in param.shape]
momentum = _quantize_momentum(jnp.zeros_like(param))
return ParameterStats(accumulators, momentum)
return SM3State(
count=jnp.zeros([], jnp.int32), stats=jax.tree_map(_init, params)
)
def _get_expanded_shape(shape, i):
rank = len(shape)
# Replaces a `shape` of [M, N, K] with 1 in all dimensions except for i.
# E.g., i = 1 returns [1, N, 1].
return [1] * i + [shape[i]] + [1] * (rank - i - 1)
def _moving_averages(grad, accumulators):
w = (1.0 - beta2) if beta2 != 1.0 else 1.0
if grad.ndim < 2:
return beta2 * accumulators[0] + w * grad**2
else:
min_accumulator = functools.reduce(jnp.minimum, accumulators)
return beta2 * min_accumulator + w * grad**2
def _moving_averages_momentum(grad, momentum):
w = (1.0 - beta1) if beta1 != 1.0 else 1.0
return beta1 * momentum.to_float() + w * grad
def _sketch_diagonal_statistics(grad, updated_diagonal_statistics):
all_diagonal_statistics = []
for i in range(grad.ndim):
axes = list(range(i)) + list(range(i + 1, grad.ndim))
dim_diagonal_statistics = jnp.max(updated_diagonal_statistics, axis=axes)
all_diagonal_statistics.append(dim_diagonal_statistics)
if grad.ndim == 1:
all_diagonal_statistics[0] = updated_diagonal_statistics
return all_diagonal_statistics
def update_fn(updates, state, params=None):
del params
stats = state.stats
if normalize_grads:
updates = jax.tree_map(lambda g: g / (jnp.linalg.norm(g) + 1e-16), updates)
# Reshape all vectors into N-d tensors to compute min over them.
# [n], [m] -> [n, 1], [1, m]
expanded_diagonal_statistics = jax.tree_map(
lambda grad, state: [ # pylint:disable=g-long-lambda
jnp.reshape(
state.diagonal_statistics[i], _get_expanded_shape(grad.shape, i)
)
for i in range(grad.ndim)
],
updates,
stats,
)
# Compute new diagonal statistics
new_diagonal_statistics = jax.tree_map(
_moving_averages, updates, expanded_diagonal_statistics
)
# Compute preconditioners (1/sqrt(s)) where s is the statistics.
new_preconditioners = jax.tree_map(
lambda t: 1.0 / jnp.sqrt(t + diagonal_epsilon), new_diagonal_statistics
)
preconditioned_grads = jax.tree_map(
lambda g, p: g * p, updates, new_preconditioners
)
# Compute updated momentum (also handle quantization)
updated_momentum = jax.tree_map(
lambda preconditioned_grad, state: _moving_averages_momentum( # pylint:disable=g-long-lambda
preconditioned_grad, state.diagonal_momentum
),
preconditioned_grads,
stats,
)
# Update diagonal statistics.
updated_diagonal_statistics = jax.tree_map(
_sketch_diagonal_statistics, updates, new_diagonal_statistics
)
# Update momentum.
new_sm3_stats = jax.tree_map(
lambda momentum, diagonal_stats: ParameterStats( # pylint:disable=g-long-lambda
diagonal_stats, _quantize_momentum(momentum)
),
updated_momentum,
updated_diagonal_statistics,
)
lr = learning_rate
if callable(learning_rate):
lr = learning_rate(state.count)
new_updates = jax.tree_map(lambda pg: -lr * pg, updated_momentum)
return new_updates, SM3State(count=state.count + 1, stats=new_sm3_stats)
return optax.GradientTransformation(init_fn, update_fn) | SM3 optimizer.
Memory-Efficient Adaptive Optimization, Rohan Anil, Vineet Gupta, Tomer Koren,
Yoram Singer
https://arxiv.org/abs/1901.11150
Args:
learning_rate: the step size used to update the parameters.
beta1: momentum parameter.
beta2: second moment averaging parameter.
diagonal_epsilon: epsilon for sm3
normalize_grads: Whether to normalize grads. Author finds it useful when
grads are high variance.
Returns:
a GradientTransformation. | sm3 | python | borisdayma/dalle-mini | tools/train/scalable_shampoo/sm3.py | https://github.com/borisdayma/dalle-mini/blob/master/tools/train/scalable_shampoo/sm3.py | Apache-2.0 |
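A minimal usage sketch, assuming the file tools/train/scalable_shampoo/sm3.py above is importable as `sm3`: the returned optax.GradientTransformation plugs into the standard init/update loop; hyperparameter values here are illustrative.
import jax
import jax.numpy as jnp
import optax
from sm3 import sm3  # assumption: the module name matches the file above

params = {"w": jnp.ones((4, 3)), "b": jnp.zeros((3,))}
tx = sm3(learning_rate=0.1, beta1=0.9, beta2=0.999)
opt_state = tx.init(params)

def loss_fn(p):
    return jnp.sum(p["w"] ** 2) + jnp.sum(p["b"] ** 2)

grads = jax.grad(loss_fn)(params)
updates, opt_state = tx.update(grads, opt_state)
params = optax.apply_updates(params, updates)  # updates already carry the -lr factor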
def product_with_transpose(
mat1,
mat2,
axes,
precision=lax.Precision.DEFAULT,
):
"""Returns mat1 * mat2^T for two matrices (possibly batched).
The rows and columns are the last two dimensions for each matrix.
Args:
mat1: First matrix.
mat2: Second matrix.
axes: The axes over which to apply the product.
precision: JAX precision to use for the multiplication.
"""
return jnp.tensordot(a=mat1, b=mat2, axes=axes, precision=precision) | Returns mat1 * mat2^T for two matrices (possibly batched).
The rows and columns are the last two dimensions for each matrix.
Args:
mat1: First matrix.
mat2: Second matrix.
axes: The axes over which to apply the product.
precision: JAX precision to use for the multiplication. | product_with_transpose | python | borisdayma/dalle-mini | tools/train/scalable_shampoo/symmetric_matrices/symmetric_matrices.py | https://github.com/borisdayma/dalle-mini/blob/master/tools/train/scalable_shampoo/symmetric_matrices/symmetric_matrices.py | Apache-2.0 |
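A quick standalone check of the contraction above: for 2-D inputs contracted over their last axis, the tensordot is exactly mat1 @ mat2.T.
import numpy as np
import jax.numpy as jnp

rng = np.random.default_rng(0)
m1 = jnp.asarray(rng.standard_normal((3, 5)))
m2 = jnp.asarray(rng.standard_normal((4, 5)))
out = jnp.tensordot(m1, m2, axes=((1,), (1,)))  # contract the shared last axis
assert out.shape == (3, 4)
assert jnp.allclose(out, m1 @ m2.T, atol=1e-5)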
def sliced_transposed_product(
mat,
block_size,
axes=(-1,),
precision=lax.Precision.DEFAULT,
):
"""Returns the blocked slices representing a symmetric contraction.
Specifically, the output is a contraction of the input mat with itself, in the
specified axes.
Args:
mat: The matrix for which we will compute a contraction with itself.
block_size: The size of row blocks to compute.
axes: Axes to use for the contraction.
precision: The precision to use in each computation.
Raises:
ValueError: Raised when the specified block size does not evenly divide
the number of rows of the input mat.
"""
rank = len(mat.shape)
def _make_axis_positive(ax):
assert -rank <= ax < rank
return ax + rank if ax < 0 else ax
positive_axes = [_make_axis_positive(ax) for ax in axes]
assert len(positive_axes) == len(axes)
remaining_axes = set(range(rank)) - set(positive_axes)
assert len(remaining_axes) == 1
remaining_ax = remaining_axes.pop()
num_rows = mat.shape[remaining_ax]
if num_rows % block_size != 0:
raise ValueError(
"The row dimension must be divisible by block_size. "
f"Instead got row dimension={num_rows} and block_size={block_size}."
)
block_rows = []
for i in range(num_rows // block_size):
start_indices = [0] * rank
start_indices[remaining_ax] = i * block_size
slice_sizes = list(mat.shape)
slice_sizes[remaining_ax] = block_size
slice_sizes_full = list(mat.shape)
slice_sizes_full[remaining_ax] = (i + 1) * block_size
block_rows.append(
product_with_transpose(
lax.dynamic_slice(
mat, start_indices=start_indices, slice_sizes=slice_sizes
),
lax.dynamic_slice(
mat, start_indices=[0] * rank, slice_sizes=slice_sizes_full
),
axes=(axes, axes),
precision=precision,
)
)
return SlicedSymmetricMatrix(block_rows=block_rows) | Returns the blocked slices representing a symmetric contraction.
Specifically, the output is a contraction of the input mat with itself, in the
specified axes.
Args:
mat: The matrix for which we will compute a contraction with itself.
block_size: The size of row blocks to compute.
axes: Axes to use for the contraction.
precision: The precision to use in each computation.
Raises:
ValueError: Raised when the specified block size does not evenly divide
the number of rows of the input mat. | sliced_transposed_product | python | borisdayma/dalle-mini | tools/train/scalable_shampoo/symmetric_matrices/symmetric_matrices.py | https://github.com/borisdayma/dalle-mini/blob/master/tools/train/scalable_shampoo/symmetric_matrices/symmetric_matrices.py | Apache-2.0 |
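A standalone check (inlining the slicing with jnp rather than importing the module) that each block row produced above matches the corresponding rows of G @ G.T truncated to their lower-triangular columns.
import numpy as np
import jax.numpy as jnp

rng = np.random.default_rng(0)
g = jnp.asarray(rng.standard_normal((6, 5)))  # 6 rows; block_size 2 divides 6
b = 2
full = g @ g.T  # the dense symmetric matrix
for k in range(6 // b):
    # k-th block row: rows [k*b, (k+1)*b) against the first (k+1)*b rows
    block_row = jnp.tensordot(g[k * b:(k + 1) * b], g[: (k + 1) * b], axes=((1,), (1,)))
    assert jnp.allclose(block_row, full[k * b:(k + 1) * b, : (k + 1) * b], atol=1e-5)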
def sliced_transposed_product_concat(
mat,
block_size,
axes=(-1,),
precision=lax.Precision.DEFAULT,
):
"""Returns the concatenated slices representing mat*mat^T.
Args:
mat: The matrix for which we will compute mat*mat^T. It does not need to be
square, and may be batched.
block_size: The size of row blocks to compute.
axes: Axes to use for the contraction.
precision: The precision to use in each computation.
Raises:
ValueError: Raised when the specified block size does not evenly divide
the number of rows of the input mat.
"""
sliced_symmetric_matrix = sliced_transposed_product(
mat=mat, block_size=block_size, axes=axes, precision=precision
)
return jnp.concatenate(sliced_symmetric_matrix.block_rows, axis=-1) | Returns the concatenated slices representing mat*mat^T.
Args:
mat: The matrix for which we will compute mat*mat^T. It does not need to be
square, and may be batched.
block_size: The size of row blocks to compute.
axes: Axes to use for the contraction.
precision: The precision to use in each computation.
Raises:
ValueError: Raised when the specified block size does not evenly divide
the number of rows of the input mat. | sliced_transposed_product_concat | python | borisdayma/dalle-mini | tools/train/scalable_shampoo/symmetric_matrices/symmetric_matrices.py | https://github.com/borisdayma/dalle-mini/blob/master/tools/train/scalable_shampoo/symmetric_matrices/symmetric_matrices.py | Apache-2.0 |
def materialize_matrix(symmetric_matrix):
"""Returns a materialized symmetric matrix.
Args:
symmetric_matrix: the matrix represented by lower-triangular block slices.
"""
block_rows = symmetric_matrix.block_rows
block_size = block_rows[0].shape[-2]
num_blocks = len(block_rows)
# Slice the lower-triangular and diagonal blocks into blocks.
blocks = [
[
block_row[Ellipsis, i * block_size : (i + 1) * block_size]
for i in range(k + 1)
]
for k, block_row in enumerate(block_rows)
]
# Generate the (off-diagonal) upper-triangular blocks.
off_diags = [[] for _ in range(num_blocks - 1)]
for k, block_row in enumerate(block_rows[1:]):
for i in range(k + 1):
off_diags[i].append(
jnp.swapaxes(
a=block_row[Ellipsis, i * block_size : (i + 1) * block_size],
axis1=-1,
axis2=-2,
)
)
return jnp.block(
[row + row_t for row, row_t in zip(blocks[:-1], off_diags)] + [blocks[-1]]
) | Returns a materialized symmetric matrix.
Args:
symmetric_matrix: the matrix represented by lower-triangular block slices. | materialize_matrix | python | borisdayma/dalle-mini | tools/train/scalable_shampoo/symmetric_matrices/symmetric_matrices.py | https://github.com/borisdayma/dalle-mini/blob/master/tools/train/scalable_shampoo/symmetric_matrices/symmetric_matrices.py | Apache-2.0 |
def materialize_matrix_from_concat(
block_rows_concat,
num_blocks=None,
):
"""Returns a materialized symmetric matrix from concatenated slices.
Args:
block_rows_concat: The matrix represented as the concatenated
lower-triangular blocks.
num_blocks: The number of block-rows used to represent the symmetric matrix.
If not specified, it is inferred from the shape of block_rows_concat.
"""
if num_blocks is None:
num_blocks = find_num_blocks(block_rows_concat)
block_size = block_rows_concat.shape[-2]
block_rows = [
block_rows_concat[
Ellipsis,
(k * (k + 1))
// 2
* block_size : (((k + 1) * (k + 2)) // 2 + 1)
* block_size,
]
for k in range(num_blocks)
]
return materialize_matrix(SlicedSymmetricMatrix(block_rows=block_rows)) | Returns a materialized symmetric matrix from concatenated slices.
Args:
block_rows_concat: The matrix represented as the concatenated
lower-triangular blocks.
num_blocks: The number of block-rows used to represent the symmetric matrix.
If not specified, it is inferred from the shape of block_rows_concat. | materialize_matrix_from_concat | python | borisdayma/dalle-mini | tools/train/scalable_shampoo/symmetric_matrices/symmetric_matrices.py | https://github.com/borisdayma/dalle-mini/blob/master/tools/train/scalable_shampoo/symmetric_matrices/symmetric_matrices.py | Apache-2.0 |
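A round-trip sketch, assuming the file symmetric_matrices.py above is importable as `symmetric_matrices`: the concatenated block rows of G @ G.T materialize back to the dense symmetric matrix.
import numpy as np
import jax.numpy as jnp
import symmetric_matrices as sm  # assumption: the module name matches the file above

g = jnp.asarray(np.random.default_rng(0).standard_normal((8, 3)))
concat = sm.sliced_transposed_product_concat(g, block_size=4)  # shape (4, 4 + 8)
dense = sm.materialize_matrix_from_concat(concat)              # shape (8, 8)
assert jnp.allclose(dense, g @ g.T, atol=1e-5)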
def update_sliced_rows(
symmetric_matrix,
mat,
alpha,
beta,
axes=(-1,),
):
"""Implements the blocked equivalent of SYRK.
Specifically, the symmetric matrix (represented using lower-triangular block
rows) is updated using the sliced product of mat.
Args:
symmetric_matrix: The symmetric matrix to update.
mat: The matrix to use for the update = mat * mat^T. The number of rows
should match that of symmetric_matrix.
alpha: The weight for the update.
beta: The weight for the original symmetric matrix.
axes: Axes to use for the contraction of the update.
Returns:
The updated rows of alpha * mat * mat^T + beta * symmetric_matrix.
"""
block_size = symmetric_matrix.block_rows[0].shape[-2]
sym_prod = sliced_transposed_product(mat=mat, block_size=block_size, axes=axes)
return SlicedSymmetricMatrix(
block_rows=[
update * alpha + row * beta
for update, row in zip(sym_prod.block_rows, symmetric_matrix.block_rows)
]
) | Implements the blocked equivalent of SYRK.
Specifically, the symmetric matrix (represented using lower-triangular block
rows) is updated using the sliced product of mat.
Args:
symmetric_matrix: The symmetric matrix to update.
mat: The matrix to use for the update = mat * mat^T. The number of rows
should match that of symmetric_matrix.
alpha: The weight for the update.
beta: The weight for the original symmetric matrix.
axes: Axes to use for the contraction of the update.
Returns:
The updated rows of alpha * mat * mat^T + beta * symmetric_matrix. | update_sliced_rows | python | borisdayma/dalle-mini | tools/train/scalable_shampoo/symmetric_matrices/symmetric_matrices.py | https://github.com/borisdayma/dalle-mini/blob/master/tools/train/scalable_shampoo/symmetric_matrices/symmetric_matrices.py | Apache-2.0 |
def num_blocks_from_total_blocks(total_blocks):
"""Returns the number of blocks (i.e.
block rows) from the total blocks.
This is the inverse of the function x -> x*(x+1)/2.
For example, the matrix M = [[A, B^T], [B, C]] may be represented using a
total of 3 blocks ([A, B, C]). The number of corresponding block rows is 2.
Args:
total_blocks: The total blocks used to represent the matrix.
"""
num_blocks = np.round((np.sqrt(8 * total_blocks + 1) - 1) / 2).astype(np.int32)
if (num_blocks * (num_blocks + 1)) / 2 != total_blocks:
raise ValueError(
f"total_blocks={total_blocks} does not correspond to "
"a symmetric matrix. It must have the form total_blocks = x*(x+1)/2."
)
    return num_blocks | Returns the number of blocks (i.e. block rows) from the total blocks.
This is the inverse of the function x -> x*(x+1)/2.
For example, the matrix M = [[A, B^T], [B, C]] may be represented using a
total of 3 blocks ([A, B, C]). The number of corresponding block rows is 2.
Args:
total_blocks: The total blocks used to represent the matrix. | num_blocks_from_total_blocks | python | borisdayma/dalle-mini | tools/train/scalable_shampoo/symmetric_matrices/symmetric_matrices.py | https://github.com/borisdayma/dalle-mini/blob/master/tools/train/scalable_shampoo/symmetric_matrices/symmetric_matrices.py | Apache-2.0 |
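A worked check of the inverse formula above (standalone, plain NumPy): x block rows store x*(x+1)/2 blocks, and x = (sqrt(8*total + 1) - 1) / 2 recovers the count.
import numpy as np

for x in range(1, 6):
    total = x * (x + 1) // 2  # 1, 3, 6, 10, 15 ...
    recovered = int(round((np.sqrt(8 * total + 1) - 1) / 2))
    assert recovered == x
# e.g. 10 stored blocks [A..J] correspond to 4 block rows (1 + 2 + 3 + 4)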
def find_num_blocks(block_rows_concat):
"""Returns the number of (row) blocks representing the concatenated matrix.
For example, an input with dimensions [256, 2560] represents 10 square blocks,
which matches 4 lower-triangular block rows (1+2+3+4). So this function will
return 4.
Use ordinary numpy functions here so that the returned value is static.
Args:
block_rows_concat: The concatenated block array.
Raises:
ValueError: When the dimensions of the matrix do not correspond to a lower
triangular block representation.
"""
# Compute the number of square blocks used to represent the matrix.
total_blocks = block_rows_concat.shape[-1] / block_rows_concat.shape[-2]
# Determine the number of block rows by inverting y = x*(x+1)/2.
return num_blocks_from_total_blocks(total_blocks) | Returns the number of (row) blocks representing the concatenated matrix.
For example, an input with dimensions [256, 2560] represents 10 square blocks,
which matches 4 lower-triangular block rows (1+2+3+4). So this function will
return 4.
Use ordinary numpy functions here so that the returned value is static.
Args:
block_rows_concat: The concatenated block array.
Raises:
ValueError: When the dimensions of the matrix do not correspond to a lower
triangular block representation. | find_num_blocks | python | borisdayma/dalle-mini | tools/train/scalable_shampoo/symmetric_matrices/symmetric_matrices.py | https://github.com/borisdayma/dalle-mini/blob/master/tools/train/scalable_shampoo/symmetric_matrices/symmetric_matrices.py | Apache-2.0 |
def slice_symmetric_matrix(
mat,
block_size,
):
"""Returns sliced row blocks.
Args:
mat: A symmetric matrix.
block_size: The size of the row slices.
"""
num_rows = mat.shape[-2]
num_cols = mat.shape[-1]
if num_rows != num_cols:
raise ValueError("mat is not square.")
if num_rows % block_size != 0:
raise ValueError(
f"block size does not evenly divide rows. num_rows={num_rows}, block_size={block_size}"
)
return SlicedSymmetricMatrix(
block_rows=[
mat[
Ellipsis,
i * block_size : (i + 1) * block_size,
0 : (i + 1) * block_size,
]
for i in range(num_rows // block_size)
]
) | Returns sliced row blocks.
Args:
mat: A symmetric matrix.
block_size: The size of the row slices. | slice_symmetric_matrix | python | borisdayma/dalle-mini | tools/train/scalable_shampoo/symmetric_matrices/symmetric_matrices.py | https://github.com/borisdayma/dalle-mini/blob/master/tools/train/scalable_shampoo/symmetric_matrices/symmetric_matrices.py | Apache-2.0 |
def slice_symmetric_matrix_concat(
mat,
block_size,
):
"""Returns the concatenated sliced row blocks.
Args:
mat: A symmetric matrix.
block_size: The size of the row slices.
"""
sliced_symmetric_matrix = slice_symmetric_matrix(mat=mat, block_size=block_size)
return jnp.concatenate(sliced_symmetric_matrix.block_rows, axis=-1) | Returns the concatenated sliced row blocks.
Args:
mat: A symmetric matrix.
block_size: The size of the row slices. | slice_symmetric_matrix_concat | python | borisdayma/dalle-mini | tools/train/scalable_shampoo/symmetric_matrices/symmetric_matrices.py | https://github.com/borisdayma/dalle-mini/blob/master/tools/train/scalable_shampoo/symmetric_matrices/symmetric_matrices.py | Apache-2.0 |
def sliced_matrix_diag(mat):
"""Returns the diagonal of the symmetric matrix.
Args:
mat: The symmetric matrix represented in concatenated block form.
"""
rows, cols = mat.shape
total_blocks = cols // rows
num_blocks = num_blocks_from_total_blocks(total_blocks)
diags = []
for i in range(num_blocks):
last_index = rows * ((i + 2) * (i + 1)) // 2
first_index = last_index - rows
diags.append(jnp.diag(mat[Ellipsis, first_index:last_index]))
return jnp.concatenate(diags, axis=-1) | Returns the diagonal of the symmetric matrix.
Args:
mat: The symmetric matrix represented in concatenated block form. | sliced_matrix_diag | python | borisdayma/dalle-mini | tools/train/scalable_shampoo/symmetric_matrices/symmetric_matrices.py | https://github.com/borisdayma/dalle-mini/blob/master/tools/train/scalable_shampoo/symmetric_matrices/symmetric_matrices.py | Apache-2.0 |
def diag_as_concat(diag, block_size):
"""Returns the representation of a diagonal matrix in symmetric block form.
Args:
diag: The 1D array for the diagonals.
block_size: The size of blocks to use. Must divide the length of diag.
"""
assert len(diag.shape) == 1 # diag must be 1D.
assert len(diag) % block_size == 0
num_diag_blocks = len(diag) // block_size
blocks = []
for i in range(num_diag_blocks):
blocks.append(jnp.zeros(shape=(block_size, block_size * i), dtype=diag.dtype))
blocks.append(jnp.diag(diag[i * block_size : (i + 1) * block_size]))
return jnp.concatenate(blocks, axis=-1) | Returns the representation of a diagonal matrix in symmetric block form.
Args:
diag: The 1D array for the diagonals.
block_size: The size of blocks to use. Must divide the length of diag. | diag_as_concat | python | borisdayma/dalle-mini | tools/train/scalable_shampoo/symmetric_matrices/symmetric_matrices.py | https://github.com/borisdayma/dalle-mini/blob/master/tools/train/scalable_shampoo/symmetric_matrices/symmetric_matrices.py | Apache-2.0 |
def row_abs_maxes(mat):
"""Returns the max of the absolute values of the rows of the full matrix.
For example the symmetric matrix M = [[1, 6], [6, 2]] is represented using
mat = [1, 6, 2] with block_size = 1. In this case the function returns the
    absolute row maxes of the original symmetric matrix, [6, 6].
Args:
mat: The symmetric matrix represented as the concatenated blocks.
"""
rows, cols = mat.shape
# Find col and row max for each block.
col_maxes = []
row_maxes = []
for i in range(cols // rows):
block = jnp.abs(mat[Ellipsis, i * rows : (i + 1) * rows])
col_maxes.append(jnp.max(block, axis=1))
row_maxes.append(jnp.max(block, axis=0))
# global row max from block maxes.
num_blocks = num_blocks_from_total_blocks(cols // rows)
maxes = []
for i in range(num_blocks):
maxes.append(
jnp.concatenate(
row_maxes[(i * (i + 1) // 2) : ((i + 2) * (i + 1) // 2)]
+ [
col_maxes[((j + 1) * (j + 2)) // 2 - (j - i + 1)]
for j in range(i + 1, num_blocks)
],
axis=-1,
)
)
return jnp.max(jnp.stack(maxes), axis=0) | Returns the max of the absolute values of the rows of the full matrix.
For example the symmetric matrix M = [[1, 6], [6, 2]] is represented using
mat = [1, 6, 2] with block_size = 1. In this case the function returns the
absolute row maxes of the original symmetric matrix, [6, 6].
Args:
mat: The symmetric matrix represented as the concatenated blocks. | row_abs_maxes | python | borisdayma/dalle-mini | tools/train/scalable_shampoo/symmetric_matrices/symmetric_matrices.py | https://github.com/borisdayma/dalle-mini/blob/master/tools/train/scalable_shampoo/symmetric_matrices/symmetric_matrices.py | Apache-2.0 |
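The docstring's example run directly, assuming symmetric_matrices.py is importable as `symmetric_matrices`: M = [[1, 6], [6, 2]] stored with block_size = 1 is the concatenation [[1, 6, 2]], and the row maxes come out as [6, 6].
import jax.numpy as jnp
import symmetric_matrices as sm  # assumption: the module name matches the file above

mat = jnp.array([[1.0, 6.0, 2.0]])  # blocks [1] and [6, 2] with block_size = 1
print(sm.row_abs_maxes(mat))  # [6. 6.]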
def times_vector(mat, vec):
"""Returns the symmetric block-concatenated matrix multiplied by a vector.
Specifically, each value in the vector is multiplied by a row of the full
matrix. That is, the vector is broadcast and multiplied element-wise. Note
this would be the transpose of full_mat * vec if full_mat represented the full
symmetric matrix.
Args:
mat: The symmetric matrix represented as the concatenated blocks.
vec: The vector, having the same dimension as the materialized matrix.
"""
rows, cols = mat.shape
num_blocks = num_blocks_from_total_blocks(cols // rows)
multiplied = []
for i in range(num_blocks):
mat_block = mat[
Ellipsis, rows * ((i + 1) * i) // 2 : rows * ((i + 1) * (i + 2)) // 2
]
vec_block = vec[Ellipsis, rows * i : rows * (i + 1)]
multiplied.append(jnp.einsum("...ij,...i->ij", mat_block, vec_block))
return jnp.concatenate(multiplied, axis=-1) | Returns the symmetric block-concatenated matrix multiplied by a vector.
Specifically, each value in the vector is multiplied by a row of the full
matrix. That is, the vector is broadcast and multiplied element-wise. Note
this would be the transpose of full_mat * vec if full_mat represented the full
symmetric matrix.
Args:
mat: The symmetric matrix represented as the concatenated blocks.
vec: The vector, having the same dimension as the materialized matrix. | times_vector | python | borisdayma/dalle-mini | tools/train/scalable_shampoo/symmetric_matrices/symmetric_matrices.py | https://github.com/borisdayma/dalle-mini/blob/master/tools/train/scalable_shampoo/symmetric_matrices/symmetric_matrices.py | Apache-2.0 |
def _dataloader_datasets_non_streaming(
dataset: Dataset,
rng: jax.random.PRNGKey = None,
):
"""
Returns batches of size `batch_size` from truncated `dataset`, sharded over all local devices.
Shuffle batches if rng is set.
"""
steps_per_epoch = len(dataset) // batch_size
if rng is not None:
batch_idx = jax.random.permutation(rng, len(dataset))
else:
batch_idx = jnp.arange(len(dataset))
batch_idx = batch_idx[
: steps_per_epoch * batch_size
] # Skip incomplete batch.
batch_idx = batch_idx.reshape((steps_per_epoch, batch_size))
for idx in batch_idx:
batch = dataset[idx]
batch = {k: jnp.array(v) for k, v in batch.items()}
yield batch | Returns batches of size `batch_size` from truncated `dataset`, sharded over all local devices.
Shuffle batches if rng is set. | dataloader._dataloader_datasets_non_streaming | python | borisdayma/dalle-mini | src/dalle_mini/data.py | https://github.com/borisdayma/dalle-mini/blob/master/src/dalle_mini/data.py | Apache-2.0 |
def dataloader(self, split, batch_size, epoch=None):
def _dataloader_datasets_non_streaming(
dataset: Dataset,
rng: jax.random.PRNGKey = None,
):
"""
Returns batches of size `batch_size` from truncated `dataset`, sharded over all local devices.
Shuffle batches if rng is set.
"""
steps_per_epoch = len(dataset) // batch_size
if rng is not None:
batch_idx = jax.random.permutation(rng, len(dataset))
else:
batch_idx = jnp.arange(len(dataset))
batch_idx = batch_idx[
: steps_per_epoch * batch_size
] # Skip incomplete batch.
batch_idx = batch_idx.reshape((steps_per_epoch, batch_size))
for idx in batch_idx:
batch = dataset[idx]
batch = {k: jnp.array(v) for k, v in batch.items()}
yield batch
def _dataloader_datasets_streaming(
dataset: Dataset,
epoch: int,
):
keys = ["input_ids", "attention_mask", "labels", "decoder_input_ids"]
batch = {k: [] for k in keys}
first_loop = True # stop after one loop in some cases
while (self.multi_hosts and split == "train") or first_loop:
# in multi-host, we run forever (no epoch) as hosts need to stop
# at the same time and training data may not be split equally
# For validation data we put the entire batch on each host and then
# keep only the one specific to each host (could be improved but not necessary)
if epoch is not None:
assert split == "train"
# reshuffle training data at each epoch
dataset.set_epoch(epoch)
epoch += 1
for item in dataset:
for k in keys:
batch[k].append(item[k])
if len(batch[keys[0]]) == batch_size:
batch = {k: jnp.array(v) for k, v in batch.items()}
yield batch
batch = {k: [] for k in keys}
first_loop = False
if split == "train":
ds = self.train_dataset
elif split == "eval":
ds = self.eval_dataset
else:
ds = self.other_eval_datasets[split]
if self.streaming:
return _dataloader_datasets_streaming(ds, epoch)
else:
if split == "train":
self.rng_dataset, input_rng = jax.random.split(self.rng_dataset)
return _dataloader_datasets_non_streaming(ds, input_rng) | Returns batches of size `batch_size` from truncated `dataset`, sharded over all local devices.
Shuffle batches if rng is set. | dataloader | python | borisdayma/dalle-mini | src/dalle_mini/data.py | https://github.com/borisdayma/dalle-mini/blob/master/src/dalle_mini/data.py | Apache-2.0 |
def shift_tokens_right(input_ids: np.array, decoder_start_token_id: int):
"""
Shift input ids one token to the right.
"""
shifted_input_ids = np.zeros(input_ids.shape)
shifted_input_ids[:, 1:] = input_ids[:, :-1]
shifted_input_ids[:, 0] = decoder_start_token_id
return shifted_input_ids | Shift input ids one token to the right. | shift_tokens_right | python | borisdayma/dalle-mini | src/dalle_mini/data.py | https://github.com/borisdayma/dalle-mini/blob/master/src/dalle_mini/data.py | Apache-2.0 |
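A tiny worked example mirroring the function body above (standalone): the decoder input starts with the start token followed by the labels shifted right by one; the result is a float array because of np.zeros.
import numpy as np

input_ids = np.array([[5, 6, 7, 8]])
decoder_start_token_id = 0
shifted = np.zeros(input_ids.shape)
shifted[:, 1:] = input_ids[:, :-1]
shifted[:, 0] = decoder_start_token_id
print(shifted)  # [[0. 5. 6. 7.]]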
def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
"""
Initializes from a wandb artifact or delegates loading to the superclass.
"""
with tempfile.TemporaryDirectory() as tmp_dir: # avoid multiple artifact copies
if ":" in pretrained_model_name_or_path and not os.path.isdir(
pretrained_model_name_or_path
):
# wandb artifact
if wandb.run is not None:
artifact = wandb.run.use_artifact(pretrained_model_name_or_path)
else:
artifact = wandb.Api().artifact(pretrained_model_name_or_path)
pretrained_model_name_or_path = artifact.download(tmp_dir)
return super(PretrainedFromWandbMixin, cls).from_pretrained(
pretrained_model_name_or_path, *model_args, **kwargs
) | Initializes from a wandb artifact or delegates loading to the superclass. | from_pretrained | python | borisdayma/dalle-mini | src/dalle_mini/model/utils.py | https://github.com/borisdayma/dalle-mini/blob/master/src/dalle_mini/model/utils.py | Apache-2.0 |
def __call__(self, s):
"""Uses dynamic programming to infer the location of spaces in a string without spaces."""
l = [self._split(x) for x in self._SPLIT_RE.split(s)]
return " ".join([item for sublist in l for item in sublist]) | Uses dynamic programming to infer the location of spaces in a string without spaces. | __call__ | python | borisdayma/dalle-mini | src/dalle_mini/model/text.py | https://github.com/borisdayma/dalle-mini/blob/master/src/dalle_mini/model/text.py | Apache-2.0 |
def _match(qs, ks):
"""Return True if regexes in qs match any window of strings in tuple ks."""
# compile regexes and force complete match
qts = tuple(map(lambda x: re.compile(x + "$"), qs))
for i in range(len(ks) - len(qs) + 1):
matches = [x.match(y) for x, y in zip(qts, ks[i:])]
if matches and all(matches):
return True
return False | Return True if regexes in qs match any window of strings in tuple ks. | _match | python | borisdayma/dalle-mini | src/dalle_mini/model/partitions.py | https://github.com/borisdayma/dalle-mini/blob/master/src/dalle_mini/model/partitions.py | Apache-2.0 |
def smelu(beta: Any = 1.0):
"""
Implementation of "Real World Large Scale Recommendation Systems Reproducibility and Smooth Activations"
https://arxiv.org/abs/2202.06499
"""
@custom_jvp
@jax.jit
def _smelu(x: Any) -> Any:
x = jnp.where(x <= -beta, 0.0, x)
return jnp.where(x >= beta, x, jnp.square(x + beta) / (4 * beta))
_smelu.defjvps(
lambda g, ans, x: lax.select(
x == -beta,
lax.full_like(g, 0),
lax.select(x == beta, lax.full_like(g, 1), g),
)
)
return _smelu | Implementation of "Real World Large Scale Recommendation Systems Reproducibility and Smooth Activations"
https://arxiv.org/abs/2202.06499 | smelu | python | borisdayma/dalle-mini | src/dalle_mini/model/modeling.py | https://github.com/borisdayma/dalle-mini/blob/master/src/dalle_mini/model/modeling.py | Apache-2.0 |
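A numeric spot-check mirroring the inner `_smelu` body above (standalone, beta = 1, custom JVP omitted): the activation is quadratic, (x + beta)^2 / (4 * beta), below beta and linear at or above it, so smelu(0) = beta / 4 and smelu(x) = x for x >= beta.
import jax.numpy as jnp

beta = 1.0

def smelu_ref(x):
    # mirrors the inner _smelu body above, custom JVP omitted
    x = jnp.where(x <= -beta, 0.0, x)
    return jnp.where(x >= beta, x, jnp.square(x + beta) / (4 * beta))

print(smelu_ref(jnp.array([0.0, 0.5, 1.0, 3.0])))  # [0.25 0.5625 1. 3.]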
def dot_product_attention_weights(
query: Any,
key: Any,
bias: Optional[Any] = None,
mask: Optional[Any] = None,
embed_pos: Optional[Any] = None,
broadcast_dropout: bool = True,
dropout_rng: Optional[PRNGKey] = None,
dropout_rate: float = 0.0,
deterministic: bool = False,
dtype: Any = jnp.float32,
precision: PrecisionLike = None,
sinkhorn_iters: int = 1,
is_encoder: bool = False,
tau=None,
):
"""
Computes dot-product attention weights given query and key.
mask is included into the bias.
    Adapted from flax.linen.attention.dot_product_attention_weights
"""
assert query.ndim == key.ndim, "q, k must have same rank."
assert query.shape[:-3] == key.shape[:-3], "q, k batch dims must match."
assert query.shape[-2] == key.shape[-2], "q, k num_heads must match."
assert query.shape[-1] == key.shape[-1], "q, k depths must match."
# attn weight shape is (batch..., num_heads, q_length, kv_length)
attn_weights = jnp.einsum("...qhd,...khd->...hqk", query, key, precision=precision)
# divide by tau (used in Swin v2)
if tau is not None:
attn_weights = attn_weights / tau
else:
depth = query.shape[-1]
attn_weights = attn_weights / jnp.sqrt(depth).astype(dtype)
# apply attention bias: masking, dropout, proximity bias, etc.
if bias is not None:
attn_weights = attn_weights + bias
# add relative position
if embed_pos is not None:
attn_weights = attn_weights + embed_pos
# normalize the attention weights
if not is_encoder or sinkhorn_iters == 1:
# sinkhorn does not work for causal (leaks info of future tokens into past)
attn_weights = jax.nn.softmax(attn_weights).astype(dtype)
else:
# adapted from https://github.com/lucidrains/sinkhorn-transformer
for i in range(sinkhorn_iters):
# when causal, some attn_weights have been set to -inf through bias
if i % 2 == 0:
attn_weights -= jax.nn.logsumexp(attn_weights, axis=-1, keepdims=True)
else:
attn_weights -= jax.nn.logsumexp(attn_weights, axis=-2, keepdims=True)
if mask is not None:
attn_weights = jnp.where(mask, attn_weights, -jnp.inf)
attn_weights = jnp.exp(attn_weights).astype(dtype)
# apply attention dropout
if not deterministic and dropout_rate > 0.0:
keep_prob = 1.0 - dropout_rate
if broadcast_dropout:
# dropout is broadcast across the batch + head dimensions
dropout_shape = tuple([1] * (key.ndim - 2)) + attn_weights.shape[-2:]
keep = jax.random.bernoulli(dropout_rng, keep_prob, dropout_shape)
else:
keep = jax.random.bernoulli(dropout_rng, keep_prob, attn_weights.shape)
multiplier = keep.astype(attn_weights.dtype) / jnp.asarray(
keep_prob, dtype=dtype
)
attn_weights = attn_weights * multiplier
return attn_weights | Computes dot-product attention weights given query and key.
mask is included into the bias.
Adapted from flax.linen.attention.dot_product_attention_weights | dot_product_attention_weights | python | borisdayma/dalle-mini | src/dalle_mini/model/modeling.py | https://github.com/borisdayma/dalle-mini/blob/master/src/dalle_mini/model/modeling.py | Apache-2.0 |
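A standalone shape sketch of the default softmax path above: queries and keys laid out as (batch, length, heads, depth) yield weights of shape (batch, heads, q_length, kv_length) whose rows sum to 1.
import jax
import jax.numpy as jnp

batch, q_len, kv_len, heads, depth = 2, 5, 7, 4, 8
q = jax.random.normal(jax.random.PRNGKey(0), (batch, q_len, heads, depth))
k = jax.random.normal(jax.random.PRNGKey(1), (batch, kv_len, heads, depth))
w = jnp.einsum("...qhd,...khd->...hqk", q, k) / jnp.sqrt(depth)  # scaled scores
w = jax.nn.softmax(w)  # normalize over the key dimension
print(w.shape)  # (2, 4, 5, 7)
print(jnp.allclose(w.sum(axis=-1), 1.0, atol=1e-5))  # True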
def __call__(
self,
hidden_states: jnp.ndarray,
key_value_states: Optional[jnp.ndarray] = None,
attention_mask: Optional[jnp.ndarray] = None,
init_cache: bool = False,
deterministic: bool = True,
) -> Tuple[jnp.ndarray]:
"""Input shape: Batch x Time x Channel"""
# if key_value_states are provided this layer is used as a cross-attention layer
# for the decoder
is_cross_attention = key_value_states is not None
batch_size = hidden_states.shape[0]
# get query proj
query_states = self.q_proj(hidden_states)
# get key, value proj
if is_cross_attention:
# cross_attentions
key_states = self.k_proj(key_value_states)
value_states = self.v_proj(key_value_states)
else:
# self_attention
key_states = self.k_proj(hidden_states)
value_states = self.v_proj(hidden_states)
query_states = self._split_heads(query_states)
key_states = self._split_heads(key_states)
value_states = self._split_heads(value_states)
# handle cache prepare causal attention mask
if self.causal:
query_length, key_length = query_states.shape[1], key_states.shape[1]
if self.has_variable("cache", "cached_key"):
mask_shift = self.variables["cache"]["cache_index"]
max_decoder_length = self.variables["cache"]["cached_key"].shape[1]
causal_mask = lax.dynamic_slice(
self.causal_mask,
(0, 0, mask_shift, 0),
(1, 1, query_length, max_decoder_length),
)
else:
causal_mask = self.causal_mask[:, :, :query_length, :key_length]
causal_mask = jnp.broadcast_to(
causal_mask, (batch_size,) + causal_mask.shape[1:]
)
# combine masks if needed
if attention_mask is not None and self.causal:
attention_mask = jnp.broadcast_to(
jnp.expand_dims(attention_mask, axis=(-3, -2)), causal_mask.shape
)
attention_mask = combine_masks(attention_mask, causal_mask)
elif self.causal:
attention_mask = causal_mask
elif attention_mask is not None:
attention_mask = jnp.expand_dims(attention_mask, axis=(-3, -2))
# During fast autoregressive decoding, we feed one position at a time,
# and cache the keys and values step by step.
if self.causal and (self.has_variable("cache", "cached_key") or init_cache):
key_states, value_states, attention_mask = self._concatenate_to_cache(
key_states, value_states, query_states, attention_mask
)
# Convert the boolean attention mask to an attention bias.
if attention_mask is not None:
# attention mask in the form of attention bias
attention_bias = lax.select(
attention_mask > 0,
jnp.full(attention_mask.shape, 0.0).astype(self.dtype),
jnp.full(attention_mask.shape, -jnp.inf).astype(self.dtype),
)
else:
attention_bias = None
dropout_rng = None
if not deterministic and self.dropout > 0.0:
dropout_rng = self.make_rng("dropout")
if self.config.use_cosine_attention:
# normalize q and k
query_states = query_states / (
jnp.linalg.norm(query_states, axis=-1, keepdims=True) + 1e-8
)
key_states = key_states / (
jnp.linalg.norm(key_states, axis=-1, keepdims=True) + 1e-8
)
# relative position embeddings
if self.config.use_swin_position_embeddings:
position_ids = jnp.arange(self.q_length)
embed_pos = self.rel_bias(position_ids)
embed_pos = rearrange(embed_pos, "q (k h) -> 1 h q k", h=self.num_heads)
else:
embed_pos = None
tau = self.tau if self.config.use_cosine_attention else None
attn_weights = dot_product_attention_weights(
query_states,
key_states,
bias=attention_bias,
mask=attention_mask,
embed_pos=embed_pos,
dropout_rng=dropout_rng,
dropout_rate=self.dropout,
broadcast_dropout=True,
deterministic=deterministic,
dtype=self.dtype,
precision=None,
sinkhorn_iters=self.config.sinkhorn_iters,
is_encoder=self.is_encoder,
tau=tau,
)
attn_output = jnp.einsum("...hqk,...khd->...qhd", attn_weights, value_states)
if self.config.use_head_scale:
# per Normformer
attn_output = attn_output * self.head_scale
attn_output = self._merge_heads(attn_output)
if self.config.ln_positions in ["subln"] and not self.is_cross_attention:
attn_output = self.mid_layernorm(attn_output)
attn_output = self.out_proj(attn_output)
return attn_output, attn_weights | Input shape: Batch x Time x Channel | __call__ | python | borisdayma/dalle-mini | src/dalle_mini/model/modeling.py | https://github.com/borisdayma/dalle-mini/blob/master/src/dalle_mini/model/modeling.py | Apache-2.0 |
def generate(
self,
input_ids: jnp.ndarray,
attention_mask: Optional[jnp.ndarray] = None,
max_length: Optional[int] = None,
pad_token_id: Optional[int] = None,
bos_token_id: Optional[int] = None,
eos_token_id: Optional[int] = None,
decoder_start_token_id: Optional[int] = None,
do_sample: Optional[bool] = None,
prng_key: Optional[jnp.ndarray] = None,
top_k: Optional[int] = None,
top_p: Optional[float] = None,
temperature: Optional[float] = None,
num_beams: Optional[int] = None,
no_repeat_ngram_size: Optional[int] = None,
min_length: Optional[int] = None,
forced_bos_token_id: Optional[int] = None,
forced_eos_token_id: Optional[int] = None,
length_penalty: Optional[float] = None,
early_stopping: Optional[bool] = None,
trace: bool = True,
params: Optional[Dict[str, jnp.ndarray]] = None,
condition_scale: Optional[float] = 1.0,
input_ids_uncond: Optional[jnp.ndarray] = None,
attention_mask_uncond: Optional[jnp.ndarray] = None,
**model_kwargs,
):
"""Edit: Allow super conditioning."""
# set init values
max_length = max_length if max_length is not None else self.config.max_length
bos_token_id = (
bos_token_id if bos_token_id is not None else self.config.bos_token_id
)
pad_token_id = (
pad_token_id if pad_token_id is not None else self.config.pad_token_id
)
eos_token_id = (
eos_token_id if eos_token_id is not None else self.config.eos_token_id
)
decoder_start_token_id = (
decoder_start_token_id
if decoder_start_token_id
else self.config.decoder_start_token_id
)
prng_key = prng_key if prng_key is not None else jax.random.PRNGKey(0)
if decoder_start_token_id is None and self.config.is_encoder_decoder:
raise ValueError(
"`decoder_start_token_id` has to be defined for encoder-decoder generation."
)
do_sample = do_sample if do_sample is not None else self.config.do_sample
num_beams = num_beams if num_beams is not None else self.config.num_beams
if self.config.is_encoder_decoder:
# add encoder_outputs to model_kwargs
if model_kwargs.get("encoder_outputs") is None:
model_kwargs_input = dict(model_kwargs)
model_kwargs = self._prepare_encoder_decoder_kwargs_for_generation(
input_ids,
params,
{"attention_mask": attention_mask, **model_kwargs_input},
)
if condition_scale != 1.0:
assert (
input_ids_uncond is not None
), "`input_ids_uncond` has to be defined for super conditioning."
assert (
do_sample is True
), "`do_sample` has to be True for super conditioning."
assert (
num_beams == 1
), "`num_beams` has to be 1 for super conditioning."
model_kwargs_uncond = (
self._prepare_encoder_decoder_kwargs_for_generation(
input_ids_uncond,
params,
{
"attention_mask": attention_mask_uncond,
**model_kwargs_input,
},
)
)
else:
model_kwargs_uncond = None
# prepare decoder_input_ids for generation
input_ids = (
jnp.ones((input_ids.shape[0], 1), dtype="i4") * decoder_start_token_id
)
if not do_sample and num_beams == 1:
logits_processor = self._get_logits_processor(
no_repeat_ngram_size,
min_length,
max_length,
eos_token_id,
forced_bos_token_id,
forced_eos_token_id,
)
return self._greedy_search(
input_ids,
max_length,
pad_token_id,
eos_token_id,
logits_processor=logits_processor,
trace=trace,
params=params,
model_kwargs=model_kwargs,
)
elif do_sample and num_beams == 1:
logits_warper = self._get_logits_warper(
top_k=top_k, top_p=top_p, temperature=temperature
)
logits_processor = self._get_logits_processor(
no_repeat_ngram_size,
min_length,
max_length,
eos_token_id,
forced_bos_token_id,
forced_eos_token_id,
)
return self._sample(
input_ids,
max_length,
pad_token_id,
eos_token_id,
prng_key,
logits_warper=logits_warper,
logits_processor=logits_processor,
trace=trace,
params=params,
model_kwargs=model_kwargs,
condition_scale=condition_scale,
model_kwargs_uncond=model_kwargs_uncond,
)
elif not do_sample and num_beams > 1:
# broadcast input_ids & encoder_outputs
input_ids = self._expand_to_num_beams(input_ids, num_beams=num_beams)
if "encoder_outputs" in model_kwargs:
model_kwargs["encoder_outputs"][
"last_hidden_state"
] = self._expand_to_num_beams(
model_kwargs["encoder_outputs"]["last_hidden_state"],
num_beams=num_beams,
)
if "attention_mask" in model_kwargs:
model_kwargs["attention_mask"] = self._expand_to_num_beams(
model_kwargs["attention_mask"], num_beams=num_beams
)
logits_processor = self._get_logits_processor(
no_repeat_ngram_size,
min_length,
max_length,
eos_token_id,
forced_bos_token_id,
forced_eos_token_id,
)
return self._beam_search(
input_ids,
max_length,
pad_token_id,
eos_token_id,
length_penalty=length_penalty,
early_stopping=early_stopping,
logits_processor=logits_processor,
trace=trace,
params=params,
model_kwargs=model_kwargs,
)
else:
raise NotImplementedError("`Beam sampling is currently not implemented.") | Edit: Allow super conditioning. | generate | python | borisdayma/dalle-mini | src/dalle_mini/model/modeling.py | https://github.com/borisdayma/dalle-mini/blob/master/src/dalle_mini/model/modeling.py | Apache-2.0 |
def sample_search_cond_fn(state):
"""state termination condition fn."""
has_reached_max_length = state.cur_len == max_length
all_sequence_finished = jnp.all(state.is_sent_finished)
finish_generation = jnp.logical_or(
has_reached_max_length, all_sequence_finished
)
return ~finish_generation | state termination condition fn. | _sample.sample_search_cond_fn | python | borisdayma/dalle-mini | src/dalle_mini/model/modeling.py | https://github.com/borisdayma/dalle-mini/blob/master/src/dalle_mini/model/modeling.py | Apache-2.0 |
def sample_search_body_fn(state):
"""state update fn."""
prng_key, prng_key_next = jax.random.split(state.prng_key)
model_outputs = model(
state.running_token, params=params, **state.model_kwargs
)
logits = model_outputs.logits[:, -1]
# perform super conditioning
# Source: @RiversHaveWings - https://twitter.com/RiversHaveWings/status/1478093658716966912?s=20&t=xdm-wZ61Wf7OLnE_NJHZ1w
if condition_scale != 1.0:
model_outputs_uncond = model(
state.running_token, params=params, **state.model_kwargs_uncond
)
logits_uncond = model_outputs_uncond.logits[:, -1]
logits = logits_uncond + condition_scale * (logits - logits_uncond)
else:
model_outputs_uncond = None
# apply min_length, ...
logits = logits_processor(state.sequences, logits, state.cur_len)
            # apply top_k, top_p, temperature
logits = logits_warper(logits, logits, state.cur_len)
next_token = jax.random.categorical(prng_key, logits, axis=-1)
next_is_sent_finished = state.is_sent_finished | (
next_token == eos_token_id
)
next_token = (
next_token * ~next_is_sent_finished
+ pad_token_id * next_is_sent_finished
)
next_token = next_token[:, None]
next_sequences = lax.dynamic_update_slice(
state.sequences, next_token, (0, state.cur_len)
)
next_model_kwargs = self.update_inputs_for_generation(
model_outputs, state.model_kwargs
)
next_model_kwargs_uncond = (
self.update_inputs_for_generation(
model_outputs_uncond, state.model_kwargs_uncond
)
if condition_scale != 1.0
else None
)
return SampleState(
cur_len=state.cur_len + 1,
sequences=next_sequences,
running_token=next_token,
is_sent_finished=next_is_sent_finished,
model_kwargs=next_model_kwargs,
model_kwargs_uncond=next_model_kwargs_uncond,
prng_key=prng_key_next,
) | state update fn. | _sample.sample_search_body_fn | python | borisdayma/dalle-mini | src/dalle_mini/model/modeling.py | https://github.com/borisdayma/dalle-mini/blob/master/src/dalle_mini/model/modeling.py | Apache-2.0 |
def _sample(
self,
input_ids: None,
max_length: Optional[int] = None,
pad_token_id: Optional[int] = None,
eos_token_id: Optional[int] = None,
prng_key: Optional[jnp.ndarray] = None,
logits_processor=None,
logits_warper=None,
trace: bool = True,
params: Optional[Dict[str, jnp.ndarray]] = None,
model_kwargs: Optional[Dict[str, jnp.ndarray]] = None,
condition_scale: float = 1.0,
model_kwargs_uncond: Optional[Dict[str, jnp.ndarray]] = None,
):
# init values
max_length = max_length if max_length is not None else self.config.max_length
pad_token_id = (
pad_token_id if pad_token_id is not None else self.config.pad_token_id
)
eos_token_id = (
eos_token_id if eos_token_id is not None else self.config.eos_token_id
)
prng_key = prng_key if prng_key is not None else jax.random.PRNGKey(0)
batch_size, cur_len = input_ids.shape
eos_token_id = jnp.array(eos_token_id)
pad_token_id = jnp.array(pad_token_id)
cur_len = jnp.array(cur_len)
# per batch-item holding current token in loop.
sequences = jnp.full((batch_size, max_length), pad_token_id, dtype=jnp.int32)
sequences = lax.dynamic_update_slice(sequences, input_ids, (0, 0))
# per batch-item state bit indicating if sentence has finished.
is_sent_finished = jnp.zeros((batch_size,), dtype=jnp.bool_)
# For Seq2Seq generation, we only need to use the decoder instead of the whole model in generation loop
# and pass it the `encoder_outputs`, which are part of the `model_kwargs`.
model = self.decode if self.config.is_encoder_decoder else self
# initialize model specific kwargs
model_kwargs = self.prepare_inputs_for_generation(
input_ids, max_length, **model_kwargs
)
if condition_scale != 1.0:
model_kwargs_uncond = self.prepare_inputs_for_generation(
input_ids, max_length, **model_kwargs_uncond
)
# initialize state
state = SampleState(
cur_len=cur_len,
sequences=sequences,
running_token=input_ids,
is_sent_finished=is_sent_finished,
prng_key=prng_key,
model_kwargs=model_kwargs,
model_kwargs_uncond=model_kwargs_uncond,
)
def sample_search_cond_fn(state):
"""state termination condition fn."""
has_reached_max_length = state.cur_len == max_length
all_sequence_finished = jnp.all(state.is_sent_finished)
finish_generation = jnp.logical_or(
has_reached_max_length, all_sequence_finished
)
return ~finish_generation
def sample_search_body_fn(state):
"""state update fn."""
prng_key, prng_key_next = jax.random.split(state.prng_key)
model_outputs = model(
state.running_token, params=params, **state.model_kwargs
)
logits = model_outputs.logits[:, -1]
# perform super conditioning
# Source: @RiversHaveWings - https://twitter.com/RiversHaveWings/status/1478093658716966912?s=20&t=xdm-wZ61Wf7OLnE_NJHZ1w
if condition_scale != 1.0:
model_outputs_uncond = model(
state.running_token, params=params, **state.model_kwargs_uncond
)
logits_uncond = model_outputs_uncond.logits[:, -1]
logits = logits_uncond + condition_scale * (logits - logits_uncond)
else:
model_outputs_uncond = None
# apply min_length, ...
logits = logits_processor(state.sequences, logits, state.cur_len)
            # apply top_k, top_p, temperature
logits = logits_warper(logits, logits, state.cur_len)
next_token = jax.random.categorical(prng_key, logits, axis=-1)
next_is_sent_finished = state.is_sent_finished | (
next_token == eos_token_id
)
next_token = (
next_token * ~next_is_sent_finished
+ pad_token_id * next_is_sent_finished
)
next_token = next_token[:, None]
next_sequences = lax.dynamic_update_slice(
state.sequences, next_token, (0, state.cur_len)
)
next_model_kwargs = self.update_inputs_for_generation(
model_outputs, state.model_kwargs
)
next_model_kwargs_uncond = (
self.update_inputs_for_generation(
model_outputs_uncond, state.model_kwargs_uncond
)
if condition_scale != 1.0
else None
)
return SampleState(
cur_len=state.cur_len + 1,
sequences=next_sequences,
running_token=next_token,
is_sent_finished=next_is_sent_finished,
model_kwargs=next_model_kwargs,
model_kwargs_uncond=next_model_kwargs_uncond,
prng_key=prng_key_next,
)
# The very first prompt often has sequence length > 1, so run outside of `lax.while_loop` to comply with TPU
if input_ids.shape[1] > 1:
state = sample_search_body_fn(state)
if not trace:
state = self._run_loop_in_debug(
sample_search_cond_fn, sample_search_body_fn, state
)
else:
state = lax.while_loop(sample_search_cond_fn, sample_search_body_fn, state)
return FlaxSampleOutput(sequences=state.sequences) | state termination condition fn. | _sample | python | borisdayma/dalle-mini | src/dalle_mini/model/modeling.py | https://github.com/borisdayma/dalle-mini/blob/master/src/dalle_mini/model/modeling.py | Apache-2.0 |
def so3_rft(x, b, grid):
"""
Real Fourier Transform
:param x: [..., beta_alpha_gamma]
:param b: output bandwidth signal
:param grid: tuple of (beta, alpha, gamma) tuples
:return: [l * m * n, ..., complex]
"""
# F is the Fourier matrix
F = _setup_so3_ft(b, grid, device_type=x.device.type, device_index=x.device.index) # [beta_alpha_gamma, l * m * n, complex]
assert x.size(-1) == F.size(0)
sz = x.size()
x = torch.einsum("ia,afc->fic", (x.view(-1, x.size(-1)), F.clone())) # [l * m * n, ..., complex]
x = x.view(-1, *sz[:-1], 2)
return x | Real Fourier Transform
:param x: [..., beta_alpha_gamma]
:param b: output bandwidth signal
:param grid: tuple of (beta, alpha, gamma) tuples
:return: [l * m * n, ..., complex] | so3_rft | python | jonkhler/s2cnn | s2cnn/so3_ft.py | https://github.com/jonkhler/s2cnn/blob/master/s2cnn/so3_ft.py | MIT |
def s2_near_identity_grid(max_beta=np.pi / 8, n_alpha=8, n_beta=3):
'''
:return: rings around the north pole
size of the kernel = n_alpha * n_beta
'''
    beta = np.arange(start=1, stop=n_beta + 1, dtype=np.float64) * max_beta / n_beta
alpha = np.linspace(start=0, stop=2 * np.pi, num=n_alpha, endpoint=False)
B, A = np.meshgrid(beta, alpha, indexing='ij')
B = B.flatten()
A = A.flatten()
grid = np.stack((B, A), axis=1)
return tuple(tuple(ba) for ba in grid) | :return: rings around the north pole
size of the kernel = n_alpha * n_beta | s2_near_identity_grid | python | jonkhler/s2cnn | s2cnn/s2_grid.py | https://github.com/jonkhler/s2cnn/blob/master/s2cnn/s2_grid.py | MIT |
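A standalone mirror of the grid construction above: the kernel support is n_alpha * n_beta (beta, alpha) pairs, with beta confined to small rings of radius up to max_beta around the north pole.
import numpy as np

max_beta, n_alpha, n_beta = np.pi / 8, 8, 3
beta = np.arange(1, n_beta + 1, dtype=np.float64) * max_beta / n_beta
alpha = np.linspace(0, 2 * np.pi, num=n_alpha, endpoint=False)
B, A = np.meshgrid(beta, alpha, indexing="ij")
grid = np.stack((B.flatten(), A.flatten()), axis=1)
print(grid.shape)  # (24, 2), i.e. n_alpha * n_beta points
print(np.isclose(grid[:, 0].max(), max_beta))  # True: rings stay within max_beta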
def s2_equatorial_grid(max_beta=0, n_alpha=32, n_beta=1):
'''
:return: rings around the equator
size of the kernel = n_alpha * n_beta
'''
beta = np.linspace(start=np.pi/2 - max_beta, stop=np.pi/2 + max_beta, num=n_beta, endpoint=True)
alpha = np.linspace(start=0, stop=2 * np.pi, num=n_alpha, endpoint=False)
B, A = np.meshgrid(beta, alpha, indexing='ij')
B = B.flatten()
A = A.flatten()
grid = np.stack((B, A), axis=1)
return tuple(tuple(ba) for ba in grid) | :return: rings around the equator
size of the kernel = n_alpha * n_beta | s2_equatorial_grid | python | jonkhler/s2cnn | s2cnn/s2_grid.py | https://github.com/jonkhler/s2cnn/blob/master/s2cnn/s2_grid.py | MIT |
def s2_mm(x, y):
'''
:param x: [l * m, batch, feature_in, complex]
:param y: [l * m, feature_in, feature_out, complex]
:return: [l * m * n, batch, feature_out, complex]
'''
from s2cnn.utils.complex import complex_mm
assert y.size(3) == 2
assert x.size(3) == 2
nbatch = x.size(1)
nfeature_in = x.size(2)
nfeature_out = y.size(2)
assert y.size(1) == nfeature_in
nspec = x.size(0)
assert y.size(0) == nspec
if x.is_cuda:
return _cuda_S2_mm.apply(x, y)
nl = round(nspec**0.5)
Fz_list = []
begin = 0
for l in range(nl):
L = 2 * l + 1
size = L
Fx = x[begin:begin+size] # [m, batch, feature_in, complex]
Fy = y[begin:begin+size] # [m, feature_in, feature_out, complex]
Fx = Fx.view(L * nbatch, nfeature_in, 2) # [m * batch, feature_in, complex]
Fy = Fy.transpose(0, 1) # [feature_in, m, feature_out, complex]
Fy = Fy.contiguous()
Fy = Fy.view(nfeature_in, L * nfeature_out, 2) # [feature_in, m * feature_out, complex]
Fz = complex_mm(Fx, Fy, conj_y=True) # [m_x * batch, m_y * feature_out, complex] m_x -> m, m_y -> n
Fz = Fz.view(L, nbatch, L, nfeature_out, 2) # [m, batch, n, feature_out, complex]
Fz = Fz.transpose(1, 2) # [m, n, batch, feature_out, complex]
Fz = Fz.contiguous()
Fz = Fz.view(L * L, nbatch, nfeature_out, 2) # [m * n, batch, feature_out, complex]
Fz_list.append(Fz)
begin += size
z = torch.cat(Fz_list, 0) # [l * m * n, batch, feature_out, complex]
return z | :param x: [l * m, batch, feature_in, complex]
:param y: [l * m, feature_in, feature_out, complex]
:return: [l * m * n, batch, feature_out, complex] | s2_mm | python | jonkhler/s2cnn | s2cnn/s2_mm.py | https://github.com/jonkhler/s2cnn/blob/master/s2cnn/s2_mm.py | MIT |
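A bookkeeping check of the spectral sizes used above (standalone arithmetic): an S2 signal stores sum_l (2l+1) = nl**2 coefficients, while the SO(3) output of s2_mm stores sum_l (2l+1)**2 = nl * (4*nl**2 - 1) / 3.
for nl in range(1, 8):
    s2_size = sum(2 * l + 1 for l in range(nl))          # spectral rows of the S2 input
    so3_size = sum((2 * l + 1) ** 2 for l in range(nl))  # spectral rows of the SO(3) output
    assert s2_size == nl ** 2
    assert so3_size == nl * (4 * nl ** 2 - 1) // 3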
def _cuda_s2_mm(x, y):
'''
:param x: [l * m, batch, feature_in, complex]
:param y: [l * m, feature_in, feature_out, complex]
:return: [l * m * n, batch, feature_out, complex]
'''
import s2cnn.utils.cuda as cuda_utils
assert x.is_cuda and x.dtype == torch.float32
assert y.is_cuda and y.dtype == torch.float32
assert y.size(3) == 2
assert x.size(3) == 2
nbatch = x.size(1)
nfeature_in = x.size(2)
nfeature_out = y.size(2)
assert y.size(1) == nfeature_in
assert y.size(0) == x.size(0)
nl = round(x.size(0) ** 0.5)
nspec = (4 * nl ** 2 - 1) * nl // 3
assert x.size(0) == nl ** 2
assert y.size(0) == nl ** 2
device = torch.cuda.current_device()
cuda_kernel = _setup_s2mm_cuda_kernel(nbatch=nbatch, nspec=nspec, nfeature_in=nfeature_in,
nfeature_out=nfeature_out, device=device)
stream = cuda_utils.Stream(ptr=torch.cuda.current_stream().cuda_stream)
output = x.new_empty((nspec, nbatch, nfeature_out, 2))
cuda_kernel(block=(cuda_utils.CUDA_NUM_THREADS, 1, 1),
grid=(cuda_utils.get_blocks(nspec * nbatch * nfeature_out, 1024), 1, 1),
args=[x.contiguous().data_ptr(), y.contiguous().data_ptr(), output.data_ptr()],
stream=stream)
# [l * m * n, batch, feature_out, complex]
return output | :param x: [l * m, batch, feature_in, complex]
:param y: [l * m, feature_in, feature_out, complex]
:return: [l * m * n, batch, feature_out, complex] | _cuda_s2_mm | python | jonkhler/s2cnn | s2cnn/s2_mm.py | https://github.com/jonkhler/s2cnn/blob/master/s2cnn/s2_mm.py | MIT |
def so3_near_identity_grid(max_beta=np.pi / 8, max_gamma=2*np.pi, n_alpha=8, n_beta=3, n_gamma=None):
'''
:return: rings of rotations around the identity, all points (rotations) in
a ring are at the same distance from the identity
size of the kernel = n_alpha * n_beta * n_gamma
'''
if n_gamma is None:
n_gamma = n_alpha # similar to regular representations
    beta = np.arange(start=1, stop=n_beta + 1, dtype=np.float64) * max_beta / n_beta
alpha = np.linspace(start=0, stop=2 * np.pi, num=n_alpha, endpoint=False)
pre_gamma = np.linspace(start=-max_gamma, stop=max_gamma, num=n_gamma, endpoint=True)
B, A, preC = np.meshgrid(beta, alpha, pre_gamma, indexing='ij')
C = preC - A
B = B.flatten()
A = A.flatten()
C = C.flatten()
grid = np.stack((B, A, C), axis=1)
if sum(grid[:, 0] == 0) > 1:
warnings.warn("Gimbal lock: beta take value 0 in the grid")
return tuple(tuple(bac) for bac in grid) | :return: rings of rotations around the identity, all points (rotations) in
a ring are at the same distance from the identity
size of the kernel = n_alpha * n_beta * n_gamma | so3_near_identity_grid | python | jonkhler/s2cnn | s2cnn/so3_grid.py | https://github.com/jonkhler/s2cnn/blob/master/s2cnn/so3_grid.py | MIT |
def so3_equatorial_grid(max_beta=0, max_gamma=np.pi / 8, n_alpha=32, n_beta=1, n_gamma=2):
'''
:return: rings of rotations around the equator.
size of the kernel = n_alpha * n_beta * n_gamma
'''
beta = np.linspace(start=np.pi/2 - max_beta, stop=np.pi/2 + max_beta, num=n_beta, endpoint=True)
alpha = np.linspace(start=0, stop=2 * np.pi, num=n_alpha, endpoint=False)
gamma = np.linspace(start=-max_gamma, stop=max_gamma, num=n_gamma, endpoint=True)
B, A, C = np.meshgrid(beta, alpha, gamma, indexing='ij')
B = B.flatten()
A = A.flatten()
C = C.flatten()
grid = np.stack((B, A, C), axis=1)
if sum(grid[:, 0] == 0) > 1:
warnings.warn("Gimbal lock: beta take value 0 in the grid")
return tuple(tuple(bac) for bac in grid) | :return: rings of rotations around the equator.
size of the kernel = n_alpha * n_beta * n_gamma | so3_equatorial_grid | python | jonkhler/s2cnn | s2cnn/so3_grid.py | https://github.com/jonkhler/s2cnn/blob/master/s2cnn/so3_grid.py | MIT |
def s2_rft(x, b, grid):
"""
Real Fourier Transform
:param x: [..., beta_alpha]
:param b: output bandwidth signal
:param grid: tuple of (beta, alpha) tuples
:return: [l * m, ..., complex]
"""
# F is the Fourier matrix
F = _setup_s2_ft(b, grid, device_type=x.device.type, device_index=x.device.index) # [beta_alpha, l * m, complex]
assert x.size(-1) == F.size(0)
sz = x.size()
x = torch.einsum("ia,afc->fic", (x.view(-1, x.size(-1)), F.clone())) # [l * m, ..., complex]
x = x.view(-1, *sz[:-1], 2)
return x | Real Fourier Transform
:param x: [..., beta_alpha]
:param b: output bandwidth signal
:param grid: tuple of (beta, alpha) tuples
:return: [l * m, ..., complex] | s2_rft | python | jonkhler/s2cnn | s2cnn/s2_ft.py | https://github.com/jonkhler/s2cnn/blob/master/s2cnn/s2_ft.py | MIT |
def so3_mm(x, y):
'''
:param x: [l * m * n, batch, feature_in, complex]
:param y: [l * m * n, feature_in, feature_out, complex]
:return: [l * m * n, batch, feature_out, complex]
'''
from s2cnn.utils.complex import complex_mm
import math
assert y.size(3) == 2
assert x.size(3) == 2
nbatch = x.size(1)
nfeature_in = x.size(2)
nfeature_out = y.size(2)
assert y.size(1) == nfeature_in
nspec = x.size(0)
assert y.size(0) == nspec
nl = math.ceil((3 / 4 * nspec) ** (1 / 3))
assert nspec == nl * (4 * nl ** 2 - 1) // 3
if x.is_cuda:
return _cuda_SO3_mm.apply(x, y)
Fz_list = []
begin = 0
for l in range(nl):
L = 2 * l + 1
size = L ** 2
Fx = x[begin:begin + size] # [m * n, batch, feature_in, complex]
Fy = y[begin:begin + size] # [m * n, feature_in, feature_out, complex]
Fx = Fx.view(L, L, nbatch, nfeature_in, 2) # [m, n, batch, feature_in, complex]
Fx = Fx.transpose(0, 1) # [n, m, batch, feature_in, complex]
Fx = Fx.transpose(0, 2) # [batch, m, n, feature_in, complex]
Fx = Fx.transpose(2, 3) # [batch, m, feature_in, n, complex]
Fx = Fx.contiguous()
Fx = Fx.view(nbatch * L, nfeature_in * L, 2) # [batch * m, feature_in * n, complex]
Fy = Fy.view(L, L, nfeature_in, nfeature_out, 2) # [m, n, feature_in, feature_out, complex]
Fy = Fy.transpose(0, 2) # [feature_in, n, m, feature_out, complex]
Fy = Fy.contiguous()
Fy = Fy.view(nfeature_in * L, L * nfeature_out, 2) # [feature_in * n, m * feature_out, complex]
Fz = complex_mm(Fx, Fy, conj_y=True) # [batch * m_x, m_y * feature_out, complex] m_x -> m, m_y -> n
Fz = Fz.view(nbatch, L * L, nfeature_out, 2) # [batch, m * n, feature_out, complex]
Fz = Fz.transpose(0, 1) # [m * n, batch, feature_out, complex]
Fz_list.append(Fz)
begin += size
z = torch.cat(Fz_list, 0) # [l * m * n, batch, feature_out, complex]
return z | :param x: [l * m * n, batch, feature_in, complex]
:param y: [l * m * n, feature_in, feature_out, complex]
:return: [l * m * n, batch, feature_out, complex] | so3_mm | python | jonkhler/s2cnn | s2cnn/so3_mm.py | https://github.com/jonkhler/s2cnn/blob/master/s2cnn/so3_mm.py | MIT |
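The assertion nspec == nl * (4 * nl ** 2 - 1) // 3 above uses the identity sum_{l<nl} (2l+1)^2 = nl(4 nl^2 - 1)/3, i.e. the spectrum stacks one (2l+1) x (2l+1) block per degree l. A quick standalone check:

for nl in range(1, 8):
    nspec = sum((2 * l + 1) ** 2 for l in range(nl))
    assert nspec == nl * (4 * nl ** 2 - 1) // 3
print("block layout consistent")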
def forward(ctx, x, y): # pylint: disable=W
'''
:param x: [l * m * n, batch, feature_in, complex]
:param y: [l * m * n, feature_in, feature_out, complex]
:return: [l * m * n, batch, feature_out, complex]
'''
assert x.is_cuda and x.dtype == torch.float32
assert y.is_cuda and y.dtype == torch.float32
assert y.size(3) == 2
assert x.size(3) == 2
nbatch = x.size(1)
nfeature_in = x.size(2)
nfeature_out = y.size(2)
assert y.size(1) == nfeature_in
nspec = x.size(0)
assert y.size(0) == nspec
nl = round((3 / 4 * nspec) ** (1 / 3))
assert nspec == nl * (4 * nl ** 2 - 1) // 3
ctx.save_for_backward(x, y)
device = torch.cuda.current_device()
cuda_kernel = _setup_so3mm_cuda_kernel(nl=nl, ni=nbatch, nj=nfeature_out, nk=nfeature_in, conj_y=True,
trans_y_spec=True, device=device)
output = x.new_empty((nspec, nbatch, nfeature_out, 2))
cuda_kernel(x, y, output) # [l * m * n, batch, feature_out, complex]
return output | :param x: [l * m * n, batch, feature_in, complex]
:param y: [l * m * n, feature_in, feature_out, complex]
:return: [l * m * n, batch, feature_out, complex] | forward | python | jonkhler/s2cnn | s2cnn/so3_mm.py | https://github.com/jonkhler/s2cnn/blob/master/s2cnn/so3_mm.py | MIT |
def _setup_so3mm_cuda_kernel(nl, ni, nj, nk,
conj_x=False, conj_y=False,
trans_x_spec=False, trans_x_feature=False,
trans_y_spec=False, trans_y_feature=False,
trans_out_feature=False, device=0):
'''
return a function that computes
out[l*m*n, i, j] = sum_k sum_p x[l*m*p, i, k] y[l*p*n, k, j]
where out, x, y are complex valued
if conj_x is set to True, x is conjugated
if conj_y is set to True, y is conjugated
if trans_x_spec is set to True m and p are permuted in x[...]
if trans_y_spec is set to True p and n are permuted in y[...]
if trans_x_feature is set to True i and k are permuted in x[...]
if trans_y_feature is set to True k and j are permuted in y[...]
if trans_out_feature is set to True i and j are permuted in out[...]
'''
kernel = '''
#define NI {}
#define NJ {}
#define NK {}
'''.format(ni, nj, nk)
if not trans_x_spec and not trans_x_feature:
kernel += '#define INDEX_X (((L0 + m * L + p) * NI + i) * NK + k)\n'
if not trans_x_spec and trans_x_feature:
kernel += '#define INDEX_X (((L0 + m * L + p) * NK + k) * NI + i)\n'
if trans_x_spec and not trans_x_feature:
kernel += '#define INDEX_X (((L0 + p * L + m) * NI + i) * NK + k)\n'
if trans_x_spec and trans_x_feature:
kernel += '#define INDEX_X (((L0 + p * L + m) * NK + k) * NI + i)\n'
if not trans_y_spec and not trans_y_feature:
kernel += '#define INDEX_Y (((L0 + p * L + n) * NK + k) * NJ + j)\n'
if not trans_y_spec and trans_y_feature:
kernel += '#define INDEX_Y (((L0 + p * L + n) * NJ + j) * NK + k)\n'
if trans_y_spec and not trans_y_feature:
kernel += '#define INDEX_Y (((L0 + n * L + p) * NK + k) * NJ + j)\n'
if trans_y_spec and trans_y_feature:
kernel += '#define INDEX_Y (((L0 + n * L + p) * NJ + j) * NK + k)\n'
if not trans_out_feature:
kernel += '#define INDEX_OUT (((L0 + m * L + n) * NI + i) * NJ + j)\n'
if trans_out_feature:
kernel += '#define INDEX_OUT (((L0 + m * L + n) * NJ + j) * NI + i)\n'
kernel += '''
#define CONJ_X {}
#define CONJ_Y {}
'''.format("x_im = -x_im;" if conj_x else ";", "y_im = -y_im;" if conj_y else ";")
kernel += '''
#define CEIL_DIV(x, y) (((x) + (y) - 1) / (y))
extern "C"
__global__ void main_(const float* in_x, const float* in_y, float* out)
{
// start of thread independent code
int l = blockIdx.z;
int L = 2 * l + 1;
int L0 = (4 * l*l - 1) * l / 3;
if (blockIdx.y * 32 >= L * NI || blockIdx.x * 32 >= L * NJ) {
return;
}
int ntile = CEIL_DIV(L * NK, 32);
// end of thread independent code
int mi = blockIdx.y * 32 + threadIdx.y;
int m = mi / NI;
int i = mi % NI;
int nj = blockIdx.x * 32 + threadIdx.x;
int n = nj / NJ;
int j = nj % NJ;
float sum_re = 0.0;
float sum_im = 0.0;
for (int tile = 0; tile < ntile; ++tile) {
__shared__ float tileX[2][32][32];
__shared__ float tileY[2][32][32];
int pk = tile * 32 + threadIdx.x;
int p = pk / NK;
int k = pk % NK;
int index = INDEX_X * 2;
tileX[0][threadIdx.y][threadIdx.x] = m < L && p < L ? in_x[index + 0] : 0.0;
tileX[1][threadIdx.y][threadIdx.x] = m < L && p < L ? in_x[index + 1] : 0.0;
pk = tile * 32 + threadIdx.y;
p = pk / NK;
k = pk % NK;
index = INDEX_Y * 2;
tileY[0][threadIdx.y][threadIdx.x] = p < L && n < L ? in_y[index + 0] : 0.0;
tileY[1][threadIdx.y][threadIdx.x] = p < L && n < L ? in_y[index + 1] : 0.0;
__syncthreads();
for (int any = 0; any < 32; ++any) {
float x_re = tileX[0][threadIdx.y][any];
float x_im = tileX[1][threadIdx.y][any];
float y_re = tileY[0][any][threadIdx.x];
float y_im = tileY[1][any][threadIdx.x];
CONJ_X
CONJ_Y
sum_re += x_re * y_re - x_im * y_im;
sum_im += x_re * y_im + x_im * y_re;
}
__syncthreads();
}
if (m < L && n < L) {
int index = INDEX_OUT * 2;
out[index + 0] = sum_re;
out[index + 1] = sum_im;
}
}
'''
import s2cnn.utils.cuda as cuda_utils
kernel = cuda_utils.compile_kernel(kernel, 'so3_mm.cu', 'main_')
stream = cuda_utils.Stream(ptr=torch.cuda.current_stream().cuda_stream)
def fun(x, y, output):
assert output.is_contiguous()
kernel(block=(32, 32, 1),
grid=(math.ceil((2 * nl - 1) * nj / 32), math.ceil((2 * nl - 1) * ni / 32), nl),
args=[x.contiguous().data_ptr(), y.contiguous().data_ptr(), output.data_ptr()],
stream=stream)
return fun | return a function that computes
out[l*m*n, i, j] = sum_k sum_p x[l*m*p, i, k] y[l*p*n, k, j]
where out, x, y are complex valued
if conj_x is set to True, x is conjugated
if conj_y is set to True, y is conjugated
if trans_x_spec is set to True m and p are permuted in x[...]
if trans_y_spec is set to True p and n are permuted in y[...]
if trans_x_feature is set to True i and k are permuted in x[...]
if trans_y_feature is set to True k and j are permuted in y[...]
if trans_out_feature is set to True i and j are permuted in out[...] | _setup_so3mm_cuda_kernel | python | jonkhler/s2cnn | s2cnn/so3_mm.py | https://github.com/jonkhler/s2cnn/blob/master/s2cnn/so3_mm.py | MIT |
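A CPU reference of the per-degree contraction described in the docstring above (all conj/transpose flags off), written with complex numpy purely for illustration; the shapes are arbitrary examples:

import numpy as np

l, ni, nj, nk = 2, 3, 4, 5
L = 2 * l + 1
x = np.random.randn(L, L, ni, nk) + 1j * np.random.randn(L, L, ni, nk)  # x[m, p, i, k]
y = np.random.randn(L, L, nk, nj) + 1j * np.random.randn(L, L, nk, nj)  # y[p, n, k, j]
out = np.einsum('mpik,pnkj->mnij', x, y)  # out[m, n, i, j] = sum_{k, p} x[m, p, i, k] y[p, n, k, j]
print(out.shape)                          # (5, 5, 3, 4)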
def as_complex(x):
"""
In pytorch, a complex array is represented as a real array with an extra length-2 axis at the end.
This function takes a real-valued array x and adds a complex axis where the real part is set to x and the imaginary part is set to 0.
"""
imaginary = torch.zeros_like(x)
z = torch.stack((x, imaginary), dim=x.ndimension())
return z | In pytorch, a complex array is represented as a real array with an extra length-2 axis at the end.
This function takes a real-valued array x and adds a complex axis where the real part is set to x and the imaginary part is set to 0. | as_complex | python | jonkhler/s2cnn | s2cnn/utils/complex.py | https://github.com/jonkhler/s2cnn/blob/master/s2cnn/utils/complex.py | MIT
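Quick usage check of the layout described above (assumes the s2cnn package is importable; the module path is taken from the row above):

import torch
from s2cnn.utils.complex import as_complex

x = torch.tensor([1.0, 2.0, 3.0])
z = as_complex(x)
print(z.shape)              # torch.Size([3, 2])
print(z[:, 1].abs().sum())  # tensor(0.) -- the imaginary part is zero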
def complex_mm(x, y, conj_x=False, conj_y=False):
'''
:param x: [i, k, complex] (M, K, 2)
:param y: [k, j, complex] (K, N, 2)
:return: [i, j, complex] (M, N, 2)
'''
xr = x[:, :, 0]
xi = x[:, :, 1]
yr = y[:, :, 0]
yi = y[:, :, 1]
if not conj_x and not conj_y:
zr = torch.mm(xr, yr) - torch.mm(xi, yi)
zi = torch.mm(xr, yi) + torch.mm(xi, yr)
if conj_x and not conj_y:
zr = torch.mm(xr, yr) + torch.mm(xi, yi)
zi = torch.mm(xr, yi) - torch.mm(xi, yr)
if not conj_x and conj_y:
zr = torch.mm(xr, yr) + torch.mm(xi, yi)
zi = torch.mm(xi, yr) - torch.mm(xr, yi)
if conj_x and conj_y:
zr = torch.mm(xr, yr) - torch.mm(xi, yi)
zi = - torch.mm(xr, yi) - torch.mm(xi, yr)
return torch.stack((zr, zi), 2) | :param x: [i, k, complex] (M, K, 2)
:param y: [k, j, complex] (K, N, 2)
:return: [i, j, complex] (M, N, 2) | complex_mm | python | jonkhler/s2cnn | s2cnn/utils/complex.py | https://github.com/jonkhler/s2cnn/blob/master/s2cnn/utils/complex.py | MIT |
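Sanity check of complex_mm against PyTorch's native complex matmul, assuming the s2cnn package is importable:

import torch
from s2cnn.utils.complex import complex_mm

x = torch.randn(3, 4, 2)
y = torch.randn(4, 5, 2)
z = complex_mm(x, y)
z_ref = torch.view_as_real(torch.view_as_complex(x) @ torch.view_as_complex(y))
print(torch.allclose(z, z_ref, atol=1e-5))  # True up to float32 rounding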
def wrapper(*args):
'''
The wrapper of the function
'''
try:
os.makedirs(dirname)
except FileExistsError:
pass
indexfile = os.path.join(dirname, "index.pkl")
try:
with open(indexfile, "rb") as file:
index = pickle.load(file)
except FileNotFoundError:
index = {}
try:
filename = index[args]
except KeyError:
index[args] = filename = "{}.pkl.gz".format(len(index))
with open(indexfile, "wb") as file:
pickle.dump(index, file)
filepath = os.path.join(dirname, filename)
try:
with gzip.open(filepath, "rb") as file:
print("load {}... ".format(filename), end="")
result = pickle.load(file)
except FileNotFoundError:
print("compute {}... ".format(filename), end="")
sys.stdout.flush()
result = func(*args)
print("save {}... ".format(filename), end="")
with gzip.open(filepath, "wb") as file:
pickle.dump(result, file)
print("done")
return result | The wrapper of the function | cached_dirpklgz.cached_dirpklgz.decorator.wrapper | python | jonkhler/s2cnn | s2cnn/utils/decorator.py | https://github.com/jonkhler/s2cnn/blob/master/s2cnn/utils/decorator.py | MIT |
def decorator(func):
'''
The actual decorator
'''
@lru_cache(maxsize=None)
@wraps(func)
def wrapper(*args):
'''
The wrapper of the function
'''
try:
os.makedirs(dirname)
except FileExistsError:
pass
indexfile = os.path.join(dirname, "index.pkl")
try:
with open(indexfile, "rb") as file:
index = pickle.load(file)
except FileNotFoundError:
index = {}
try:
filename = index[args]
except KeyError:
index[args] = filename = "{}.pkl.gz".format(len(index))
with open(indexfile, "wb") as file:
pickle.dump(index, file)
filepath = os.path.join(dirname, filename)
try:
with gzip.open(filepath, "rb") as file:
print("load {}... ".format(filename), end="")
result = pickle.load(file)
except FileNotFoundError:
print("compute {}... ".format(filename), end="")
sys.stdout.flush()
result = func(*args)
print("save {}... ".format(filename), end="")
with gzip.open(filepath, "wb") as file:
pickle.dump(result, file)
print("done")
return result
return wrapper | The actual decorator | cached_dirpklgz.decorator | python | jonkhler/s2cnn | s2cnn/utils/decorator.py | https://github.com/jonkhler/s2cnn/blob/master/s2cnn/utils/decorator.py | MIT |
def cached_dirpklgz(dirname):
'''
Cache a function with a directory
'''
def decorator(func):
'''
The actual decorator
'''
@lru_cache(maxsize=None)
@wraps(func)
def wrapper(*args):
'''
The wrapper of the function
'''
try:
os.makedirs(dirname)
except FileExistsError:
pass
indexfile = os.path.join(dirname, "index.pkl")
try:
with open(indexfile, "rb") as file:
index = pickle.load(file)
except FileNotFoundError:
index = {}
try:
filename = index[args]
except KeyError:
index[args] = filename = "{}.pkl.gz".format(len(index))
with open(indexfile, "wb") as file:
pickle.dump(index, file)
filepath = os.path.join(dirname, filename)
try:
with gzip.open(filepath, "rb") as file:
print("load {}... ".format(filename), end="")
result = pickle.load(file)
except FileNotFoundError:
print("compute {}... ".format(filename), end="")
sys.stdout.flush()
result = func(*args)
print("save {}... ".format(filename), end="")
with gzip.open(filepath, "wb") as file:
pickle.dump(result, file)
print("done")
return result
return wrapper
return decorator | Cache a function with a directory | cached_dirpklgz | python | jonkhler/s2cnn | s2cnn/utils/decorator.py | https://github.com/jonkhler/s2cnn/blob/master/s2cnn/utils/decorator.py | MIT |
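Usage sketch for the caching decorator defined above (the cache directory is an arbitrary example; the module path is taken from the row above):

from s2cnn.utils.decorator import cached_dirpklgz

@cached_dirpklgz("/tmp/example_cache")
def slow_square(x):
    return x * x

print(slow_square(4))  # first call: computed, then pickled as 0.pkl.gz inside the cache directory
print(slow_square(4))  # repeated call in the same process: served by the lru_cache layer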
def so3_integrate(x):
"""
Integrate a signal on SO(3) using the Haar measure
:param x: [..., beta, alpha, gamma] (..., 2b, 2b, 2b)
:return y: [...] (...)
"""
assert x.size(-1) == x.size(-2)
assert x.size(-2) == x.size(-3)
b = x.size(-1) // 2
w = _setup_so3_integrate(b, device_type=x.device.type, device_index=x.device.index) # [beta]
x = torch.sum(x, dim=-1).squeeze(-1) # [..., beta, alpha]
x = torch.sum(x, dim=-1).squeeze(-1) # [..., beta]
sz = x.size()
x = x.view(-1, 2 * b)
w = w.view(2 * b, 1)
x = torch.mm(x, w).squeeze(-1)
x = x.view(*sz[:-1])
return x | Integrate a signal on SO(3) using the Haar measure
:param x: [..., beta, alpha, gamma] (..., 2b, 2b, 2b)
:return y: [...] (...) | so3_integrate | python | jonkhler/s2cnn | s2cnn/soft/so3_integrate.py | https://github.com/jonkhler/s2cnn/blob/master/s2cnn/soft/so3_integrate.py | MIT |
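Shape-level usage sketch, assuming s2cnn is installed and exports so3_integrate at the top level; the values depend on the quadrature weights set up internally, so only shapes are shown:

import torch
from s2cnn import so3_integrate  # assumed export

b = 8
x = torch.randn(4, 10, 2 * b, 2 * b, 2 * b)  # [..., beta, alpha, gamma]
print(so3_integrate(x).shape)                # torch.Size([4, 10]) -- one scalar per leading index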
def __init__(self, nfeature_in, nfeature_out, b_in, b_out, grid):
'''
:param nfeature_in: number of input features
:param nfeature_out: number of output features
:param b_in: input bandwidth (precision of the input SOFT grid)
:param b_out: output bandwidth
:param grid: points of the SO(3) group defining the kernel, tuple of (alpha, beta, gamma)'s
'''
super(SO3Convolution, self).__init__()
self.nfeature_in = nfeature_in
self.nfeature_out = nfeature_out
self.b_in = b_in
self.b_out = b_out
self.grid = grid
self.kernel = Parameter(torch.empty(nfeature_in, nfeature_out, len(grid)).uniform_(-1, 1))
self.bias = Parameter(torch.zeros(1, nfeature_out, 1, 1, 1))
# When using the ADAM optimizer, the variance of each component of the gradient
# is normalized by ADAM to be around 1,
# so it is best to keep the parameters of order one.
# The scaling needed for a proper forward propagation is therefore done "outside" of the parameters.
self.scaling = 1. / math.sqrt(len(self.grid) * self.nfeature_in * (self.b_out ** 3.) / (self.b_in ** 3.)) | :param nfeature_in: number of input features
:param nfeature_out: number of output features
:param b_in: input bandwidth (precision of the input SOFT grid)
:param b_out: output bandwidth
:param grid: points of the SO(3) group defining the kernel, tuple of (alpha, beta, gamma)'s | __init__ | python | jonkhler/s2cnn | s2cnn/soft/so3_conv.py | https://github.com/jonkhler/s2cnn/blob/master/s2cnn/soft/so3_conv.py | MIT |
def forward(self, x): # pylint: disable=W
'''
:x: [batch, feature_in, beta, alpha, gamma]
:return: [batch, feature_out, beta, alpha, gamma]
'''
assert x.size(1) == self.nfeature_in
assert x.size(2) == 2 * self.b_in
assert x.size(3) == 2 * self.b_in
assert x.size(4) == 2 * self.b_in
x = SO3_fft_real.apply(x, self.b_out) # [l * m * n, batch, feature_in, complex]
y = so3_rft(self.kernel * self.scaling, self.b_out, self.grid) # [l * m * n, feature_in, feature_out, complex]
assert x.size(0) == y.size(0)
assert x.size(2) == y.size(1)
z = so3_mm(x, y) # [l * m * n, batch, feature_out, complex]
assert z.size(0) == x.size(0)
assert z.size(1) == x.size(1)
assert z.size(2) == y.size(2)
z = SO3_ifft_real.apply(z) # [batch, feature_out, beta, alpha, gamma]
z = z + self.bias
return z | :x: [batch, feature_in, beta, alpha, gamma]
:return: [batch, feature_out, beta, alpha, gamma] | forward | python | jonkhler/s2cnn | s2cnn/soft/so3_conv.py | https://github.com/jonkhler/s2cnn/blob/master/s2cnn/soft/so3_conv.py | MIT |
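Construction sketch using the signatures shown above, assuming s2cnn is installed and exports these names at the top level; treat it as illustrative rather than a tested snippet:

import torch
from s2cnn import SO3Convolution, so3_near_identity_grid  # assumed exports

b_in, b_out = 8, 4
conv = SO3Convolution(nfeature_in=3, nfeature_out=5, b_in=b_in, b_out=b_out,
                      grid=so3_near_identity_grid())
x = torch.randn(2, 3, 2 * b_in, 2 * b_in, 2 * b_in)  # [batch, feature_in, beta, alpha, gamma]
print(conv(x).shape)                                  # torch.Size([2, 5, 8, 8, 8])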
def forward(self, x): # pylint: disable=W
'''
:x: [batch, feature_in, beta, alpha, gamma]
:return: [batch, feature_out, beta, alpha, gamma]
'''
if self.conv is not None:
return self.conv(x)
else:
return x | :x: [batch, feature_in, beta, alpha, gamma]
:return: [batch, feature_out, beta, alpha, gamma] | forward | python | jonkhler/s2cnn | s2cnn/soft/so3_conv.py | https://github.com/jonkhler/s2cnn/blob/master/s2cnn/soft/so3_conv.py | MIT |
def __init__(self, nfeature_in, nfeature_out, b_in, b_out, grid):
'''
:param nfeature_in: number of input features
:param nfeature_out: number of output features
:param b_in: input bandwidth (precision of the input SOFT grid)
:param b_out: output bandwidth
:param grid: points of the sphere defining the kernel, tuple of (alpha, beta)'s
'''
super(S2Convolution, self).__init__()
self.nfeature_in = nfeature_in
self.nfeature_out = nfeature_out
self.b_in = b_in
self.b_out = b_out
self.grid = grid
self.kernel = Parameter(torch.empty(nfeature_in, nfeature_out, len(grid)).uniform_(-1, 1))
self.scaling = 1. / math.sqrt(len(self.grid) * self.nfeature_in * (self.b_out ** 4.) / (self.b_in ** 2.))
self.bias = Parameter(torch.zeros(1, nfeature_out, 1, 1, 1)) | :param nfeature_in: number of input features
:param nfeature_out: number of output features
:param b_in: input bandwidth (precision of the input SOFT grid)
:param b_out: output bandwidth
:param grid: points of the sphere defining the kernel, tuple of (alpha, beta)'s | __init__ | python | jonkhler/s2cnn | s2cnn/soft/s2_conv.py | https://github.com/jonkhler/s2cnn/blob/master/s2cnn/soft/s2_conv.py | MIT |
def forward(self, x): # pylint: disable=W
'''
:x: [batch, feature_in, beta, alpha]
:return: [batch, feature_out, beta, alpha, gamma]
'''
assert x.size(1) == self.nfeature_in
assert x.size(2) == 2 * self.b_in
assert x.size(3) == 2 * self.b_in
x = S2_fft_real.apply(x, self.b_out) # [l * m, batch, feature_in, complex]
y = s2_rft(self.kernel * self.scaling, self.b_out, self.grid) # [l * m, feature_in, feature_out, complex]
z = s2_mm(x, y) # [l * m * n, batch, feature_out, complex]
z = SO3_ifft_real.apply(z) # [batch, feature_out, beta, alpha, gamma]
z = z + self.bias
return z | :x: [batch, feature_in, beta, alpha]
:return: [batch, feature_out, beta, alpha, gamma] | forward | python | jonkhler/s2cnn | s2cnn/soft/s2_conv.py | https://github.com/jonkhler/s2cnn/blob/master/s2cnn/soft/s2_conv.py | MIT |
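The shapes of the two forward() methods suggest the usual pairing: an S2Convolution lifts a spherical signal to SO(3), and an SO3Convolution can then be applied to its output. A hedged sketch (assumed top-level exports, illustrative only):

import torch
from s2cnn import S2Convolution, SO3Convolution, s2_near_identity_grid, so3_near_identity_grid

b = 8
s2_layer = S2Convolution(nfeature_in=1, nfeature_out=4, b_in=b, b_out=b // 2,
                         grid=s2_near_identity_grid())
so3_layer = SO3Convolution(nfeature_in=4, nfeature_out=8, b_in=b // 2, b_out=b // 2,
                           grid=so3_near_identity_grid())
x = torch.randn(2, 1, 2 * b, 2 * b)  # [batch, feature_in, beta, alpha]
print(so3_layer(s2_layer(x)).shape)  # torch.Size([2, 8, 8, 8, 8])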
def so3_rotation(x, alpha, beta, gamma):
'''
:param x: [..., beta, alpha, gamma] (..., 2b, 2b, 2b)
'''
b = x.size()[-1] // 2
x_size = x.size()
Us = _setup_so3_rotation(b, alpha, beta, gamma, device_type=x.device.type, device_index=x.device.index)
# fourier transform
x = SO3_fft_real.apply(x) # [l * m * n, ..., complex]
# rotated spectrum
Fz_list = []
begin = 0
for l in range(b):
L = 2 * l + 1
size = L ** 2
Fx = x[begin:begin+size]
Fx = Fx.view(L, -1, 2) # [m, n * batch, complex]
U = Us[l].view(L, L, 2) # [m, n, complex]
Fz = complex_mm(U, Fx, conj_x=True) # [m, n * batch, complex]
Fz = Fz.view(size, -1, 2) # [m * n, batch, complex]
Fz_list.append(Fz)
begin += size
Fz = torch.cat(Fz_list, 0) # [l * m * n, batch, complex]
z = SO3_ifft_real.apply(Fz)
z = z.contiguous()
z = z.view(*x_size)
return z | :param x: [..., beta, alpha, gamma] (..., 2b, 2b, 2b) | so3_rotation | python | jonkhler/s2cnn | s2cnn/soft/so3_rotation.py | https://github.com/jonkhler/s2cnn/blob/master/s2cnn/soft/so3_rotation.py | MIT |
def s2_fft(x, for_grad=False, b_out=None):
'''
:param x: [..., beta, alpha, complex]
:return: [l * m, ..., complex]
'''
assert x.size(-1) == 2
b_in = x.size(-2) // 2
assert x.size(-2) == 2 * b_in
assert x.size(-3) == 2 * b_in
if b_out is None:
b_out = b_in
assert b_out <= b_in
batch_size = x.size()[:-3]
x = x.view(-1, 2 * b_in, 2 * b_in, 2) # [batch, beta, alpha, complex]
'''
:param x: [batch, beta, alpha, complex] (nbatch, 2 * b_in, 2 * b_in, 2)
:return: [l * m, batch, complex] (b_out**2, nbatch, 2)
'''
nspec = b_out ** 2
nbatch = x.size(0)
wigner = _setup_wigner(b_in, nl=b_out, weighted=not for_grad, device=x.device)
wigner = wigner.view(2 * b_in, -1) # [beta, l * m] (2 * b_in, nspec)
x = torch.view_as_real(torch.fft.fft(torch.view_as_complex(x))) # [batch, beta, m, complex]
output = x.new_empty((nspec, nbatch, 2))
if x.is_cuda and x.dtype == torch.float32:
import s2cnn.utils.cuda as cuda_utils
cuda_kernel = _setup_s2fft_cuda_kernel(b=b_in, nspec=nspec, nbatch=nbatch, device=x.device.index)
stream = cuda_utils.Stream(ptr=torch.cuda.current_stream().cuda_stream)
cuda_kernel(block=(1024, 1, 1),
grid=(cuda_utils.get_blocks(nspec * nbatch, 1024), 1, 1),
args=[x.contiguous().data_ptr(), wigner.contiguous().data_ptr(), output.data_ptr()],
stream=stream)
# [l * m, batch, complex]
else:
for l in range(b_out):
s = slice(l ** 2, l ** 2 + 2 * l + 1)
xx = torch.cat((x[:, :, -l:], x[:, :, :l + 1]), dim=2) if l > 0 else x[:, :, :1]
output[s] = torch.einsum("bm,zbmc->mzc", (wigner[:, s], xx))
output = output.view(-1, *batch_size, 2) # [l * m, ..., complex] (nspec, ..., 2)
return output | :param x: [..., beta, alpha, complex]
:return: [l * m, ..., complex] | s2_fft | python | jonkhler/s2cnn | s2cnn/soft/s2_fft.py | https://github.com/jonkhler/s2cnn/blob/master/s2cnn/soft/s2_fft.py | MIT |
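The per-degree slice slice(l ** 2, l ** 2 + 2 * l + 1) in the CPU branch above relies on sum_{l<b} (2l+1) = b^2, so the b^2 spectral coefficients tile exactly into one length-(2l+1) segment per degree. A quick standalone check:

for b in range(1, 10):
    assert sum(2 * l + 1 for l in range(b)) == b ** 2
print("s2 spectral layout consistent")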
def s2_ifft(x, for_grad=False, b_out=None):
'''
:param x: [l * m, ..., complex]
'''
assert x.size(-1) == 2
nspec = x.size(0)
b_in = round(nspec ** 0.5)
assert nspec == b_in ** 2
if b_out is None:
b_out = b_in
assert b_out >= b_in
batch_size = x.size()[1:-1]
x = x.view(nspec, -1, 2) # [l * m, batch, complex] (nspec, nbatch, 2)
'''
:param x: [l * m, batch, complex] (b_in**2, nbatch, 2)
:return: [batch, beta, alpha, complex] (nbatch, 2 b_out, 2 * b_out, 2)
'''
nbatch = x.size(1)
wigner = _setup_wigner(b_out, nl=b_in, weighted=for_grad, device=x.device)
wigner = wigner.view(2 * b_out, -1) # [beta, l * m] (2 * b_out, nspec)
if x.is_cuda and x.dtype == torch.float32:
import s2cnn.utils.cuda as cuda_utils
cuda_kernel = _setup_s2ifft_cuda_kernel(b=b_out, nl=b_in, nbatch=nbatch, device=x.device.index)
stream = cuda_utils.Stream(ptr=torch.cuda.current_stream().cuda_stream)
output = x.new_empty((nbatch, 2 * b_out, 2 * b_out, 2))
cuda_kernel(block=(1024, 1, 1),
grid=(cuda_utils.get_blocks(nbatch * (2 * b_out) ** 2, 1024), 1, 1),
args=[x.data_ptr(), wigner.data_ptr(), output.data_ptr()],
stream=stream)
# [batch, beta, m, complex] (nbatch, 2 * b_out, 2 * b_out, 2)
else:
output = x.new_zeros((nbatch, 2 * b_out, 2 * b_out, 2))
for l in range(b_in):
s = slice(l ** 2, l ** 2 + 2 * l + 1)
out = torch.einsum("mzc,bm->zbmc", (x[s], wigner[:, s]))
output[:, :, :l + 1] += out[:, :, -l - 1:]
if l > 0:
output[:, :, -l:] += out[:, :, :l]
output = torch.view_as_real(torch.fft.ifft(torch.view_as_complex(output))) * output.size(-2) # [batch, beta, alpha, complex]
output = output.view(*batch_size, 2 * b_out, 2 * b_out, 2)
return output | :param x: [l * m, ..., complex] | s2_ifft | python | jonkhler/s2cnn | s2cnn/soft/s2_fft.py | https://github.com/jonkhler/s2cnn/blob/master/s2cnn/soft/s2_fft.py | MIT |
def so3_fft(x, for_grad=False, b_out=None):
'''
:param x: [..., beta, alpha, gamma, complex]
:return: [l * m * n, ..., complex]
'''
assert x.size(-1) == 2, x.size()
b_in = x.size(-2) // 2
assert x.size(-2) == 2 * b_in
assert x.size(-3) == 2 * b_in
assert x.size(-4) == 2 * b_in
if b_out is None:
b_out = b_in
batch_size = x.size()[:-4]
x = x.view(-1, 2 * b_in, 2 * b_in, 2 * b_in, 2) # [batch, beta, alpha, gamma, complex]
'''
:param x: [batch, beta, alpha, gamma, complex] (nbatch, 2 b_in, 2 b_in, 2 b_in, 2)
:return: [l * m * n, batch, complex] (b_out (4 b_out**2 - 1) // 3, nbatch, 2)
'''
nspec = b_out * (4 * b_out ** 2 - 1) // 3
nbatch = x.size(0)
wigner = _setup_wigner(b_in, nl=b_out, weighted=not for_grad, device=x.device) # [beta, l * m * n]
x = torch.view_as_real(torch.fft.fftn(torch.view_as_complex(x),dim=[2,3])) # [batch, beta, m, n, complex]
output = x.new_empty((nspec, nbatch, 2))
if x.is_cuda and x.dtype == torch.float32:
cuda_kernel = _setup_so3fft_cuda_kernel(b_in=b_in, b_out=b_out, nbatch=nbatch, real_input=False, device=x.device.index)
cuda_kernel(x, wigner, output) # [l * m * n, batch, complex]
else:
if b_in < b_out:
output.fill_(0)
for l in range(b_out):
s = slice(l * (4 * l ** 2 - 1) // 3, l * (4 * l ** 2 - 1) // 3 + (2 * l + 1) ** 2)
l1 = min(l, b_in - 1) # if b_out > b_in, consider high frequencies as null
xx = x.new_zeros((x.size(0), x.size(1), 2 * l + 1, 2 * l + 1, 2))
xx[:, :, l: l + l1 + 1, l: l + l1 + 1] = x[:, :, :l1 + 1, :l1 + 1]
if l1 > 0:
xx[:, :, l - l1:l, l: l + l1 + 1] = x[:, :, -l1:, :l1 + 1]
xx[:, :, l: l + l1 + 1, l - l1:l] = x[:, :, :l1 + 1, -l1:]
xx[:, :, l - l1:l, l - l1:l] = x[:, :, -l1:, -l1:]
out = torch.einsum("bmn,zbmnc->mnzc", (wigner[:, s].view(-1, 2 * l + 1, 2 * l + 1), xx))
output[s] = out.view((2 * l + 1) ** 2, -1, 2)
output = output.view(-1, *batch_size, 2) # [l * m * n, ..., complex]
return output | :param x: [..., beta, alpha, gamma, complex]
:return: [l * m * n, ..., complex] | so3_fft | python | jonkhler/s2cnn | s2cnn/soft/so3_fft.py | https://github.com/jonkhler/s2cnn/blob/master/s2cnn/soft/so3_fft.py | MIT |
def so3_rfft(x, for_grad=False, b_out=None):
'''
:param x: [..., beta, alpha, gamma]
:return: [l * m * n, ..., complex]
'''
b_in = x.size(-1) // 2
assert x.size(-1) == 2 * b_in
assert x.size(-2) == 2 * b_in
assert x.size(-3) == 2 * b_in
if b_out is None:
b_out = b_in
batch_size = x.size()[:-3]
x = x.contiguous().view(-1, 2 * b_in, 2 * b_in, 2 * b_in) # [batch, beta, alpha, gamma]
'''
:param x: [batch, beta, alpha, gamma] (nbatch, 2 b_in, 2 b_in, 2 b_in)
:return: [l * m * n, batch, complex] (b_out (4 b_out**2 - 1) // 3, nbatch, 2)
'''
nspec = b_out * (4 * b_out ** 2 - 1) // 3
nbatch = x.size(0)
wigner = _setup_wigner(b_in, nl=b_out, weighted=not for_grad, device=x.device)
output = x.new_empty((nspec, nbatch, 2))
if x.is_cuda and x.dtype == torch.float32:
x = torch.view_as_real(torch.fft.rfftn(x, dim=[2,3])) # [batch, beta, m, n, complex]
cuda_kernel = _setup_so3fft_cuda_kernel(b_in=b_in, b_out=b_out, nbatch=nbatch, real_input=True, device=x.device.index)
cuda_kernel(x, wigner, output)
else:
x = torch.view_as_real(torch.fft.fftn(torch.view_as_complex(torch.stack((x, torch.zeros_like(x)), dim=-1)), dim=[2,3]))  # full two-sided FFT (rfftn rejects complex input); the loop below indexes negative m and n
if b_in < b_out:
output.fill_(0)
for l in range(b_out):
s = slice(l * (4 * l**2 - 1) // 3, l * (4 * l**2 - 1) // 3 + (2 * l + 1) ** 2)
l1 = min(l, b_in - 1) # if b_out > b_in, consider high frequencies as null
xx = x.new_zeros((x.size(0), x.size(1), 2 * l + 1, 2 * l + 1, 2))
xx[:, :, l: l + l1 + 1, l: l + l1 + 1] = x[:, :, :l1 + 1, :l1 + 1]
if l1 > 0:
xx[:, :, l - l1:l, l: l + l1 + 1] = x[:, :, -l1:, :l1 + 1]
xx[:, :, l: l + l1 + 1, l - l1:l] = x[:, :, :l1 + 1, -l1:]
xx[:, :, l - l1:l, l - l1:l] = x[:, :, -l1:, -l1:]
out = torch.einsum("bmn,zbmnc->mnzc", (wigner[:, s].view(-1, 2 * l + 1, 2 * l + 1), xx))
output[s] = out.view((2 * l + 1) ** 2, -1, 2)
output = output.view(-1, *batch_size, 2) # [l * m * n, ..., complex]
return output | :param x: [..., beta, alpha, gamma]
:return: [l * m * n, ..., complex] | so3_rfft | python | jonkhler/s2cnn | s2cnn/soft/so3_fft.py | https://github.com/jonkhler/s2cnn/blob/master/s2cnn/soft/so3_fft.py | MIT |
def so3_ifft(x, for_grad=False, b_out=None):
'''
:param x: [l * m * n, ..., complex]
'''
assert x.size(-1) == 2
nspec = x.size(0)
b_in = round((3 / 4 * nspec) ** (1 / 3))
assert nspec == b_in * (4 * b_in ** 2 - 1) // 3
if b_out is None:
b_out = b_in
batch_size = x.size()[1:-1]
x = x.view(nspec, -1, 2) # [l * m * n, batch, complex] (nspec, nbatch, 2)
'''
:param x: [l * m * n, batch, complex] (b_in (4 b_in**2 - 1) // 3, nbatch, 2)
:return: [batch, beta, alpha, gamma, complex] (nbatch, 2 b_out, 2 b_out, 2 b_out, 2)
'''
nbatch = x.size(1)
wigner = _setup_wigner(b_out, nl=b_in, weighted=for_grad, device=x.device) # [beta, l * m * n] (2 * b_out, nspec)
output = x.new_empty((nbatch, 2 * b_out, 2 * b_out, 2 * b_out, 2))
if x.is_cuda and x.dtype == torch.float32:
cuda_kernel = _setup_so3ifft_cuda_kernel(b_in=b_in, b_out=b_out, nbatch=nbatch, real_output=False, device=x.device.index)
cuda_kernel(x, wigner, output) # [batch, beta, m, n, complex]
else:
output.fill_(0)
for l in range(min(b_in, b_out)):
s = slice(l * (4 * l**2 - 1) // 3, l * (4 * l**2 - 1) // 3 + (2 * l + 1) ** 2)
out = torch.einsum("mnzc,bmn->zbmnc", (x[s].view(2 * l + 1, 2 * l + 1, -1, 2), wigner[:, s].view(-1, 2 * l + 1, 2 * l + 1)))
l1 = min(l, b_out - 1) # if b_out < b_in
output[:, :, :l1 + 1, :l1 + 1] += out[:, :, l: l + l1 + 1, l: l + l1 + 1]
if l > 0:
output[:, :, -l1:, :l1 + 1] += out[:, :, l - l1: l, l: l + l1 + 1]
output[:, :, :l1 + 1, -l1:] += out[:, :, l: l + l1 + 1, l - l1: l]
output[:, :, -l1:, -l1:] += out[:, :, l - l1: l, l - l1: l]
output = torch.view_as_real(torch.fft.ifftn(torch.view_as_complex(output), dim=[2,3])) * output.size(-2) ** 2 # [batch, beta, alpha, gamma, complex]
output = output.view(*batch_size, 2 * b_out, 2 * b_out, 2 * b_out, 2)
return output | :param x: [l * m * n, ..., complex] | so3_ifft | python | jonkhler/s2cnn | s2cnn/soft/so3_fft.py | https://github.com/jonkhler/s2cnn/blob/master/s2cnn/soft/so3_fft.py | MIT |
def so3_rifft(x, for_grad=False, b_out=None):
'''
:param x: [l * m * n, ..., complex]
'''
assert x.size(-1) == 2
nspec = x.size(0)
b_in = round((3 / 4 * nspec) ** (1 / 3))
assert nspec == b_in * (4 * b_in ** 2 - 1) // 3
if b_out is None:
b_out = b_in
batch_size = x.size()[1:-1]
x = x.view(nspec, -1, 2) # [l * m * n, batch, complex] (nspec, nbatch, 2)
'''
:param x: [l * m * n, batch, complex] (b_in (4 b_in**2 - 1) // 3, nbatch, 2)
:return: [batch, beta, alpha, gamma] (nbatch, 2 b_out, 2 b_out, 2 b_out)
'''
nbatch = x.size(1)
wigner = _setup_wigner(b_out, nl=b_in, weighted=for_grad, device=x.device) # [beta, l * m * n] (2 * b_out, nspec)
output = x.new_empty((nbatch, 2 * b_out, 2 * b_out, 2 * b_out, 2))
if x.is_cuda and x.dtype == torch.float32:
cuda_kernel = _setup_so3ifft_cuda_kernel(b_in=b_in, b_out=b_out, nbatch=nbatch, real_output=True, device=x.device.index)
cuda_kernel(x, wigner, output) # [batch, beta, m, n, complex]
else:
# TODO can be optimized knowing that the output is real, like in _setup_so3ifft_cuda_kernel(real_output=True)
output.fill_(0)
for l in range(min(b_in, b_out)):
s = slice(l * (4 * l**2 - 1) // 3, l * (4 * l**2 - 1) // 3 + (2 * l + 1) ** 2)
out = torch.einsum("mnzc,bmn->zbmnc", (x[s].view(2 * l + 1, 2 * l + 1, -1, 2), wigner[:, s].view(-1, 2 * l + 1, 2 * l + 1)))
l1 = min(l, b_out - 1) # if b_out < b_in
output[:, :, :l1 + 1, :l1 + 1] += out[:, :, l: l + l1 + 1, l: l + l1 + 1]
if l > 0:
output[:, :, -l1:, :l1 + 1] += out[:, :, l - l1: l, l: l + l1 + 1]
output[:, :, :l1 + 1, -l1:] += out[:, :, l: l + l1 + 1, l - l1: l]
output[:, :, -l1:, -l1:] += out[:, :, l - l1: l, l - l1: l]
output = torch.view_as_real(torch.fft.ifftn(torch.view_as_complex(output), dim=[2,3])) * output.size(-2) ** 2 # [batch, beta, alpha, gamma, complex]
output = output[..., 0] # [batch, beta, alpha, gamma]
output = output.contiguous()
output = output.view(*batch_size, 2 * b_out, 2 * b_out, 2 * b_out)
return output | :param x: [l * m * n, ..., complex] | so3_rifft | python | jonkhler/s2cnn | s2cnn/soft/so3_fft.py | https://github.com/jonkhler/s2cnn/blob/master/s2cnn/soft/so3_fft.py | MIT |
def _setup_so3fft_cuda_kernel(b_in, b_out, nbatch, real_input, device=0):
kernel = '''
#define B_IN {}
#define B_OUT {}
#define NSPEC {}
#define NBATCH {}
'''.format(b_in, b_out, b_out * (4 * b_out ** 2 - 1) // 3, nbatch)
if real_input:
kernel += '''
#define REAL_IN
'''
kernel += '''
#define MOD(i, n) (((i) + (n)) % (n))
#define MAX(x, y) ((x) < (y) ? (y) : (x))
#define CEIL_DIV(x, y) (((x) + (y) - 1) / (y))
extern "C"
__global__ void main_(const float* in, const float* wig, float* out)
{
// blockIdx = (l, batch, mn)
// blockDim = (32, 32, 1)
// threadIdx = (sub l, sub batch, 0)
// gridDim = (b / 32, nbatch / 32, (2b-1)**2)
int m = (blockIdx.z / (2 * B_OUT - 1)) - (B_OUT - 1);
int n = (blockIdx.z % (2 * B_OUT - 1)) - (B_OUT - 1);
int l_min = MAX(abs(m), abs(n));
if (blockIdx.x * 32 + 31 < l_min) {
// for blocks fully out of l-range
return; // note: this return does not depend on threadIdx
}
#ifdef REAL_IN
if (n < 0 || (n == 0 && m < 0)) {
return; // note: this return does not depend on threadIdx
}
#endif
int batch = blockIdx.y * 32 + threadIdx.y;
int l = blockIdx.x * 32 + threadIdx.x;
int lmn = (4 * l*l - 1) * l / 3 + (l+m) * (2 * l + 1) + (l+n);
float sum_re = 0.0;
float sum_im = 0.0;
for (int tile = 0; tile < CEIL_DIV(2 * B_IN, 32); ++tile) {
__shared__ float tileA[32][32][2];
__shared__ float tileB[32][32];
int beta = tile * 32 + threadIdx.x;
#ifdef REAL_IN
// `in` shape is (NBATCH, 2 * B_IN, 2 * B_IN, B_IN + 1, 2)
// http://www.fftw.org/fftw3_doc/Multi_002dDimensional-DFTs-of-Real-Data.html
int i = (((batch * 2*B_IN + beta) * 2*B_IN + MOD(m, 2*B_IN)) * (B_IN + 1) + n) * 2;
#else
int i = (((batch * 2*B_IN + beta) * 2*B_IN + MOD(m, 2*B_IN)) * 2*B_IN + MOD(n, 2*B_IN)) * 2;
#endif
tileA[threadIdx.y][threadIdx.x][0] = beta < 2*B_IN && batch < NBATCH && m < B_IN && n < B_IN && m > -B_IN && n > -B_IN ? in[i + 0] : 0.0;
tileA[threadIdx.y][threadIdx.x][1] = beta < 2*B_IN && batch < NBATCH && m < B_IN && n < B_IN && m > -B_IN && n > -B_IN ? in[i + 1] : 0.0;
// add constraints to m and n to remove aliasing (when b_out > b_in)
beta = tile * 32 + threadIdx.y;
tileB[threadIdx.y][threadIdx.x] = beta < 2*B_IN && l_min <= l && l < B_OUT ? wig[beta * NSPEC + lmn] : 0.0;
__syncthreads();
for (int beta = 0; beta < 32; ++beta) {
sum_re += tileA[threadIdx.y][beta][0] * tileB[beta][threadIdx.x];
sum_im += tileA[threadIdx.y][beta][1] * tileB[beta][threadIdx.x];
}
__syncthreads();
}
// About this if: some blocks are used to compute but not to save the results
if (l_min <= l && l < B_OUT && batch < NBATCH) {
out[(lmn * NBATCH + batch) * 2 + 0] = sum_re;
out[(lmn * NBATCH + batch) * 2 + 1] = sum_im;
#ifdef REAL_IN
lmn = (4 * l*l - 1) * l / 3 + (l-m) * (2 * l + 1) + (l-n);
float fudge = (m - n) % 2 == 0 ? 1.0 : -1.0;
out[(lmn * NBATCH + batch) * 2 + 0] = fudge * sum_re;
out[(lmn * NBATCH + batch) * 2 + 1] = -fudge * sum_im;
#endif
}
}
'''
import s2cnn.utils.cuda as cuda_utils
kernel = cuda_utils.compile_kernel(kernel, 'so3fft.cu', 'main_')
stream = cuda_utils.Stream(ptr=torch.cuda.current_stream().cuda_stream)
def fun(x, wigner, output):
assert output.is_contiguous()
kernel(block=(32, 32, 1),
grid=(math.ceil(b_out / 32), math.ceil(nbatch / 32), (2 * b_out - 1) ** 2),
args=[x.contiguous().data_ptr(), wigner.contiguous().data_ptr(), output.data_ptr()],
stream=stream)
return fun | .format(b_in, b_out, b_out * (4 * b_out ** 2 - 1) // 3, nbatch)
if real_input:
kernel += | _setup_so3fft_cuda_kernel | python | jonkhler/s2cnn | s2cnn/soft/so3_fft.py | https://github.com/jonkhler/s2cnn/blob/master/s2cnn/soft/so3_fft.py | MIT |
def _setup_so3ifft_cuda_kernel(b_in, b_out, nbatch, real_output, device=0):
kernel = '''
#define B_IN {}
#define B_OUT {}
#define NSPEC {}
#define NBATCH {}
'''.format(b_in, b_out, b_in * (4 * b_in ** 2 - 1) // 3, nbatch)
if real_output:
kernel += '''
#define REAL_OUT
'''
kernel += '''
#define MOD(i, n) (((i) + (n)) % (n))
#define MAX(x, y) ((x) < (y) ? (y) : (x))
#define MIN(x, y) ((x) < (y) ? (x) : (y))
#define CEIL_DIV(x, y) (((x) + (y) - 1) / (y))
extern "C"
__global__ void main_(const float* in, const float* wig, float* out)
{
int m = (blockIdx.z / (2 * B_OUT - 1)) - (B_OUT - 1);
int n = (blockIdx.z % (2 * B_OUT - 1)) - (B_OUT - 1);
#ifdef REAL_OUT
if (n < 0 || (n == 0 && m < 0)) {
return; // note: this return does not depend on threadIdx
}
#endif
int l_min = MAX(abs(m), abs(n));
int batch = blockIdx.y * 32 + threadIdx.y;
float sum_re = 0.0;
float sum_im = 0.0;
// will not calculate when l > min(b_in, b_out)-1
for (int tile = 0; tile < CEIL_DIV(MIN(B_IN, B_OUT) - l_min, 32); ++tile) {
__shared__ float tileA[2][32][32];
__shared__ float tileB[32][32+1];
int l = l_min + tile * 32 + threadIdx.x;
int lmn = (4 * l*l - 1) * l / 3 + (l+m) * (2 * l + 1) + (l+n);
int i = (lmn * NBATCH + batch) * 2;
tileA[0][threadIdx.y][threadIdx.x] = l < MIN(B_IN, B_OUT) && batch < NBATCH && m < B_OUT && n < B_OUT && m > -B_OUT && n > -B_OUT ? in[i + 0] : 0.0;
tileA[1][threadIdx.y][threadIdx.x] = l < MIN(B_IN, B_OUT) && batch < NBATCH && m < B_OUT && n < B_OUT && m > -B_OUT && n > -B_OUT ? in[i + 1] : 0.0;
// add constraints to m and n to remove aliasing (when b_out < b_in)
int beta = blockIdx.x * 32 + threadIdx.y;
tileB[threadIdx.x][threadIdx.y] = l < MIN(B_IN, B_OUT) && beta < 2*B_OUT ? wig[beta * NSPEC + lmn] : 0.0;
__syncthreads();
for (int l = 0; l < 32; ++l) {
sum_re += tileA[0][threadIdx.y][l] * tileB[l][threadIdx.x];
sum_im += tileA[1][threadIdx.y][l] * tileB[l][threadIdx.x];
}
__syncthreads();
}
int beta = blockIdx.x * 32 + threadIdx.x;
if (beta < 2*B_OUT && batch < NBATCH) {
int i = (((batch * 2*B_OUT + beta) * 2*B_OUT + MOD(m, 2*B_OUT)) * 2*B_OUT + MOD(n, 2*B_OUT)) * 2;
out[i + 0] = sum_re;
out[i + 1] = sum_im;
#ifdef REAL_OUT
i = (((batch * 2*B_OUT + beta) * 2*B_OUT + MOD(-m, 2*B_OUT)) * 2*B_OUT + MOD(-n, 2*B_OUT)) * 2;
out[i + 0] = sum_re;
out[i + 1] = -sum_im;
#endif
}
}
'''
import s2cnn.utils.cuda as cuda_utils
kernel = cuda_utils.compile_kernel(kernel, 'so3ifft.cu', 'main_')
stream = cuda_utils.Stream(ptr=torch.cuda.current_stream().cuda_stream)
def fun(x, wigner, output):
output[:] = 0
kernel(block=(32, 32, 1),
grid=(math.ceil(2 * b_out / 32), math.ceil(nbatch / 32), (2 * b_out - 1) ** 2),
args=[x.data_ptr(), wigner.data_ptr(), output.data_ptr()],
stream=stream)
return fun | .format(b_in, b_out, b_in * (4 * b_in ** 2 - 1) // 3, nbatch)
if real_output:
kernel += | _setup_so3ifft_cuda_kernel | python | jonkhler/s2cnn | s2cnn/soft/so3_fft.py | https://github.com/jonkhler/s2cnn/blob/master/s2cnn/soft/so3_fft.py | MIT |
def eval_batch_mlp(mlp, data, batch_idxs, criterion, device_id=0):
""" evaluate a batch for the baseline mlp """
atom_types = to_one_hot(data["features"]["atom_types"][batch_idxs, ...],
NUM_ATOM_TYPES)
targets = data["targets"][batch_idxs, ...]
atom_types = Variable(atom_types)
targets = Variable(targets)
if torch.cuda.is_available():
atom_types = atom_types.cuda(device_id)
targets = targets.cuda(device_id)
outputs = mlp(atom_types)
loss = criterion(outputs, targets)
return loss | evaluate a batch for the baseline mlp | eval_batch_mlp | python | jonkhler/s2cnn | examples/molecules/run_experiment.py | https://github.com/jonkhler/s2cnn/blob/master/examples/molecules/run_experiment.py | MIT |
def eval_batch_s2cnn(mlp, s2cnn, data, batch_idxs, criterion, device_id=0):
""" evaluate a batch for the s2cnn """
geometry = data["features"]["geometry"][batch_idxs, ...]
atom_types = data["features"]["atom_types"][batch_idxs, ...]
atom_types_one_hot = to_one_hot(atom_types, NUM_ATOM_TYPES)
targets = data["targets"][batch_idxs, ...]
geometry = Variable(geometry)
atom_types = Variable(atom_types)
atom_types_one_hot = Variable(atom_types_one_hot)
targets = Variable(targets)
if torch.cuda.is_available():
atom_types_one_hot = atom_types_one_hot.cuda(device_id)
geometry = geometry.cuda(device_id)
atom_types = atom_types.cuda(device_id)
targets = targets.cuda(device_id)
outputs = mlp(atom_types_one_hot)
outputs += s2cnn(geometry, atom_types)
loss = criterion(outputs, targets)
return loss | evaluate a batch for the s2cnn | eval_batch_s2cnn | python | jonkhler/s2cnn | examples/molecules/run_experiment.py | https://github.com/jonkhler/s2cnn/blob/master/examples/molecules/run_experiment.py | MIT |
def train_baseline(mlp, data, train_batches, test_batches, num_epochs,
learning_rate_mlp, device_id=0):
""" train the baseline model """
optim = OPTIMIZER(mlp.parameters(), lr=learning_rate_mlp)
criterion = nn.MSELoss()
if torch.cuda.is_available():
criterion = criterion.cuda(device_id)
for epoch in range(num_epochs):
train_losses = []
print("training")
for iteration, batch_idxs in enumerate(train_batches):
mlp.train()
optim.zero_grad()
loss = eval_batch_mlp(mlp, data, batch_idxs, criterion, device_id)
loss.backward()
optim.step()
train_losses.append(loss.item())
print("\riteration {}/{}".format(
iteration+1, train_batches.num_iterations()), end="")
print()
test_losses = []
print("evaluating")
for iteration, batch_idxs in enumerate(test_batches):
mlp.eval()
loss = eval_batch_mlp(mlp, data, batch_idxs, criterion)
test_losses.append(loss.item())
print("\riteration {}/{}".format(
iteration+1, test_batches.num_iterations()), end="")
print()
train_loss = np.sqrt(np.mean(train_losses))
test_loss = np.sqrt(np.mean(test_losses))
print("epoch {}/{} - avg train loss: {}, test loss: {}".format(
epoch+1, num_epochs, train_loss, test_loss))
return train_loss, test_loss | train the baseline model | train_baseline | python | jonkhler/s2cnn | examples/molecules/run_experiment.py | https://github.com/jonkhler/s2cnn/blob/master/examples/molecules/run_experiment.py | MIT |
def train_s2cnn(mlp, s2cnn, data, train_batches, test_batches, num_epochs,
init_learning_rate_s2cnn, learning_rate_decay_epochs,
device_id=0):
""" train the s2cnn keeping the baseline frozen """
optim = OPTIMIZER(s2cnn.parameters(), lr=init_learning_rate_s2cnn)
criterion = nn.MSELoss()
if torch.cuda.is_available():
criterion = criterion.cuda(device_id)
for epoch in range(num_epochs):
optim = exp_lr_scheduler(optim, epoch,
init_lr=init_learning_rate_s2cnn,
lr_decay_epoch=learning_rate_decay_epochs)
train_losses = []
print("training")
for iteration, batch_idxs in enumerate(train_batches):
s2cnn.train()
mlp.eval()
optim.zero_grad()
loss = eval_batch_s2cnn(mlp, s2cnn, data, batch_idxs, criterion)
loss.backward()
optim.step()
train_losses.append(loss.item())
print("\riteration {}/{} - batch loss: {}".format(
iteration+1, train_batches.num_iterations(),
np.sqrt(train_losses[-1])), end="")
print()
test_losses = []
print("evaluating")
for iteration, batch_idxs in enumerate(test_batches):
s2cnn.eval()
mlp.eval()
loss = eval_batch_s2cnn(mlp, s2cnn, data, batch_idxs, criterion)
test_losses.append(loss.item())
print("\riteration {}/{} - batch loss: {}".format(
iteration+1, test_batches.num_iterations(),
np.sqrt(test_losses[-1])), end="")
print()
train_loss = np.sqrt(np.mean(train_losses))
test_loss = np.sqrt(np.mean(test_losses))
print("epoch {}/{} - avg train loss: {}, test loss: {}".format(
epoch+1, num_epochs, train_loss, test_loss))
return train_loss, test_loss | train the s2cnn keeping the baseline frozen | train_s2cnn | python | jonkhler/s2cnn | examples/molecules/run_experiment.py | https://github.com/jonkhler/s2cnn/blob/master/examples/molecules/run_experiment.py | MIT |
def forward(self, x):
'''
x: [batch, n_atoms, n_types, beta, alpha]
types: [batch, n_atoms, n_types]
'''
# get charge
z = torch.autograd.Variable(
torch.from_numpy(np.array(CHARGES))
).view(1, -1).float().cuda()
# get atom frequency per molecule
x = torch.sum(x, dim=1)
x[:, 0] = 0
# multiply frequency by charge
# TODO: concatenate instead?
z = z.expand_as(x)
x = x * z
# simple transform
x = self.W_h(x)
x = F.relu(x)
x = self.W_h2(x)
x = F.relu(x)
x = self.W_t(x)
return x | x: [batch, n_atoms, n_types, beta, alpha]
types: [batch, n_atoms, n_types] | forward | python | jonkhler/s2cnn | examples/molecules/baseline_model.py | https://github.com/jonkhler/s2cnn/blob/master/examples/molecules/baseline_model.py | MIT |
def load_data(path, test_strat_id=None, cuda=None):
'''
Loads the data
path: path to the molecule .gz
batch_size: size of a mini batch
test_strat_id: id of strat being used as test set
'''
data = joblib.load(path)
# map charges to type indices
# TODO refactor to individual function
# TODO make less reliant on individual data dict structure
type_remap = -np.ones(int(data["features"]["atom_types"].max())+1)
unique_types = np.unique(data["features"]["atom_types"]).astype(int)
type_remap[unique_types] = np.arange(len(unique_types))
data["features"]["atom_types"] = type_remap[
data["features"]["atom_types"].astype(int)]
# wrap features as torch tensors
data["features"]["geometry"] = torch.FloatTensor(
data["features"]["geometry"].astype(np.float32))
data["features"]["atom_types"] = torch.LongTensor(
data["features"]["atom_types"].astype(np.int64))
data["targets"] = torch.from_numpy(data["targets"])
if cuda is not None:
data["features"]["geometry"].cuda(cuda)
data["features"]["atom_types"].cuda(cuda)
data["targets"].cuda(cuda)
train = np.ndarray((0))
test = np.ndarray((0))
# split in train and test set according to used strat
# TODO this should be solved in a less ugly/ad-hoc fashion!
if not test_strat_id:
test_strat_id = np.random.randint(len(data["strats"]))
for i in range(len(data["strats"])):
if i != test_strat_id:
train = np.concatenate((train, data["strats"][i]))
else:
test = np.concatenate((test, data["strats"][i]))
return data, train, test | Loads the data
path: path to the molecule .gz
batch_size: size of a mini batch
test_strat_id: id of strat being used as test set | load_data | python | jonkhler/s2cnn | examples/molecules/utils.py | https://github.com/jonkhler/s2cnn/blob/master/examples/molecules/utils.py | MIT |
def exp_lr_scheduler(optimizer, epoch, init_lr=5e-3, lr_decay_epoch=40):
"""Decay learning rate by a factor of 0.1 every lr_decay_epoch epochs."""
lr = init_lr * (0.1**(epoch // lr_decay_epoch))
if epoch % lr_decay_epoch == 0:
print('LR is set to {}'.format(lr))
for param_group in optimizer.param_groups:
param_group['lr'] = lr
return optimizer | Decay learning rate by a factor of 0.1 every lr_decay_epoch epochs. | exp_lr_scheduler | python | jonkhler/s2cnn | examples/molecules/utils.py | https://github.com/jonkhler/s2cnn/blob/master/examples/molecules/utils.py | MIT |
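The decay schedule above in isolation, with the default init_lr=5e-3 and lr_decay_epoch=40:

for epoch in (0, 39, 40, 80, 120):
    print(epoch, 5e-3 * (0.1 ** (epoch // 40)))
# prints 0.005 for epochs 0 and 39, then roughly 5e-4, 5e-5, 5e-6 (up to float rounding)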
def get_raw_data(path):
""" load data from matlab file """
raw = spio.loadmat(path)
coordinates = raw["R"]
charges = raw["Z"]
energies = raw["T"]
strat_ids = raw["P"]
return coordinates, charges, energies, strat_ids | load data from matlab file | get_raw_data | python | jonkhler/s2cnn | examples/molecules/datagen.py | https://github.com/jonkhler/s2cnn/blob/master/examples/molecules/datagen.py | MIT |