response (string, 1 to 33.1k chars) | instruction (string, 22 to 582k chars) |
---|---|
Get a structure of sizes for a structure of nested arrays. | def _sizes(x):
"""Get a structure of sizes for a structure of nested arrays."""
def size(x):
try:
return x.size
except Exception: # pylint: disable=broad-except
return 0
return fastmath.nested_map(size, x) |
Repeat a stream indefinitely. | def _repeat_stream(stream, n_devices):
"""Repeat a stream indefinitely."""
while True:
for example in stream(n_devices):
yield example |
Make trainer_lib.inputs.Inputs. | def _test_inputs(n_classes, with_weights=False, input_shape=(6, 6, 3)):
"""Make trainer_lib.inputs.Inputs."""
batch_size = 2 * jax.device_count()
def input_stream(n_devices):
del n_devices
key = fastmath.random.get_prng(0)
while True:
keys = fastmath.random.split(key, 4)
key = keys[0]
inputs = fastmath.random.uniform(
keys[1], [batch_size] + list(input_shape))
targets = fastmath.random.randint(
keys[2], [batch_size], dtype=jnp.int32, minval=0, maxval=n_classes)
weights = fastmath.random.uniform(keys[3], [batch_size])
if with_weights:
yield inputs, targets, weights
else:
yield inputs, targets
def input_stream_masked(n_devices):
return inputs_lib.add_loss_weights(input_stream(n_devices))
return inputs_lib.Inputs(input_stream_masked) |
Make trainer_lib.inputs.Inputs for language model. | def _test_inputs_lm(vocab_size, seq_len, per_device_batch_size=2):
"""Make trainer_lib.inputs.Inputs for language model."""
batch_size = per_device_batch_size * jax.device_count()
def input_stream(_):
def make_batch(key):
return fastmath.random.randint(
key, [batch_size, seq_len], dtype=jnp.int32, minval=0,
maxval=vocab_size)
key = fastmath.random.get_prng(0)
while True:
keys = fastmath.random.split(key, 3)
key = keys[0]
inputs = make_batch(keys[1])
targets = make_batch(keys[2])
yield inputs, targets
def input_stream_masked(n_devices):
return inputs_lib.add_loss_weights(input_stream(n_devices))
return inputs_lib.Inputs(input_stream_masked) |
Returns a model+ends layer built on an already initialized model.
Ends can be loss or metric layers.
Args:
model: Layer with initialized weights and state.
end_layers: List of end layers.
batch_signature: Signature of the model input batch.
Returns:
An initialized, combined model+ends layer, preserving the initialization
of ``model``. | def _model_with_ends(model, end_layers, batch_signature):
"""Returns a model+ends layer built on an already initialized model.
Ends can be loss or metric layers.
Args:
model: Layer with initialized weights and state.
end_layers: List of end layers.
batch_signature: Signature of the model input batch.
Returns:
An initialized, combined model+ends layer, preserving the initialization
of ``model``.
"""
# TODO(jonni): Redo this function as part of an initialization refactor?
metrics_layer = tl.Branch(*end_layers)
metrics_input_signature = model.output_signature(batch_signature)
_, _ = metrics_layer.init(metrics_input_signature)
model_with_metrics = tl.Serial(model, metrics_layer)
return model_with_metrics |
Returns a model+metrics layer built on an already initialized model.
Args:
model: Layer with initialized weights and state.
eval_task: :py:class:`EvalTask` instance.
Returns:
An initialized, combined model+metrics layer, preserving the initialization
of ``model``. | def _model_with_metrics(model, eval_task):
"""Returns a model+metrics layer built on an already initialized model.
Args:
model: Layer with initialized weights and state.
eval_task: :py:class:`EvalTask` instance.
Returns:
An initialized, combined model+metrics layer, preserving the initialization
of ``model``.
"""
return _model_with_ends(
model, eval_task.metrics, shapes.signature(eval_task.sample_batch)
) |
Returns False for all step numbers. | def _never(*args):
"""Returns False for all step numbers."""
del args
return False |
A function that's true at 1 and n when n % period == 0. | def _at_step_1_and_every_nth_step(period):
"""A function that's true at 1 and n when n % period == 0."""
if period is None:
return lambda step_n: False
def _at_1_and_periodically(step_n):
return (step_n == 1) or (step_n > 0 and (step_n % period == 0))
return _at_1_and_periodically |
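For illustration, a minimal usage sketch of the helper above (assuming it is in scope; the step numbers are arbitrary): the returned predicate fires at step 1, at every multiple of the period, and never when the period is None.

should_log = _at_step_1_and_every_nth_step(100)
assert should_log(1)        # step 1 always triggers
assert should_log(200)      # multiples of the period trigger
assert not should_log(150)  # other steps do not
assert not _at_step_1_and_every_nth_step(None)(1)  # None period never triggers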
Pickle obj to file_path with gzipping and failure protection. | def pickle_to_file(obj, file_path, gzip=False):
"""Pickle obj to file_path with gzipping and failure protection."""
# Pickle to tmp file and overwrite to prevent writing partial files.
tmp_file_path = file_path + '._tmp_'
with tf.io.gfile.GFile(tmp_file_path, 'wb') as f:
if not gzip:
pickle.dump(obj, f, protocol=pickle.HIGHEST_PROTOCOL)
else:
with gzip_lib.GzipFile(fileobj=f, compresslevel=2) as gzipf:
pickle.dump(obj, gzipf, protocol=pickle.HIGHEST_PROTOCOL)
# Moving a file is much less error-prone than pickling large files.
tf.io.gfile.rename(tmp_file_path, file_path, overwrite=True) |
Unpickle obj from file_path with gzipping. | def unpickle_from_file(file_path, gzip=False):
"""Unpickle obj from file_path with gzipping."""
with tf.io.gfile.GFile(file_path, 'rb') as f:
if not gzip:
obj = pickle.load(f)
else:
with gzip_lib.GzipFile(fileobj=f, compresslevel=2) as gzipf:
obj = pickle.load(gzipf)
return obj |
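A small round-trip sketch for the two helpers above, assuming they are in scope and that tf.io.gfile can write to the chosen local path (the path and checkpoint contents are made up for illustration):

ckpt = {'step': 123, 'weights': [0.1, 0.2, 0.3]}
pickle_to_file(ckpt, '/tmp/example_ckpt.pkl.gz', gzip=True)  # atomic write via tmp file + rename
restored = unpickle_from_file('/tmp/example_ckpt.pkl.gz', gzip=True)
assert restored == ckpt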
Initializes random generators for Python, NumPy, TensorFlow, and JAX. | def _init_random_number_generators(seed=None):
"""Initializes random generators for Python, NumPy, TensorFlow, and JAX."""
# Seed Python random (None as seed is okay), then use it to seed the others.
random.seed(seed)
if seed is None:
seed = random.randint(0, 2**31 - 1)
logging.info('using seed %d', seed)
np.random.seed(seed)
tf.random.set_seed(seed)
return jax_random.get_prng(seed) |
Initializes host and device attributes for this trainer.
Args:
n_devices: Number of devices this trainer will use. If ``None``, get the
number from the backend.
random_seed: Random seed as the starting point for all random numbers used
by the trainer. If ``None``, calculate one from system time and host id.
Returns:
is_chief: True if this trainer has special chief responsibilities.
host_count: Number of hosts in this computation.
n_devices: The passed in value of n_devices or a computed default (for this
host).
random_seed: The passed in value of random_seed or a computed default. | def init_host_and_devices(n_devices=None, random_seed=None):
"""Initializes host and device attributes for this trainer.
Args:
n_devices: Number of devices this trainer will use. If ``None``, get the
number from the backend.
random_seed: Random seed as the starting point for all random numbers used
by the trainer. If ``None``, calculate one from system time and host id.
Returns:
is_chief: True if this trainer has special chief responsibilities.
host_count: Number of hosts in this computation.
n_devices: The passed in value of n_devices or a computed default (for this
host).
random_seed: The passed in value of random_seed or a computed default.
"""
if fastmath.is_backend(fastmath.Backend.JAX):
host_id = jax.process_index()
host_count = jax.host_count()
else:
host_id = 0
host_count = 1
is_chief = (host_id == 0)
logging.info('Initializing hosts and devices: host_id %d, host_count %d, '
'is_chief %d', host_id, host_count, is_chief)
device_count = fastmath.local_device_count()
n_devices = n_devices or device_count
# TODO(lukaszkaiser): remove this restriction when possible.
if n_devices != device_count and fastmath.is_backend(fastmath.Backend.JAX):
raise ValueError('JAX cannot work yet with n_devices != all devices: '
'%d != %d' % (n_devices, device_count))
if random_seed is None and host_count > 1:
random_seed = int(1e6 * (host_id + time.time())) % 2**31
return (is_chief, host_count, n_devices,
_init_random_number_generators(random_seed)) |
Makes train and eval model's weights and state the same across hosts. | def _make_weights_and_state_same_across_hosts(weights_and_state):
"""Makes train and eval model's weights and state the same across hosts."""
  # We assume that weights_and_state have already been replicated, i.e. the
  # leading axis is self._n_devices.
# This is the total number of devices across all hosts.
n_devices_total = fastmath.psum(jnp.array(1.0), 'devices').astype(jnp.int32)
# We average the weights and state across all devices.
# We also make sure we don't change the type of the weights and state.
return fastmath.nested_map(
lambda x: (fastmath.psum(x, 'devices') / n_devices_total).astype(x.dtype),
weights_and_state) |
Checks whether no weights in the model have been initialized. | def _is_uninitialized(model):
"""Checks whether no weights in the model have been initialized."""
if not _is_empty(model.weights):
return False
return all(_is_uninitialized(l) for l in model.sublayers) |
Puts partial into full matching by shape. | def _match_by_shape(full, partial):
"""Puts partial into full matching by shape."""
partial_idx = 0
res = []
for w in full:
if partial_idx >= len(partial):
      res.append(w)  # partial list exhausted; fill the rest from the full list
elif w is None and partial[partial_idx] is None: # both Nones, move on
res.append(None)
partial_idx += 1
elif w is None or partial[partial_idx] is None: # one None but not both
res.append(w)
elif w.shape == partial[partial_idx].shape:
res.append(partial[partial_idx])
partial_idx += 1
else:
res.append(w)
if partial_idx < len(partial):
_log('Did not manage to match shapes in model for all checkpoint weights.')
for w in partial[:partial_idx]:
_log(' Inserted tensor of shape %s' % str(w.shape))
for i, w in enumerate(partial[partial_idx:]):
_log(' Not inserted tensor of shape %s' % str(w.shape))
model_weight_shape = str(full[i + partial_idx].shape)
_log(' Tensor in that place has shape: %s' % model_weight_shape)
raise IndexError
return res |
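To make the matching rule concrete, a minimal sketch with plain NumPy arrays (shapes are arbitrary; `_match_by_shape` is assumed to be in scope): weights from `partial` are consumed in order wherever shapes line up, and the model's own weights are kept elsewhere.

import numpy as np

full = [np.zeros((3, 4)), None, np.zeros((7,)), np.zeros((5, 5))]
partial = [np.ones((3, 4)), None, np.ones((5, 5))]
merged = _match_by_shape(full, partial)
# merged[0] and merged[3] are taken from `partial` (shapes matched);
# merged[2] keeps the (7,)-shaped weight from `full` because no partial weight fits.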
"Returns stream of labeled data that maps small integers to constant pi. | def _very_simple_data(output_dim=1, input_dim=1):
""""Returns stream of labeled data that maps small integers to constant pi."""
inputs_batch = np.arange(8).reshape((8, 1)) # 8 items per batch
inputs_batch = np.concatenate([inputs_batch] * input_dim, axis=1)
targets_batch = np.pi * np.ones((8, output_dim))
labeled_batch = (inputs_batch, targets_batch, np.ones_like(targets_batch))
while True:
    yield labeled_batch |
Returns stream of labeled data for testing a very simple transformer (all-ones batches). | def _very_simple_transformer_data():
  """Returns stream of labeled data for testing a very simple transformer (all-ones batches)."""
inputs_batch = np.ones((2, 2)).astype(np.int32)
targets_batch = np.ones((2, 2, 8)).astype(np.int32)
labeled_batch = (inputs_batch, targets_batch, np.ones_like(targets_batch))
while True:
yield labeled_batch |
Returns number of files in a given directory. | def _count_files(path):
"""Returns number of files in a given directory."""
return len([filename for filename in os.listdir(path)
if os.path.isfile(os.path.join(path, filename))]) |
Loads the dataset.
Looks for the dataset at /tmp/mnist.pkl.gz and downloads it if it is not there
already.
Note: The training data is shuffled.
Returns:
((train_x, train_y), (valid_x, valid_y), (test_x, test_y)).
Shapes:
train_x: num_training_examples x image_size
train_y: num_training_examples x num_classes
valid_x: num_validation_examples x image_size
valid_y: num_validation_examples x num_classes
test_x: num_test_examples x image_size
test_y: num_test_examples x num_classes | def load():
"""Loads the dataset.
Looks for the dataset at /tmp/mnist.pkl.gz and downloads it if it is not there
already.
Note: The training data is shuffled.
Returns:
((train_x, train_y), (valid_x, valid_y), (test_x, test_y)).
Shapes:
train_x: num_training_examples x image_size
train_y: num_training_examples x num_classes
valid_x: num_validation_examples x image_size
valid_y: num_validation_examples x num_classes
test_x: num_test_examples x image_size
test_y: num_test_examples x num_classes
"""
filepath = _maybe_download()
with gzip.open(os.path.join(filepath), 'rb') as f:
training_data, validation_data, test_data = pickle.load(f)
training_data = (training_data[0], [to_one_hot(x) for x in training_data[1]])
validation_data = (validation_data[0],
[to_one_hot(x) for x in validation_data[1]])
test_data = (test_data[0], [to_one_hot(x) for x in test_data[1]])
def shuffle(data):
    zipped = list(zip(*data))  # materialize so it can be shuffled in place (Python 3 zip is lazy)
    random.shuffle(zipped)
    return tuple(zip(*zipped))  # keep the result indexable, as callers slice it by batch
return (shuffle(training_data), validation_data, test_data) |
Downloads the MNIST dataset if it is not there already. | def _maybe_download():
"""Downloads the MNIST dataset if it is not there already."""
data_url = 'http://www.iro.umontreal.ca/~lisa/deep/data/mnist/mnist.pkl.gz'
filename = data_url.split('/')[-1]
filepath = os.path.join(_get_data_dir(), filename)
if not os.path.exists(filepath):
def _progress(count, block_size, total_size):
print('\r>> Downloading %s %.1f%%' %
(filename, float(count * block_size) / float(total_size) * 100.0))
    filepath, _ = urllib.request.urlretrieve(data_url, filepath, _progress)  # Python 3 location of urlretrieve
statinfo = os.stat(filepath)
print('Successfully downloaded %s %d bytes.' % (filename, statinfo.st_size))
else:
print('Data already present on disk.')
return filepath |
Runs the training. | def train(batch_size, learning_rate, num_training_iters, validation_steps):
"""Runs the training."""
print('Loading data')
training_data, validation_data, test_data = dataset.load()
print('Loaded dataset with {} training, {} validation and {} test examples.'.
format(
len(training_data[0]), len(validation_data[0]), len(test_data[0])))
assert len(training_data[0]) % batch_size == 0
assert len(validation_data[0]) % batch_size == 0
assert len(test_data[0]) % batch_size == 0
def build_iterator(data, infinite=True):
"""Build the iterator for inputs."""
index = 0
size = len(data[0])
while True:
if index + batch_size > size:
if infinite:
index = 0
else:
return
yield data[0][index:index + batch_size], data[1][index:index + batch_size]
index += batch_size
train_iter = build_iterator(training_data)
model = model_lib.Model([30])
for i in range(num_training_iters):
train_x, train_y = next(train_iter)
model.train(train_x, train_y, learning_rate)
if (i + 1) % validation_steps == 0:
validation_iter = build_iterator(validation_data, infinite=False)
correct_predictions = 0
for valid_x, valid_y in validation_iter:
correct_predictions += model.evaluate(valid_x, valid_y)
print('{}/{} correct validation predictions.'.format(
correct_predictions, len(validation_data[0]))) |
Canonicalize arguments to be used for jit.
Args:
inp: a nested structure of arguments to be canonicalized (i.e. to be
converted to Tensors). Only tf_np.ndarray and things accepted by
`tf.convert_to_tensor` will be converted.
Returns:
The canonicalized version. | def _canonicalize_jit_arguments(inp):
"""Canonicalize arguments to be used for jit.
Args:
inp: a nested structure of arguments to be canonicalized (i.e. to be
converted to Tensors). Only tf_np.ndarray and things accepted by
`tf.convert_to_tensor` will be converted.
Returns:
The canonicalized version.
"""
return tf.nest.map_structure(_canonicalize_jit_arg, inp) |
Decorator to define a function with a custom gradient.
This function is very similar to `tf.custom_gradient`. See the documentation
of `tf.custom_gradient` for detailed usage.
The differences with `tf.custom_gradient` are:
- All arguments and results are tf_np.ndarrays instead of tensors.
- The `grad_fn` returned by `f_vjp` accepts and returns nested structures,
unlike that in `tf.custom_gradient` which only accepts and returns lists.
Args:
f_vjp: the same as the `f` argument of `tf.custom_gradient`. Note that all
inputs and outputs of `f_vjp` and of the `grad_fn` function it returns can
be nested structures.
f_original: (optional) not used.
Returns:
The same as `tf.custom_gradient`. | def custom_grad(f_vjp, f_original=None):
"""Decorator to define a function with a custom gradient.
This function is very similar to `tf.custom_gradient`. See the documentation
of `tf.custom_gradient` for detailed usage.
The differences with `tf.custom_gradient` are:
- All arguments and results are tf_np.ndarrays instead of tensors.
- The `grad_fn` returned by `f_vjp` accepts and returns nested structures,
unlike that in `tf.custom_gradient` which only accepts and returns lists.
Args:
f_vjp: the same as the `f` argument of `tf.custom_gradient`. Note that all
inputs and outputs of `f_vjp` and of the `grad_fn` function it returns can
be nested structures.
f_original: (optional) not used.
Returns:
The same as `tf.custom_gradient`.
"""
del f_original
@tf.custom_gradient
def tf_f(*tf_args, **tf_kwargs):
np_args = _tf_to_np(tf_args)
np_kwargs = _tf_to_np(tf_kwargs)
np_y, np_vjp = f_vjp(*np_args, **np_kwargs)
tf_y = np_y
def tf_vjp(*flat_tf_dy):
tf_dy = tf.nest.pack_sequence_as(tf_y, flat_tf_dy)
np_dy = _tf_to_np(tf_dy)
np_dx = np_vjp(np_dy)
return tf.nest.flatten(np_dx)
return tf_y, tf_vjp
def np_f(*args, **kwargs):
    return _tf_to_np(tf_f(*args, **kwargs))
return np_f |
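A minimal sketch of how `custom_grad` might be used, assuming `tf_np` is the TF-NumPy module used throughout this file and that `grad` (defined further below) is in scope; `_square_vjp` is a made-up example function.

def _square_vjp(x):
  y = x * x
  def grad_fn(dy):
    return 2.0 * x * dy  # d(x*x)/dx contracted with the incoming cotangent
  return y, grad_fn

square = custom_grad(_square_vjp)
dsquare_dx = grad(square)
print(dsquare_dx(tf_np.asarray(3.0)))  # ~6.0, produced by grad_fn rather than autodiff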
Returns the result and the VJP function of `f`.
This function returns the result and the vector-Jacobian-product (VJP)
function of `f`.
Args:
f: a function from (nested structures of) tf_np.ndarrays to a (nested
structure of) tf_np.ndarray. If `has_aux` is True, it should return an
extra output.
*primals: the inputs to be fed to `f`.
has_aux: if True, the second output of `f` will be regarded as an auxiliary,
non-differentiable output that will be ignored by the VJP function.
Returns:
A pair `(y, vjpfun)` if `has_aux` is False; a tuple `(y, vjpfun, aux)`
otherwise. `y` and `aux` are the outputs of `f`, i.e. `y, aux =
f(*primals)`. `vjpfun` is a function `dx = vjpfun(dy)`, where `dy` is the
cotangents of `y`, having the same structures, shapes and dtypes as
`y`. `dx` is the cotangents of `x`, having the same structures, shapes and
dtypes as `x`. | def vjp(f, *primals, has_aux=False):
"""Returns the result and the VJP function of `f`.
This function returns the result and the vector-Jacobian-product (VJP)
function of `f`.
Args:
f: a function from (nested structures of) tf_np.ndarrays to a (nested
structure of) tf_np.ndarray. If `has_aux` is True, it should return an
extra output.
*primals: the inputs to be fed to `f`.
has_aux: if True, the second output of `f` will be regarded as an auxiliary,
non-differentiable output that will be ignored by the VJP function.
Returns:
A pair `(y, vjpfun)` if `has_aux` is False; a tuple `(y, vjpfun, aux)`
otherwise. `y` and `aux` are the outputs of `f`, i.e. `y, aux =
f(*primals)`. `vjpfun` is a function `dx = vjpfun(dy)`, where `dy` is the
    cotangents of `y`, having the same structures, shapes and dtypes as
    `y`. `dx` is the cotangents of `x`, having the same structures, shapes and
dtypes as `x`.
"""
with tf.GradientTape(persistent=True) as tape:
tape.watch(tf.nest.flatten(primals))
outputs = f(*primals)
if has_aux:
np_out, aux = outputs
else:
np_out = outputs
def _vjp(dy):
tf_dx = tape.gradient(np_out, primals, output_gradients=dy)
return _tf_to_np(tf_dx)
if has_aux:
ret = (np_out, _vjp, aux)
else:
ret = (np_out, _vjp)
return ret |
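A short usage sketch (assuming `vjp` and `tf_np` are in scope as above): pulling back a cotangent of 1.0 through a product recovers the partial derivatives with respect to each primal.

y, vjp_fn = vjp(lambda a, b: a * b, tf_np.asarray(2.0), tf_np.asarray(5.0))
# y is ~10.0; the pullback result has the same structure as the primals.
da, db = vjp_fn(tf_np.asarray(1.0))
# da ~ 5.0 (= b), db ~ 2.0 (= a)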
Returns a function that computes gradient of f.
Gradients can only be computed through numpy and tensorflow operations and not
through python float operations and values.
Args:
f: a function of type (params, *args) -> scalar. 'params' can be a nested
structure (made of lists and tuples) of ndarrays and the gradient is
evaluated against it. `scalar` is a scalar ndarray.
has_aux: bool, indicates whether fun returns a pair where the first element
is considered the output of the mathematical function to be differentiated
and the second element is auxiliary data.
Returns:
A gradient function of type (params, *args) -> gradients, where the result
'gradients' has the same structure and shapes as 'params'. | def grad(f, has_aux=False):
"""Returns a function that computes gradient of f.
Gradients can only be computed through numpy and tensorflow operations and not
through python float operations and values.
Args:
f: a function of type (params, *args) -> scalar. 'params' can be a nested
structure (made of lists and tuples) of ndarrays and the gradient is
evaluated against it. `scalar` is a scalar ndarray.
has_aux: bool, indicates whether fun returns a pair where the first element
is considered the output of the mathematical function to be differentiated
and the second element is auxiliary data.
Returns:
A gradient function of type (params, *args) -> gradients, where the result
'gradients' has the same structure and shapes as 'params'.
"""
def check_loss_shape(np_loss):
if not isinstance(np_loss, tf_np.ndarray):
raise ValueError(
"The result of the function to take gradient must be an ndarray.")
if not np_loss.shape.is_compatible_with([]):
raise ValueError(
"The result of the function to take gradient must be a scalar.")
def _f(params, *args):
"""The gradient function to be returned."""
with tf.GradientTape() as g:
g.watch(tf.nest.flatten(params))
outputs = f(params, *args)
if has_aux:
np_loss, aux = outputs
else:
np_loss = outputs
check_loss_shape(np_loss)
tf_grads = g.gradient(np_loss, params)
if has_aux:
res = (tf_grads, aux)
else:
res = tf_grads
return _tf_to_np(res)
return _f |
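A minimal sketch of `grad` on a toy squared-error loss, assuming `tf_np` is the TF-NumPy module used above; the parameter structure and data are invented for illustration.

def loss(params, x):
  w, b = params
  pred = tf_np.dot(x, w) + b
  return tf_np.sum((pred - 1.0) ** 2)  # must be a scalar ndarray

params = (tf_np.ones((3,)), tf_np.asarray(0.0))
grads = grad(loss)(params, tf_np.ones((2, 3)))
# `grads` is a (w_grad, b_grad) tuple mirroring the structure of `params`.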
A decorator that records some information about the function.
Args:
recorder: a function of signature `(args, kwargs, res) -> res`.
f: the original function.
Returns:
A transformed function that calls the original function and then the
recorder afterwards. | def _record_result_type(recorder, f):
"""A decorator that records some information about the function.
Args:
recorder: a function of signature `(args, kwargs, res) -> res`.
f: the original function.
Returns:
A transformed function that calls the original function and then the
recorder afterwards.
"""
def wrapper(*args, **kwargs):
res = f(*args, **kwargs)
res = recorder(args, kwargs, res)
return res
return wrapper |
Returns a function that runs a trace-compiled version of `f`.
A trace-compiled version of a function `f` has the same behavior as `f` (when
called with the same "static arguments", see below), but runs faster because
the whole computation is compiled into a computation graph once which is
reused for subsequent executions.
The trace compilation happens lazily, when the returned function is called for
the first time. The compiled function may not be cached implicitly and
multiple calls to `jit` may not share the compiled function (see below for
"static" vs "dynamic" arguments).
Args:
f: a function that takes any positional arguments `args` and any keyword
arguments `kwargs`. `ndarray`s and things accepted by
`tf.convert_to_tensor` in `args` and `kwargs` will be treated as 'dynamic
arguments' in the sense that calling the function with different values
for these arguments will not cause retracing. In contrast, arguments of
other types in `args` and `kwargs` are treated as 'static arguments' and
calling the function with different values of them will cause
re-compiling. Positional arguments whose positions are in `static_argnums`
are always treated as static arguments.
static_argnums: a tuple of positions of arguments that will be treated as
static arguments. Note that as aforementioned, any arguments that were not
convertible to tensor will also be static.
xla_forced_compile: if true, it will use XLA to force-compile the graph.
This requires that the function only contain ops that are XLA
compatible. It will compile the entire function into a single XLA op.
input_signature: a list of `tf.TensorSpec`, as the input signature to
control tracing behavior. See the
[doc](https://www.tensorflow.org/api_docs/python/tf/function) of
`tf.function` for details.
autograph: whether to use autograph to convert Python constructs such as
`if` and `while` to their TensorFlow counterparts. See the
[doc](https://www.tensorflow.org/api_docs/python/tf/function) of
`tf.function` for details.
experimental_compile: the `experimental_compile` flag for `tf.function`. See
the [doc](https://www.tensorflow.org/api_docs/python/tf/function) of
`tf.function` for details. This is the recommended way to turn on XLA for
tf.function, but unlike xla_forced_compile, it doesn't force-compile the
entire function into a single XLA op.
Returns:
A trace-compiled version of f. | def jit(f,
static_argnums=(),
xla_forced_compile=False,
input_signature=None,
autograph=False,
experimental_compile=False):
"""Returns a function that runs a trace-compiled version of `f`.
A trace-compiled version of a function `f` has the same behavior as `f` (when
called with the same "static arguments", see below), but runs faster because
the whole computation is compiled into a computation graph once which is
reused for subsequent executions.
The trace compilation happens lazily, when the returned function is called for
the first time. The compiled function may not be cached implicitly and
multiple calls to `jit` may not share the compiled function (see below for
"static" vs "dynamic" arguments).
Args:
f: a function that takes any positional arguments `args` and any keyword
arguments `kwargs`. `ndarray`s and things accepted by
`tf.convert_to_tensor` in `args` and `kwargs` will be treated as 'dynamic
arguments' in the sense that calling the function with different values
for these arguments will not cause retracing. In contrast, arguments of
other types in `args` and `kwargs` are treated as 'static arguments' and
calling the function with different values of them will cause
re-compiling. Positional arguments whose positions are in `static_argnums`
are always treated as static arguments.
static_argnums: a tuple of positions of arguments that will be treated as
static arguments. Note that as aforementioned, any arguments that were not
convertible to tensor will also be static.
xla_forced_compile: if true, it will use XLA to force-compile the graph.
This requires that the function only contain ops that are XLA
compatible. It will compile the entire function into a single XLA op.
input_signature: a list of `tf.TensorSpec`, as the input signature to
control tracing behavior. See the
      [doc](https://www.tensorflow.org/api_docs/python/tf/function) of
`tf.function` for details.
autograph: whether to use autograph to convert Python constructs such as
`if` and `while` to their TensorFlow counterparts. See the
      [doc](https://www.tensorflow.org/api_docs/python/tf/function) of
`tf.function` for details.
experimental_compile: the `experimental_compile` flag for `tf.function`. See
      the [doc](https://www.tensorflow.org/api_docs/python/tf/function) of
`tf.function` for details. This is the recommended way to turn on XLA for
tf.function, but unlike xla_forced_compile, it doesn't force-compile the
entire function into a single XLA op.
Returns:
A trace-compiled version of f.
"""
@tf.function(input_signature=input_signature, autograph=autograph,
experimental_compile=experimental_compile)
def _tf_f(*args, **kwargs):
"""Accelerated function with tensor inputs/outputs."""
np_args = _tf_to_np(args)
kwargs = {k: _tf_to_np(v) for k, v in kwargs.items()}
if xla_forced_compile:
# Use list for mutability
output_is_list = [False]
output_is_empty = [False]
output_structure = [None]
def recorder(args, kwargs, res):
del args, kwargs
# Workaround b/121383831
output_is_list[0] = isinstance(res, list)
# If outputs are empty, xla.compile returns an `Operation`, which we
# don't want.
if tf.nest.flatten(res):
output_is_empty[0] = False
output_structure[0] = None
else:
output_is_empty[0] = True
# Without deepcopy, xla.compile will change output_structure[0] to a
# list of `Operation`.
output_structure[0] = copy.deepcopy(res)
return res
f_ = _record_result_type(recorder, f)
np_out = tf.xla.experimental.compile(lambda: f_(*np_args, **kwargs))
# Workaround b/121383831
if output_is_empty[0]:
np_out = output_structure[0]
elif (isinstance(np_out, list) and len(np_out) == 1 and
not output_is_list[0]):
np_out = np_out[0]
else:
np_out = f(*np_args, **kwargs)
return np_out
def _f(*args, **kwargs):
args = [
_canonicalize_jit_arguments(arg) if i not in static_argnums else arg
for i, arg in enumerate(args)
]
kwargs = {k: _canonicalize_jit_arguments(v) for k, v in kwargs.items()}
tf_out = _tf_f(*args, **kwargs)
return _tf_to_np(tf_out)
_f.tf_function = _tf_f
return _f |
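A usage sketch for `jit` (function and argument names are made up; `tf_np` as above). The Python bool is marked static so that changing it triggers a retrace, while the arrays stay dynamic.

def dense(x, w, activate=True):
  y = tf_np.dot(x, w)
  return tf_np.tanh(y) if activate else y

dense_fast = jit(dense, static_argnums=(2,))
out = dense_fast(tf_np.ones((4, 3)), tf_np.ones((3, 2)), True)  # traced once per value of `activate`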
Returns a function that evaluates `f` given input shapes and dtypes.
It transforms function `f` to a function that performs the same computation as
`f` but only on shapes and dtypes (a.k.a. shape inference).
Args:
f: the function to be transformed.
static_argnums: see documentation of `jit`.
allow_static_outputs: whether to allow non-array outputs. If True, non-array
outputs (e.g. Python integers) will be returned as-is; otherwise, they
will be converted to ndarrays, and then specs of those ndarrays will be
returned.
Returns:
A function whose input arguments can be either the same as `f`'s or only
their shapes/dtypes represented by `tf.TensorSpec`, and whose return values
are `tf.TensorSpec`s with the same nested structure as `f`'s return
values. If `allow_static_outputs` is True, when `f` returns some non-array
outputs (e.g. Python integers), the converted function will return them
as-is instead of returning `tf.TensorSpec`s for them. | def eval_on_shapes(f, static_argnums=(), allow_static_outputs=False):
"""Returns a function that evaluates `f` given input shapes and dtypes.
It transforms function `f` to a function that performs the same computation as
`f` but only on shapes and dtypes (a.k.a. shape inference).
Args:
f: the function to be transformed.
static_argnums: see documentation of `jit`.
allow_static_outputs: whether to allow non-array outputs. If True, non-array
outputs (e.g. Python integers) will be returned as-is; otherwise, they
will be converted to ndarrays, and then specs of those ndarrays will be
returned.
Returns:
A function whose input arguments can be either the same as `f`'s or only
their shapes/dtypes represented by `tf.TensorSpec`, and whose return values
are `tf.TensorSpec`s with the same nested structure as `f`'s return
values. If `allow_static_outputs` is True, when `f` returns some non-array
outputs (e.g. Python integers), the converted function will return them
as-is instead of returning `tf.TensorSpec`s for them.
"""
def abstractify(args):
def _abstractify(x):
x = _canonicalize_jit_arg(x)
if isinstance(x, (tf.Tensor, tf_np.ndarray)):
return tf.TensorSpec(x.shape, x.dtype)
else:
return x
new_args = []
for i, arg in enumerate(args):
if i in static_argnums:
new_args.append(arg)
else:
new_args.append(tf.nest.map_structure(_abstractify, arg))
return new_args
if allow_static_outputs:
# When `tf_f` below is called (via get_concrete_function) with the same
    # arguments (after abstraction), the Python function `f` won't be run, so we
    # need this python_outputs_map to retrieve the Python outputs we've seen
    # before that correspond to the arguments.
python_outputs_map = {}
def recorder(args, kwargs, res):
      # Since the get_concrete_function below only uses positional args, we also
      # use only positional args here.
del args, kwargs
def is_tensor_like(x):
if hasattr(x, "_type_spec"):
return True # x is a CompositeTensor
return isinstance(x, (tf_np.ndarray, tf.Tensor))
py_values = tf.nest.map_structure(
lambda x: None if is_tensor_like(x) else x,
res)
key = id(tf.compat.v1.get_default_graph())
python_outputs_map[key] = py_values
# Set non-tensor outputs to None to avoid tf.function calling
# tf.convert_to_tensor on them.
res = tf.nest.map_structure(
lambda x: None if not is_tensor_like(x) else x,
res)
return res
f = _record_result_type(recorder, f)
# TODO(wangpeng): tf.function could add a knob to turn off materializing the
# graph, so that we don't waste computation and memory when we just want
# shape inference.
tf_f = jit(f, static_argnums=static_argnums).tf_function
# pylint: disable=missing-docstring
def f_return(*args):
def to_tensor_spec(x):
if isinstance(x, tf.Tensor):
return tf.TensorSpec(x.shape, x.dtype)
else:
return x
new_args = abstractify(args)
cfun = tf_f.get_concrete_function(*new_args)
res = cfun.structured_outputs
res = tf.nest.map_structure(to_tensor_spec, res)
if allow_static_outputs:
key = id(cfun.graph)
py_values = python_outputs_map[key]
# We can also call tf.get_static_value on structured_outputs to retrieve
# the Python values, but since we'll need to use python_outputs_map to
# record "which outputs are static?" anyway, we choose to directly store
# the Python values in python_outputs_map.
res = tf.nest.map_structure(
lambda x, python_value: x if python_value is None else python_value,
res, py_values)
return res
# Provides access to `tf_f` for testing purpose.
f_return._tf_function = tf_f # pylint: disable=protected-access
return f_return |
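A sketch of shape-only evaluation, assuming `eval_on_shapes` is in scope; per the docstring above, inputs may be given as `tf.TensorSpec`s instead of concrete arrays.

import tensorflow as tf

add_one = lambda x: x + 1.0
shape_fn = eval_on_shapes(add_one)
spec = shape_fn(tf.TensorSpec((8, 16), tf.float32))
# `spec` is a tf.TensorSpec with shape (8, 16) and dtype float32; the function
# is only traced, not executed on real (8, 16)-sized data.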
Pure equivalent of `x[idx] = y`.
Returns the value of x that would result from the NumPy-style indexed
assignment `x[idx] = y`. Because it's a pure function, `x` itself won't be
changed.
Args:
x: an array with the values to be updated.
idx: a Numpy-style index, consisting of `None`, integers, slice objects,
ellipses, ndarrays with integer dtypes, or a tuple of the above.
y: the array of updates. `y` must be broadcastable to the shape of the array
that would be returned by `x[idx]`.
Returns:
The updated version of `x`. | def index_update(x, idx, y):
"""Pure equivalent of `x[idx] = y`.
Returns the value of x that would result from the NumPy-style indexed
assignment `x[idx] = y`. Because it's a pure function, `x` itself won't be
changed.
Args:
x: an array with the values to be updated.
idx: a Numpy-style index, consisting of `None`, integers, slice objects,
ellipses, ndarrays with integer dtypes, or a tuple of the above.
y: the array of updates. `y` must be broadcastable to the shape of the array
that would be returned by `x[idx]`.
Returns:
The updated version of `x`.
"""
return _index_update_helper(tf_np.ndarray._with_index_update, x, idx, y) |
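Based on the docstring above, a small sketch of the functional-update style (assuming `index_update` and `tf_np` are in scope; the indices are just examples):

x = tf_np.zeros((3, 4))
row_set = index_update(x, 1, tf_np.ones(4))  # pure version of x[1] = ones
cell_set = index_update(x, (0, 2), 7.0)      # pure version of x[0, 2] = 7.0
# `x` itself is left untouched; the updated copies are returned.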
Pure equivalent of `x[idx] += y`.
Returns the value of x that would result from the NumPy-style indexed
assignment `x[idx] += y`. Because it's a pure function, `x` itself won't be
changed.
Args:
x: an array with the values to be updated.
idx: a Numpy-style index, consisting of `None`, integers, slice objects,
ellipses, ndarrays with integer dtypes, or a tuple of the above.
y: the array of updates. `y` must be broadcastable to the shape of the array
that would be returned by `x[idx]`.
Returns:
The updated version of `x`. | def index_add(x, idx, y):
"""Pure equivalent of `x[idx] += y`.
Returns the value of x that would result from the NumPy-style indexed
assignment `x[idx] += y`. Because it's a pure function, `x` itself won't be
changed.
Args:
x: an array with the values to be updated.
idx: a Numpy-style index, consisting of `None`, integers, slice objects,
ellipses, ndarrays with integer dtypes, or a tuple of the above.
y: the array of updates. `y` must be broadcastable to the shape of the array
that would be returned by `x[idx]`.
Returns:
The updated version of `x`.
"""
return _index_update_helper(tf_np.ndarray._with_index_add, x, idx, y) |
Pure equivalent of `x[idx] = minimum(x[idx], y)`.
Returns the value of x that would result from the NumPy-style indexed
assignment `x[idx] = minimum(x[idx], y)`. Because it's a pure function, `x`
itself won't be changed.
Args:
x: an array with the values to be updated.
idx: a Numpy-style index, consisting of `None`, integers, slice objects,
ellipses, ndarrays with integer dtypes, or a tuple of the above.
y: the array of updates. `y` must be broadcastable to the shape of the array
that would be returned by `x[idx]`.
Returns:
The updated version of `x`. | def index_min(x, idx, y):
"""Pure equivalent of `x[idx] = minimum(x[idx], y)`.
Returns the value of x that would result from the NumPy-style indexed
assignment `x[idx] = minimum(x[idx], y)`. Because it's a pure function, `x`
itself won't be changed.
Args:
x: an array with the values to be updated.
idx: a Numpy-style index, consisting of `None`, integers, slice objects,
ellipses, ndarrays with integer dtypes, or a tuple of the above.
y: the array of updates. `y` must be broadcastable to the shape of the array
that would be returned by `x[idx]`.
Returns:
The updated version of `x`.
"""
return _index_update_helper(tf_np.ndarray._with_index_min, x, idx, y) |
Pure equivalent of `x[idx] = maximum(x[idx], y)`.
Returns the value of x that would result from the NumPy-style indexed
assignment `x[idx] = maximum(x[idx], y)`. Because it's a pure function, `x`
itself won't be changed.
Args:
x: an array with the values to be updated.
idx: a Numpy-style index, consisting of `None`, integers, slice objects,
ellipses, ndarrays with integer dtypes, or a tuple of the above.
y: the array of updates. `y` must be broadcastable to the shape of the array
that would be returned by `x[idx]`.
Returns:
The updated version of `x`. | def index_max(x, idx, y):
"""Pure equivalent of `x[idx] = maximum(x[idx], y)`.
Returns the value of x that would result from the NumPy-style indexed
assignment `x[idx] = maximum(x[idx], y)`. Because it's a pure function, `x`
itself won't be changed.
Args:
x: an array with the values to be updated.
idx: a Numpy-style index, consisting of `None`, integers, slice objects,
ellipses, ndarrays with integer dtypes, or a tuple of the above.
y: the array of updates. `y` must be broadcastable to the shape of the array
that would be returned by `x[idx]`.
Returns:
The updated version of `x`.
"""
return _index_update_helper(tf_np.ndarray._with_index_max, x, idx, y) |
Computes log(sum(exp(elements across dimensions of a tensor))).
Reduces `x` along the dimensions given in `axis`.
Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
entry in `axis`. If `keepdims` is true, the reduced dimensions
are retained with length 1.
If `axis` has no entries, all dimensions are reduced, and a
tensor with a single element is returned.
This function is more numerically stable than log(sum(exp(input))). It avoids
overflows caused by taking the exp of large inputs and underflows caused by
taking the log of small inputs.
Args:
x: The tensor to reduce. Should have numeric type.
axis: The dimensions to reduce. If `None` (the default), reduces all
dimensions. Must be in the range `[-rank(x), rank(x))`.
keepdims: If true, retains reduced dimensions with length 1.
Returns:
The reduced tensor. | def logsumexp(x, axis=None, keepdims=None):
"""Computes log(sum(exp(elements across dimensions of a tensor))).
Reduces `x` along the dimensions given in `axis`.
Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
entry in `axis`. If `keepdims` is true, the reduced dimensions
are retained with length 1.
If `axis` has no entries, all dimensions are reduced, and a
tensor with a single element is returned.
This function is more numerically stable than log(sum(exp(input))). It avoids
overflows caused by taking the exp of large inputs and underflows caused by
taking the log of small inputs.
Args:
x: The tensor to reduce. Should have numeric type.
axis: The dimensions to reduce. If `None` (the default), reduces all
dimensions. Must be in the range `[-rank(x), rank(x))`.
keepdims: If true, retains reduced dimensions with length 1.
Returns:
The reduced tensor.
"""
return tf_np.asarray(
tf.math.reduce_logsumexp(
input_tensor=x, axis=axis, keepdims=keepdims)) |
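A quick numerical illustration of the stability claim (assuming `logsumexp` and `tf_np` are in scope):

x = tf_np.asarray([1000.0, 1000.0])
print(logsumexp(x))  # ~1000.6931 (= 1000 + log 2)
# A naive tf_np.log(tf_np.sum(tf_np.exp(x))) would overflow to inf in float32.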
Compute 1 / (1 + exp(-x)). | def expit(x):
"""Compute 1 / (1 + exp(-x))."""
return tf_np.asarray(tf.math.sigmoid(x)) |
Computes the Gauss error function of x element-wise. | def erf(x):
"""Computes the Gauss error function of x element-wise."""
return tf_np.asarray(tf.math.erf(x)) |
Compose the output string representation.
e.g., ij, jk, (((1,), (0,)), ((), ())) -> ik
aij, ajk, (((2,), (1,)), ((0,), (0,))) -> aik
Args:
lhs_rep: A string representation for the left-hand side input array
rhs_rep: A string representation for the right-hand side input array
lhs_contraction: Sequence[int] (the contraction dimensions of lhs)
rhs_contraction: Sequence[int] (the contraction dimensions of rhs)
lhs_batch: Sequence[int] (the batch dimensions of lhs)
rhs_batch: Sequence[int] (the batch dimensions of rhs)
Returns:
A string representation of the result array. | def _compose_output_rep(lhs_rep, rhs_rep, lhs_contraction, rhs_contraction,
lhs_batch, rhs_batch):
"""Compose the output string representation.
e.g., ij, jk, (((1,), (0,)), ((), ())) -> ik
aij, ajk, (((2,), (1,)), ((0,), (0,))) -> aik
Args:
lhs_rep: A string representation for the left-hand side input array
rhs_rep: A string representation for the right-hand side input array
lhs_contraction: Sequence[int] (the contraction dimensions of lhs)
rhs_contraction: Sequence[int] (the contraction dimensions of rhs)
lhs_batch: Sequence[int] (the batch dimensions of lhs)
rhs_batch: Sequence[int] (the batch dimensions of rhs)
Returns:
A string representation of the result array.
"""
output_rep = []
for dim in lhs_batch:
output_rep.append(lhs_rep[dim])
for i in _minus(range(len(lhs_rep)), lhs_batch + lhs_contraction):
output_rep.append(lhs_rep[i])
for i in _minus(range(len(rhs_rep)), rhs_batch + rhs_contraction):
output_rep.append(rhs_rep[i])
return "".join(output_rep) |
Compute the non-batched matrix multiplication.
If it is the general non-batched/single-batched matrix multiplication,
use the highly optimized kernel `tf.tensordot` to handle it.
Args:
lhs: an array (the left-hand side matrix/vector to be multiplied)
rhs: an array (the right-hand side matrix/vector to be multiplied)
lhs_contraction: Sequence[int] (the contraction dimensions of lhs)
rhs_contraction: Sequence[int] (the contraction dimensions of rhs)
Returns:
An array that contains the result. | def _non_batched_matmul(lhs, rhs, lhs_contraction, rhs_contraction):
"""Compute the non-batched matrix multiplication.
If it is the general non-batched/single-batched matrix multiplication,
use the highly optimized kernel `tf.tensordot` to handle it.
Args:
lhs: an array (the left-hand side matrix/vector to be multiplied)
rhs: an array (the right-hand side matrix/vector to be multiplied)
lhs_contraction: Sequence[int] (the contraction dimensions of lhs)
rhs_contraction: Sequence[int] (the contraction dimensions of rhs)
Returns:
An array that contains the result.
"""
return tf.tensordot(
lhs, rhs, axes=(list(lhs_contraction), list(rhs_contraction))) |
The general dot operation for TensorFlow.
An equivalent general dot operation as that in JAX -
<https://jax.readthedocs.io/en/latest/_autosummary/jax.lax.dot_general.html>
Although there is an implementation in TF XLA, avoid directly using XLA when
possible.
e.g., non-batched: ij,jk->ik
batched: ijk,ikl->ijl
Args:
lhs: an array (the left-hand side matrix/vector to be multiplied)
rhs: an array (the right-hand side matrix/vector to be multiplied)
dimension_numbers: (Tuple[Tuple[Sequence[int], Sequence[int]],
Tuple[Sequence[int], Sequence[int]]]) – a tuple of tuples of the form
((lhs_contracting_dims, rhs_contracting_dims), (lhs_batch_dims,
rhs_batch_dims))
Returns:
An array that contains the result. | def tf_dot_general(lhs, rhs, dimension_numbers):
"""The general dot operation for TensorFlow.
An equivalent general dot operation as that in JAX -
<https://jax.readthedocs.io/en/latest/_autosummary/jax.lax.dot_general.html>
Although there is an implementation in TF XLA, avoid directly using XLA when
possible.
e.g., non-batched: ij,jk->ik
batched: ijk,ikl->ijl
Args:
lhs: an array (the left-hand side matrix/vector to be multiplied)
rhs: an array (the right-hand side matrix/vector to be multiplied)
dimension_numbers: (Tuple[Tuple[Sequence[int], Sequence[int]],
Tuple[Sequence[int], Sequence[int]]]) – a tuple of tuples of the form
((lhs_contracting_dims, rhs_contracting_dims), (lhs_batch_dims,
rhs_batch_dims))
Returns:
An array that contains the result.
"""
char_list = list(string.ascii_lowercase)
char_list = char_list[8:] + char_list[:8]
lhs_rank, rhs_rank = len(lhs.shape), len(rhs.shape)
lhs_rep = char_list[:lhs_rank]
rhs_rep = char_list[lhs_rank:lhs_rank + rhs_rank]
contraction, batch = dimension_numbers
lhs_contraction, rhs_contraction = contraction
if len(lhs_contraction) != len(rhs_contraction):
raise ValueError(
"The input matrices are required to have the same number "
"of contraction dimensions, but got: lhs {}, rhs: {}".format(
len(lhs_contraction), len(rhs_contraction)))
lhs_batch, rhs_batch = batch
if len(lhs_batch) != len(rhs_batch):
raise ValueError("The input matrices are required to have the same number "
"of batch dimensions, but got: lhs {}, rhs: {}".format(
len(lhs_batch), len(rhs_batch)))
if not lhs_batch and not rhs_batch:
return _non_batched_matmul(lhs, rhs, lhs_contraction, rhs_contraction)
if (lhs_rank == rhs_rank == 3 and lhs_batch == (0,) and rhs_batch == (0,) and
lhs_contraction == (2,) and rhs_contraction == (1,)):
return tf.linalg.matmul(lhs, rhs)
for i in range(len(lhs_contraction)):
rhs_rep[rhs_contraction[i]] = lhs_rep[lhs_contraction[i]]
for i in range(len(lhs_batch)):
rhs_rep[rhs_batch[i]] = lhs_rep[lhs_batch[i]]
output_rep = _compose_output_rep(lhs_rep, rhs_rep, lhs_contraction,
rhs_contraction, lhs_batch, rhs_batch)
equation = "".join(lhs_rep) + "," + "".join(rhs_rep) + "->" + output_rep
return tf.einsum(equation, lhs, rhs) |
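A sketch of the batched case (values invented; `tf_np` as above): contracting lhs dimension 2 with rhs dimension 1 while batching over dimension 0 is an ordinary batched matmul, and here it hits the `tf.linalg.matmul` fast path.

lhs = tf_np.ones((8, 4, 5))
rhs = tf_np.ones((8, 5, 6))
dimension_numbers = (((2,), (1,)), ((0,), (0,)))  # ((lhs_contract, rhs_contract), (lhs_batch, rhs_batch))
out = tf_dot_general(lhs, rhs, dimension_numbers)  # shape (8, 4, 6), i.e. ijk,ikl->ijl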
Convert strides, lhs_dilation, rhs_dilation to match TF convention.
For example,
in the 3D case, if lhs_dilation = 2, then convert it to [2, 2, 2]
if lhs_dilation = (2, 2, 2), convert it also to [2, 2, 2]
Args:
window_strides: window_strides to be converted
lhs_dilation: lhs_dilation to be converted
rhs_dilation: rhs_dilation to be converted
dim: dim to be converted
Returns:
The updated window_strides, lhs_dilation and rhs_dilation | def _conv_general_param_type_converter(window_strides, lhs_dilation,
rhs_dilation, dim):
"""Convert strides, lhs_dilation, rhs_dilation to match TF convention.
For example,
in the 3D case, if lhs_dilation = 2, then convert it to [2, 2, 2]
if lhs_dilation = (2, 2, 2), convert it also to [2, 2, 2]
Args:
window_strides: window_strides to be converted
lhs_dilation: lhs_dilation to be converted
rhs_dilation: rhs_dilation to be converted
dim: dim to be converted
Returns:
The updated window_strides, lhs_dilation and rhs_dilation
"""
def _as_list_of_size(item, size):
if item is None:
return None
return [item] * size if isinstance(item, int) else list(item)
return (_as_list_of_size(window_strides, dim),
_as_list_of_size(lhs_dilation, dim),
_as_list_of_size(rhs_dilation, dim)) |
A general conv API for TensorFlow.
According to the JAX version:
https://jax.readthedocs.io/en/stable/_autosummary/jax.lax.conv_general_dilated.html
Args:
lhs: a rank n+2 dimensional input array.
rhs: a rank n+2 dimensional array of kernel weights.
window_strides: a sequence of n integers, representing the inter-window
strides.
padding: either the string ‘SAME’, the string ‘VALID’, or a sequence of n
(low, high) integer pairs that give the padding to apply before and
after each spatial dimension.
output_shape: the output shape of the convolution (only required for
transpose convolution).
lhs_dilation: None, or a sequence of n integers, giving the dilation factor
to apply in each spatial dimension of lhs. LHS dilation is
also known as transposed convolution.
rhs_dilation: None, or a sequence of n integers, giving the dilation factor
to apply in each spatial dimension of rhs. RHS dilation is
also known as atrous convolution.
dimension_numbers: either None, a ConvDimensionNumbers object, or a 3-tuple
(lhs_spec, rhs_spec, out_spec), where each element is a
string of length n+2.
feature_group_count: integer, default 1. Changing this is currently not
supported.
batch_group_count: integer, default 1. Changing this is currently not
supported.
precision: Optional. Either None, which means the default precision for the
backend, or a Precision enum value.
Returns:
A TF NumPy array that contains the convolution result. | def tf_conv_general_dilated(lhs, rhs, window_strides, padding, output_shape,
lhs_dilation=None, rhs_dilation=None,
dimension_numbers=None, feature_group_count=1,
batch_group_count=1, precision=None):
"""A general conv API for TensorFlow.
  According to the JAX version:
https://jax.readthedocs.io/en/stable/_autosummary/jax.lax.conv_general_dilated.html
Args:
lhs: a rank n+2 dimensional input array.
rhs: a rank n+2 dimensional array of kernel weights.
window_strides: a sequence of n integers, representing the inter-window
strides.
padding: either the string ‘SAME’, the string ‘VALID’, or a sequence of n
(low, high) integer pairs that give the padding to apply before and
after each spatial dimension.
output_shape: the output shape of the convolution (only required for
transpose convolution).
lhs_dilation: None, or a sequence of n integers, giving the dilation factor
to apply in each spatial dimension of lhs. LHS dilation is
also known as transposed convolution.
rhs_dilation: None, or a sequence of n integers, giving the dilation factor
to apply in each spatial dimension of rhs. RHS dilation is
also known as atrous convolution.
dimension_numbers: either None, a ConvDimensionNumbers object, or a 3-tuple
(lhs_spec, rhs_spec, out_spec), where each element is a
string of length n+2.
feature_group_count: integer, default 1. Changing this is currently not
supported.
batch_group_count: integer, default 1. Changing this is currently not
supported.
precision: Optional. Either None, which means the default precision for the
backend, or a Precision enum value.
Returns:
A TF NumPy array that contains the convolution result.
"""
dim = None
lhs_spec, rhs_spec, out_spec = dimension_numbers
if lhs_spec != out_spec:
raise ValueError("Current implementation requires the `data_format` of the "
"inputs and outputs to be the same.")
if len(lhs_spec) >= 6:
    raise ValueError("Current implementation does not support 4 or higher "
                     "dimensional convolution, but got: ", len(lhs_spec) - 2)
dim = len(lhs_spec) - 2
if lhs_dilation and rhs_dilation:
if lhs_dilation == (1,) * dim and rhs_dilation == (1,) * dim:
lhs_dilation, rhs_dilation = None, None
else:
raise ValueError("Current implementation does not support that "
"deconvolution and dilation to be performed at the same "
"time, but got lhs_dilation: {}, rhs_dilation: {}"
.format(lhs_dilation, rhs_dilation))
if padding not in ["SAME", "VALID"]:
    raise ValueError("Current implementation requires the padding parameter "
                     "to be either 'VALID' or 'SAME', but got: ", padding)
if batch_group_count != 1 or feature_group_count != 1:
raise NotImplementedError("batch_group_count and feature_group_count "
"other than 1 is currently not supported, but"
" got feature_group_count: {}, batch_group_count"
": {}".format(feature_group_count,
batch_group_count))
if precision is not None:
raise NotImplementedError("precision other than `None` is currently not "
"supported, but got: {}".format(precision))
# Convert params from int/Sequence[int] to list of ints.
strides, lhs_dilation, rhs_dilation = _conv_general_param_type_converter(
window_strides, lhs_dilation, rhs_dilation, dim
)
# Preprocess the shapes
dim_maps = {}
if isinstance(lhs_spec, str):
dim_maps["I"] = list(rhs_spec).index("I")
dim_maps["O"] = list(rhs_spec).index("O")
dim_maps["N"] = list(lhs_spec).index("N")
dim_maps["C"] = list(lhs_spec).index("C")
else:
dim_maps["I"] = rhs_spec[1]
dim_maps["O"] = rhs_spec[0]
dim_maps["N"] = lhs_spec[0]
dim_maps["C"] = lhs_spec[1]
lhs = tf_np.moveaxis(lhs, (dim_maps["N"], dim_maps["C"]), (0, dim + 1))
# Adjust the filters, put the dimension 'I' and 'O' at last.
rhs = tf_np.moveaxis(rhs, (dim_maps["O"], dim_maps["I"]), (dim + 1, dim))
spatial_dim_maps = {1: "W", 2: "HW", 3: "DHW"}
data_format = "N" + spatial_dim_maps[dim] + "C"
if rhs_dilation or (lhs_dilation is None and rhs_dilation is None):
output = _tf_nn_APIs[dim][0](lhs, rhs, strides, padding, data_format,
rhs_dilation)
else:
output = _tf_nn_APIs[dim][1](lhs, rhs, tf.constant(output_shape), strides,
padding, data_format, lhs_dilation)
output = tf_np.moveaxis(output, (0, dim + 1), (dim_maps["N"], dim_maps["C"]))
return output |
Convolution over an N-D array.
See https://www.tensorflow.org/api_docs/python/tf/nn/convolution and
https://www.tensorflow.org/xla/operation_semantics#conv_convolution for
reference.
Args:
inp: an (N+2)-D array. The input of the convolution.
fltr: an (N+2)-D array. The filter (i.e. kernel) of the convolution.
window_strides: a sequence of N ints, the strides for moving the convolution
window.
padding: a string, either "VALID" or "SAME". The padding algorithm.
dimension_numbers: a tuple of three strings encoding the data format of
input, filter and output. "I" means input; "O" means output; "C" means
channel; other characters such as "W", "H" and "D" means spatial
dimensions.
filter_dilation: the dilation rates for the filter. Dilating the filter
means adding "holes" to the filter.
Returns:
An (N+2)-D array. The convolution result. | def conv(inp,
fltr,
window_strides,
padding,
dimension_numbers,
filter_dilation=None):
"""Convolution over an N-D array.
See https://www.tensorflow.org/api_docs/python/tf/nn/convolution and
https://www.tensorflow.org/xla/operation_semantics#conv_convolution for
reference.
Args:
inp: an (N+2)-D array. The input of the convolution.
fltr: an (N+2)-D array. The filter (i.e. kernel) of the convolution.
window_strides: a sequence of N ints, the strides for moving the convolution
window.
padding: a string, either "VALID" or "SAME". The padding algorithm.
dimension_numbers: a tuple of three strings encoding the data format of
input, filter and output. "I" means input; "O" means output; "C" means
channel; other characters such as "W", "H" and "D" means spatial
dimensions.
filter_dilation: the dilation rates for the filter. Dilating the filter
means adding "holes" to the filter.
Returns:
An (N+2)-D array. The convolution result.
"""
input_spec, filter_spec, output_spec = dimension_numbers
if input_spec != output_spec:
raise ValueError("Input and output data formats must be the same; got %s "
"and %s" % (input_spec, output_spec))
supported_filter_spec = ["WIO", "HWIO", "DHWIO"]
if filter_spec not in supported_filter_spec:
raise ValueError("The supported data format for the filter are %s; got %s" %
(supported_filter_spec, filter_spec))
if input_spec[1:-1] != filter_spec[:-2]:
raise ValueError("Input data format (%s) is not compatible with filter "
"data format (%s)" % (input_spec, filter_spec))
# No type promotion in order to prevent accidentally doing more expensive
# computation.
dtype = tf_np.result_type(inp, fltr)
inp = tf_np.asarray(inp, dtype)
fltr = tf_np.asarray(fltr, dtype)
return tf_np.asarray(
tf.nn.convolution(
input=inp,
filters=fltr,
padding=padding,
strides=window_strides,
dilations=filter_dilation,
data_format=input_spec)) |
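A minimal 2-D example of the `conv` wrapper above (shapes invented; `tf_np` as above), using the NHWC/HWIO conventions the function expects:

inp = tf_np.ones((1, 8, 8, 3))    # NHWC: batch 1, 8x8 image, 3 channels
fltr = tf_np.ones((3, 3, 3, 16))  # HWIO: 3x3 kernel, 3 in-channels, 16 out-channels
out = conv(inp, fltr, window_strides=(1, 1), padding="SAME",
           dimension_numbers=("NHWC", "HWIO", "NHWC"))
# out has shape (1, 8, 8, 16).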
Performs an N-D average pooling.
Args:
x: ndarray of rank N+2, of shape `[batch_size] + input_spatial_shape +
[num_channels]`. Pooling happens over the spatial dimensions only.
pool_size: sequence of N ints.
strides: sequence of N ints.
padding: a string, the padding algorithm. Must be "SAME" or "VALID".
Returns:
An (N+2)-D array, of shape
[batch_size] + output_spatial_shape + [num_channels],
where `output_spatial_shape` depends on the value of padding:
If padding = "SAME":
output_spatial_shape[i] = ceil(input_spatial_shape[i] / strides[i])
If padding = "VALID":
output_spatial_shape[i] =
ceil((input_spatial_shape[i] - (pool_size[i] - 1)) / strides[i]). | def avg_pool(x, pool_size, strides, padding):
"""Performs an N-D average pooling.
Args:
x: ndarray of rank N+2, of shape `[batch_size] + input_spatial_shape +
[num_channels]`. Pooling happens over the spatial dimensions only.
pool_size: sequence of N ints.
strides: sequence of N ints.
padding: a string, the padding algorithm. Must be "SAME" or "VALID".
Returns:
An (N+2)-D array, of shape
[batch_size] + output_spatial_shape + [num_channels],
where `output_spatial_shape` depends on the value of padding:
If padding = "SAME":
output_spatial_shape[i] = ceil(input_spatial_shape[i] / strides[i])
If padding = "VALID":
output_spatial_shape[i] =
ceil((input_spatial_shape[i] - (pool_size[i] - 1)) / strides[i]).
"""
x = tf_np.asarray(x)
return tf_np.asarray(
tf.nn.pool(
input=x,
window_shape=pool_size,
pooling_type="AVG",
strides=strides,
padding=padding)) |
Performs an N-D max pooling.
Args:
x: ndarray of rank N+2, of shape `[batch_size] + input_spatial_shape +
[num_channels]`. Pooling happens over the spatial dimensions only.
pool_size: sequence of N ints.
strides: sequence of N ints.
padding: a string, the padding algorithm. Must be "SAME" or "VALID".
Returns:
An (N+2)-D array, of shape
[batch_size] + output_spatial_shape + [num_channels],
where `output_spatial_shape` depends on the value of padding:
If padding = "SAME":
output_spatial_shape[i] = ceil(input_spatial_shape[i] / strides[i])
If padding = "VALID":
output_spatial_shape[i] =
ceil((input_spatial_shape[i] - (pool_size[i] - 1)) / strides[i]). | def max_pool(x, pool_size, strides, padding):
"""Performs an N-D max pooling.
Args:
x: ndarray of rank N+2, of shape `[batch_size] + input_spatial_shape +
[num_channels]`. Pooling happens over the spatial dimensions only.
pool_size: sequence of N ints.
strides: sequence of N ints.
padding: a string, the padding algorithm. Must be "SAME" or "VALID".
Returns:
An (N+2)-D array, of shape
[batch_size] + output_spatial_shape + [num_channels],
where `output_spatial_shape` depends on the value of padding:
If padding = "SAME":
output_spatial_shape[i] = ceil(input_spatial_shape[i] / strides[i])
If padding = "VALID":
output_spatial_shape[i] =
ceil((input_spatial_shape[i] - (pool_size[i] - 1)) / strides[i]).
"""
x = tf_np.asarray(x)
return tf_np.asarray(
tf.nn.pool(
input=x,
window_shape=pool_size,
pooling_type="MAX",
strides=strides,
padding=padding)) |
Sorts keys along a dimension and applies same permutation to values.
Args:
keys: an array. The dtype must be comparable numbers (integers and reals).
values: an array with the same shape as `keys`.
dimension: an `int`. The dimension along which to sort.
Returns:
Permuted keys and values. | def sort_key_val(keys, values, dimension=-1):
"""Sorts keys along a dimension and applies same permutation to values.
Args:
keys: an array. The dtype must be comparable numbers (integers and reals).
    values: an array with the same shape as `keys`.
dimension: an `int`. The dimension along which to sort.
Returns:
Permuted keys and values.
"""
keys = tf_np.asarray(keys)
values = tf_np.asarray(values)
rank = keys.shape.ndims
if rank is None:
rank = values.shape.ndims
if rank is None:
# We need to know the rank because tf.gather requires batch_dims to be `int`
raise ValueError("The rank of either keys or values must be known, but "
"both are unknown (i.e. their shapes are both None).")
if dimension in (-1, rank - 1):
def maybe_swapaxes(a):
return a
else:
def maybe_swapaxes(a):
return tf_np.swapaxes(a, dimension, -1)
# We need to swap axes because tf.gather (and tf.gather_nd) supports
# batch_dims on the left but not on the right.
# TODO(wangpeng): Investigate whether we should do swapaxes or moveaxis.
keys = maybe_swapaxes(keys)
values = maybe_swapaxes(values)
idxs = tf_np.argsort(keys)
# Using tf.gather rather than np.take because the former supports batch_dims
def gather(a):
return tf_np.asarray(tf.gather(a, idxs, batch_dims=rank - 1))
keys = gather(keys)
values = gather(values)
keys = maybe_swapaxes(keys)
values = maybe_swapaxes(values)
return keys, values |
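A small sketch of sort_key_val on a rank-2 input; with the default `dimension=-1` the sort runs along the last axis, and the values follow the keys' permutation.
keys = tf_np.asarray([[3, 1, 2]])
values = tf_np.asarray([[30, 10, 20]])
sorted_keys, sorted_values = sort_key_val(keys, values)
# sorted_keys -> [[1, 2, 3]]; sorted_values -> [[10, 20, 30]]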
Scan a function over leading array axes while carrying along state.
See the docstring of `jax.lax.scan`
(https://jax.readthedocs.io/en/latest/_autosummary/jax.lax.scan.html) for
details.
Args:
f: a Python function to be scanned of type ``c -> a -> (c, b)``, meaning
that ``f`` accepts two arguments where the first is a value of the loop
carry and the second is a slice of ``xs`` along its leading axis, and that
``f`` returns a pair where the first element represents a new value for
the loop carry and the second represents a slice of the output. Note that
the input and output carry must have the same dtype.
init: an initial loop carry value of type ``c``, which can be a scalar,
array, or any pytree (nested Python tuple/list/dict) thereof, representing
the initial loop carry value. This value must have the same structure as
the first element of the pair returned by ``f``.
xs: the value of type ``[a]`` over which to scan along the leading axis,
where ``[a]`` can be an array or any pytree (nested Python
tuple/list/dict) thereof with consistent leading axis sizes.
length: optional integer specifying the number of loop iterations, which
must agree with the sizes of leading axes of the arrays in ``xs`` (but can
be used to perform scans where no input ``xs`` are needed).
reverse: optional boolean specifying whether to run the scan iteration
forward (the default) or in reverse, equivalent to reversing the leading
axes of the arrays in both ``xs`` and in ``ys``.
Returns:
A pair of type ``(c, [b])`` where the first element represents the final
loop carry value and the second element represents the stacked outputs of
the second output of ``f`` when scanned over the leading axis of the inputs. | def scan(f, init, xs, length=None, reverse=False):
"""Scan a function over leading array axes while carrying along state.
See the docstring of `jax.lax.scan`
(https://jax.readthedocs.io/en/latest/_autosummary/jax.lax.scan.html) for
details.
Args:
f: a Python function to be scanned of type ``c -> a -> (c, b)``, meaning
that ``f`` accepts two arguments where the first is a value of the loop
carry and the second is a slice of ``xs`` along its leading axis, and that
``f`` returns a pair where the first element represents a new value for
the loop carry and the second represents a slice of the output. Note that
the input and output carry must have the same dtype.
init: an initial loop carry value of type ``c``, which can be a scalar,
array, or any pytree (nested Python tuple/list/dict) thereof, representing
the initial loop carry value. This value must have the same structure as
the first element of the pair returned by ``f``.
xs: the value of type ``[a]`` over which to scan along the leading axis,
where ``[a]`` can be an array or any pytree (nested Python
tuple/list/dict) thereof with consistent leading axis sizes.
length: optional integer specifying the number of loop iterations, which
must agree with the sizes of leading axes of the arrays in ``xs`` (but can
be used to perform scans where no input ``xs`` are needed).
reverse: optional boolean specifying whether to run the scan iteration
forward (the default) or in reverse, equivalent to reversing the leading
axes of the arrays in both ``xs`` and in ``ys``.
Returns:
A pair of type ``(c, [b])`` where the first element represents the final
loop carry value and the second element represents the stacked outputs of
the second output of ``f`` when scanned over the leading axis of the inputs.
"""
init, xs = tf.nest.map_structure(
lambda x: tf_np.asarray(x) if x is not None else None, (init, xs))
if length is not None:
length = int(length)
def get_length(x):
if x is None:
return None
if x.shape.rank == 0:
raise ValueError("Some array in `xs` doesn't have a leading dimension")
return x.shape[0]
lengths = tf.nest.flatten(tf.nest.map_structure(get_length, xs))
for l in lengths:
if l is not None:
if length is None:
length = l
elif length != l:
raise ValueError("There are two different leading-dimension lengths: "
f"{length} and {l}")
if length is None:
raise ValueError(
"Can't determine length. Please set the `length` argument.")
xs_ta = tf.nest.map_structure(
lambda t: (tf.TensorArray(t.dtype, size=length, dynamic_size=False) # pylint: disable=g-long-lambda
.unstack(t) if t is not None else None),
xs)
# tf.while_loop doesn't allow None in loop_vars, so we mask them.
is_init_none = tf.nest.map_structure(lambda x: x is None, init)
def to_safe(carry):
return tf.nest.map_structure(
lambda x, is_none: tf.zeros([]) if is_none else x, carry, is_init_none)
def from_safe(safe_carry):
return tf.nest.map_structure(
lambda x, is_none: None if is_none else x, safe_carry, is_init_none)
def body(i, safe_carry, ys_ta):
carry = from_safe(safe_carry)
if reverse:
i_ = length - 1 - i
else:
i_ = i
xs = tf.nest.map_structure(
lambda x_ta: x_ta.read(i_) if x_ta is not None else None, xs_ta)
carry, ys = f(*_tf_to_np((carry, xs)))
ys_ta = tf.nest.map_structure(
lambda y_ta, y: (y_ta.write(i_, y) if y is not None else y_ta),
ys_ta, ys)
i = i + 1
safe_carry = to_safe(carry)
return i, safe_carry, ys_ta
xs_spec = tf.nest.map_structure(
lambda t: tf.TensorSpec(t.shape[1:], t.dtype) if t is not None else None,
xs)
_, ys_spec = eval_on_shapes(f)(init, xs_spec)
# ys_ta can't contain None because tf.while_loop doesn't allow None in
# loop_vars.
ys_ta = tf.nest.map_structure(
lambda y: tf.TensorArray(y.dtype if y is not None else tf.float32, # pylint: disable=g-long-lambda
size=length, dynamic_size=False),
ys_spec)
safe_init = to_safe(init)
_, safe_carry, ys_ta = tf.while_loop(
lambda i, *_: i < length, body, (0, safe_init, ys_ta),
maximum_iterations=length)
carry = from_safe(safe_carry)
def _stack(a, spec):
if spec is None:
return None
a = a.stack()
a.set_shape((length,) + a.shape[1:])
return a
ys = tf.nest.map_structure(_stack, ys_ta, ys_spec)
return _tf_to_np((carry, ys)) |
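A minimal sketch of scan computing a running sum; the integer carry and the slices of `xs` share the same dtype, as required above. The step function and the sample values are illustrative only.
def _running_sum(carry, x):
  # New carry is the running total; it is also emitted as the per-step output.
  new_carry = carry + x
  return new_carry, new_carry

total, partials = scan(_running_sum, tf_np.asarray(0), tf_np.asarray([1, 2, 3, 4]))
# total -> 10; partials -> [1, 3, 6, 10]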
Map a function over leading array axes.
See the docstring of `jax.lax.map`
(https://jax.readthedocs.io/en/latest/_autosummary/jax.lax.map.html) for
details.
Args:
f: a Python function to apply element-wise over the first axis or axes of
`xs`.
xs: values over which to map along the leading axis.
Returns:
Mapped values. | def tf_map(f, xs):
"""Map a function over leading array axes.
See the docstring of `jax.lax.map`
(https://jax.readthedocs.io/en/latest/_autosummary/jax.lax.map.html) for
details.
Args:
f: a Python function to apply element-wise over the first axis or axes of
`xs`.
xs: values over which to map along the leading axis.
Returns:
Mapped values.
"""
def g(unused, x):
return unused, f(x)
carry = tf.nest.map_structure(lambda _: None, xs)
return scan(g, carry, xs)[1] |
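A short sketch of tf_map applying a per-row reduction over the leading axis; the lambda and input values are illustrative.
xs = tf_np.asarray([[1., 2.], [3., 4.]])
row_sums = tf_map(lambda row: tf_np.sum(row), xs)
# row_sums -> [3., 7.]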
Calculates the indices for `tf.gather_nd` from slices.
Args:
operand: a Tensor to slice.
start_indices: a vector Tensor of integers, one per dimension. The starts of
the slice. The vector can be dynamic.
slice_sizes: a list of integers, one per dimension. The sizes of the slice.
Returns:
An index array suitable for `tf.gather_nd` and `tf.scatter_nd`, or `None` if
`operand` is a scalar. | def _get_dynamic_indices(operand, start_indices, slice_sizes):
"""Calcuates the indices for `tf.gather_nd` from slices.
Args:
operand: a Tensor to slice.
start_indices: a vector Tensor of integers, one per dimension. The starts of
the slice. The vector can be dynamic.
slice_sizes: a list of integers, one per dimension. The sizes of the slice.
Returns:
An index array suitable for `tf.gather_nd` and `tf.scatter_nd`, or `None` if
`operand` is a scalar.
"""
rank = len(slice_sizes)
operand_rank = tf.rank(operand)
tf.debugging.Assert(operand_rank == rank, [operand_rank, rank])
starts_rank = tf.rank(start_indices)
tf.debugging.Assert(starts_rank == 1, [starts_rank])
num_starts = tf.shape(start_indices)[0]
tf.debugging.Assert(num_starts == rank, [num_starts, rank])
operand_shape = tf.shape(operand)
tf.debugging.Assert(tf.reduce_all(slice_sizes <= operand_shape),
[slice_sizes, operand_shape])
if rank == 0:
return None
start_indices = tf.where(
start_indices < 0, start_indices + operand_shape, start_indices)
idx_list = []
for i in range(rank):
start = start_indices[i]
size = slice_sizes[i]
dim = operand_shape[i]
start = tf.clip_by_value(start, 0, dim - size)
# XLA requires tf.range's `start` to be compile-time constant, so we can't
# do tf.range(start, ...).
idx = start + tf.range(size)
shape = [1] * rank
shape[i] = size
idx = tf.reshape(idx, shape)
idx_list.append(idx)
slice_sizes_tensor = tf.convert_to_tensor(slice_sizes)
# tf.stack doesn't support broadcasting, so we need to broadcast manually.
# TODO(wangpeng): Reduce peak memory by broadcasting one-by-one instead of
# all-together.
idx_list = [tf.broadcast_to(x, slice_sizes_tensor) for x in idx_list]
return tf.stack(idx_list, axis=-1) |
Slicing operation where the indices can be dynamic values.
See the docstring of `jax.lax.dynamic_slice`
(https://jax.readthedocs.io/en/latest/_autosummary/jax.lax.dynamic_slice.html)
for details.
Args:
operand: an array to slice.
start_indices: a vector of integers, one per dimension. The starts of the
slice. The vector can be dynamic.
slice_sizes: a list of integers, one per dimension. The sizes of the slice.
Returns:
An array containing the slice, with shape equal to `slice_sizes`. | def dynamic_slice(operand, start_indices, slice_sizes):
"""Slicing operation where the indices can be dynamic vlaues.
See the docstring of `jax.lax.dynamic_slice`
(https://jax.readthedocs.io/en/latest/_autosummary/jax.lax.dynamic_slice.html)
for details.
Args:
operand: an array to slice.
start_indices: a vector of integers, one per dimension. The starts of the
slice. The vector can be dynamic.
slice_sizes: a list of integers, one per dimension. The sizes of the slice.
Returns:
An array containing the slice, with shape equal to `slice_sizes`.
"""
# This implementation uses tf.gather_nd to implement dynamic_slice, which is
# memory inefficient because the size of `indices` given to gather_nd is
# large.
operand = tf_np.asarray(operand).data
start_indices = tf_np.asarray(start_indices, np.int32).data
idx = _get_dynamic_indices(operand, start_indices, slice_sizes)
if idx is not None:
operand = tf.gather_nd(operand, idx)
return tf_np.asarray(operand) |
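A sketch of dynamic_slice extracting a 2x2 window; the start indices could equally come from a traced tensor, and starts are clipped so the window stays in bounds. The concrete values are illustrative.
x = tf_np.asarray([[0, 1, 2], [3, 4, 5], [6, 7, 8]])
window = dynamic_slice(x, start_indices=[1, 1], slice_sizes=[2, 2])
# window -> [[4, 5], [7, 8]]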
Updates a dynamic slice.
See the docstring of `jax.lax.dynamic_update_slice`
(https://jax.readthedocs.io/en/latest/_autosummary/jax.lax.dynamic_update_slice.html)
for details.
Args:
operand: an array to slice.
update: an array containing the new values to write onto `operand`.
start_indices: a vector of integers, one per dimension. The starts of the
slice. The vector can be dynamic.
Returns:
The updated version of `operand`. | def dynamic_update_slice(operand, update, start_indices):
"""Updates a dynamic slice.
See the docstring of `jax.lax.dynamic_update_slice`
(https://jax.readthedocs.io/en/latest/_autosummary/jax.lax.dynamic_update_slice.html)
for details.
Args:
operand: an array to slice.
update: an array containing the new values to write onto `operand`.
start_indices: a vector of integers, one per dimension. The starts of the
slice. The vector can be dynamic.
Returns:
The updated version of `operand`.
"""
operand = tf_np.asarray(operand).data
update = tf_np.asarray(update).data
start_indices = tf_np.asarray(start_indices, np.int32).data
if not update.shape.is_fully_defined():
raise ValueError("update's shape must be fully defined")
slice_sizes = update.shape
idx = _get_dynamic_indices(operand, start_indices, slice_sizes)
if idx is None:
# `np.zeros([])[()] = 1.0` will result in a scalar array of 1.0
return tf_np.asarray(update)
operand = tf.tensor_scatter_nd_update(operand, idx, update)
return tf_np.asarray(operand) |
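A companion sketch for dynamic_update_slice writing a block into a larger array; the result is a new ndarray and the input is left untouched. The shapes are illustrative.
base = tf_np.zeros([3, 3])
block = tf_np.ones([2, 2])
updated = dynamic_update_slice(base, block, start_indices=[1, 1])
# updated has ones at rows 1-2 / cols 1-2 and zeros elsewhere.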
Convenience wrapper around dynamic_slice applying to one dimension. | def dynamic_slice_in_dim(operand, start_index, slice_size, axis=0):
"""Convenience wrapper around dynamic_slice applying to one dimension."""
operand = tf_np.asarray(operand)
start_indices = [0] * operand.ndim
slice_sizes = list(operand.shape)
axis = int(axis)
start_indices[axis] = start_index
slice_sizes[axis] = int(slice_size)
return dynamic_slice(operand, start_indices, slice_sizes) |
Convenience wrapper around dynamic_update_slice for one dimension. | def dynamic_update_slice_in_dim(operand, update, start_index, axis):
"""Convenience wrapper around dynamic_update_slice for one dimension."""
operand = tf_np.asarray(operand)
axis = int(axis)
start_indices = [0] * operand.ndim
start_indices[axis] = start_index
return dynamic_update_slice(operand, update, start_indices) |
Converts an RNG key to an RNG seed.
Args:
a: an RNG key, an ndarray of shape [] and dtype `np.int64`.
Returns:
an RNG seed, a tensor of shape [2] and dtype `tf.int32`. | def _key2seed(a):
"""Converts an RNG key to an RNG seed.
Args:
a: an RNG key, an ndarray of shape [] and dtype `np.int64`.
Returns:
an RNG seed, a tensor of shape [2] and dtype `tf.int32`.
"""
def int64_to_int32s(a):
"""Converts an int64 tensor of shape [] to an int32 tensor of shape [2]."""
a = tf.cast(a, tf.uint64)
fst = tf.cast(a, tf.uint32)
snd = tf.cast(
tf.bitwise.right_shift(a, tf.constant(32, tf.uint64)), tf.uint32)
a = [fst, snd]
a = tf.nest.map_structure(lambda x: tf.cast(x, tf.int32), a)
a = tf.stack(a)
return a
return int64_to_int32s(a) |
Converts an RNG seed to an RNG key.
Args:
a: an RNG seed, a tensor of shape [2] and dtype `tf.int32`.
Returns:
an RNG key, an ndarray of shape [] and dtype `np.int64`. | def _seed2key(a):
"""Converts an RNG seed to an RNG key.
Args:
a: an RNG seed, a tensor of shape [2] and dtype `tf.int32`.
Returns:
an RNG key, an ndarray of shape [] and dtype `np.int64`.
"""
def int32s_to_int64(a):
"""Converts an int32 tensor of shape [2] to an int64 tensor of shape []."""
a = tf.bitwise.bitwise_or(
tf.cast(a[0], tf.uint64),
tf.bitwise.left_shift(
tf.cast(a[1], tf.uint64), tf.constant(32, tf.uint64)))
a = tf.cast(a, tf.int64)
return a
return tf_np.asarray(int32s_to_int64(a)) |
Creates RNG state from seed.
Args:
s: the seed, an integer.
Returns:
An RNG state, as a scalar array of dtype `np.int64`. | def prng(s):
"""Creates RNG state from seed.
Args:
s: the seed, an integer.
Returns:
An RNG state, as a scalar array of dtype `np.int64`.
"""
# TODO(wangpeng): Become bitwise-identical to JAX when TF stateless RNGs get
# improved.
return tf_np.asarray(s, dtype=_RNG_KEY_DTYPE) |
Splits an RNG seed into `num` new seeds by adding a leading axis.
Example:
>>> seed = [1, 2]
>>> new_seeds = tf.random.experimental.stateless_split(seed, num=3)
>>> print(new_seeds)
tf.Tensor(
[[1105988140 1738052849]
[-335576002 370444179]
[ 10670227 -246211131]], shape=(3, 2), dtype=int32)
>>> tf.random.stateless_normal(shape=[3], seed=new_seeds[0, :])
<tf.Tensor: shape=(3,), dtype=float32, numpy=array([-0.59835213, -0.9578608 ,
0.9002807 ], dtype=float32)>
Args:
seed: an RNG seed (a tensor with shape [2] and dtype `int32` or `int64`).
(When using XLA, only `int32` is allowed.)
num: optional, a positive integer or scalar tensor indicating the number of
seeds to produce (default 2).
Returns:
A tensor with shape [num, 2] representing `num` new seeds. It will have the
same dtype as `seed` (if `seed` doesn't have an explicit dtype, the dtype
will be determined by `tf.convert_to_tensor`). | def stateless_split(seed, num=2):
"""Splits an RNG seed into `num` new seeds by adding a leading axis.
Example:
>>> seed = [1, 2]
>>> new_seeds = tf.random.experimental.stateless_split(seed, num=3)
>>> print(new_seeds)
tf.Tensor(
[[1105988140 1738052849]
[-335576002 370444179]
[ 10670227 -246211131]], shape=(3, 2), dtype=int32)
>>> tf.random.stateless_normal(shape=[3], seed=new_seeds[0, :])
<tf.Tensor: shape=(3,), dtype=float32, numpy=array([-0.59835213, -0.9578608 ,
0.9002807 ], dtype=float32)>
Args:
seed: an RNG seed (a tensor with shape [2] and dtype `int32` or `int64`).
(When using XLA, only `int32` is allowed.)
num: optional, a positive integer or scalar tensor indicating the number of
seeds to produce (default 2).
Returns:
A tensor with shape [num, 2] representing `num` new seeds. It will have the
    same dtype as `seed` (if `seed` doesn't have an explicit dtype, the dtype
will be determined by `tf.convert_to_tensor`).
"""
seed = tf.convert_to_tensor(seed)
return tf.random.stateless_uniform(
shape=[num, 2], seed=seed, dtype=seed.dtype, minval=None, maxval=None) |
Creates new independent RNG states from an existing state.
Args:
state: the existing state.
num: the number of the new states.
Returns:
A tuple of new states. | def split(state, num):
"""Creates new independent RNG states from an existing state.
Args:
state: the existing state.
num: the number of the new states.
Returns:
A tuple of new states.
"""
state = tf_np.asarray(state, dtype=_RNG_KEY_DTYPE)
state = _key2seed(state)
try:
states = tf.random.experimental.stateless_split(state, num)
except AttributeError as e: # pylint: disable=unused-variable
# TODO(afrozm): For TF < 2.3 we need to do this. Delete once 2.3 launches.
states = stateless_split(state, num)
states = tf.unstack(states, num)
states = tf.nest.map_structure(_seed2key, states)
return states |
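A sketch of the stateless-RNG plumbing above, with an arbitrary seed value: create a key from an integer seed and split it into independent subkeys.
key = prng(42)
subkey_a, subkey_b = split(key, 2)
# Each subkey is itself a scalar int64 key, usable with the samplers below.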
Sample uniform random values in range [`minval`, `maxval`).
Args:
key: the RNG key.
shape: the shape of the result.
dtype: the dtype of the result.
minval: the minimal value (inclusive).
maxval: the maximal value (exclusive).
Returns:
An ndarray with shape `shape` and dtype `dtype`. Each value in the ndarray
is sampled uniformly randomly in range [`minval`, `maxval`). | def uniform(key,
shape,
dtype=tf_np.random.DEFAULT_RANDN_DTYPE,
minval=0.,
maxval=1.):
"""Sample uniform random values in range [`minval`, `maxval`).
Args:
key: the RNG key.
shape: the shape of the result.
dtype: the dtype of the result.
minval: the minimal value (inclusive).
maxval: the maximal value (exclusive).
Returns:
An ndarray with shape `shape` and dtype `dtype`. Each value in the ndarray
is sampled uniformly randomly in range [`minval`, `maxval`).
"""
minval = tf.cast(minval, dtype)
maxval = tf.cast(maxval, dtype)
key = tf_np.asarray(key, dtype=_RNG_KEY_DTYPE)
return tf_np.asarray(
tf.random.stateless_uniform(
shape, seed=_key2seed(key), dtype=dtype, minval=minval,
maxval=maxval)) |
Sample standard-normal random values.
Args:
key: the RNG key.
shape: the shape of the result.
dtype: the dtype of the result.
Returns:
Random values in standard-normal distribution. | def normal(key, shape, dtype=tf.float32):
"""Sample standard-normal random values.
Args:
key: the RNG key.
shape: the shape of the result.
dtype: the dtype of the result.
Returns:
Random values in standard-normal distribution.
"""
key = tf_np.asarray(key, dtype=_RNG_KEY_DTYPE)
return tf_np.asarray(
tf.random.stateless_normal(shape, seed=_key2seed(key), dtype=dtype)) |
Sample Bernoulli random values with given shape and mean.
Args:
key: the RNG key.
mean: optional, an array_like broadcastable to `shape` for the mean of the
random variables (default 0.5).
shape: optional, a tuple of nonnegative integers representing the shape
(default to `mean`'s shape).
Returns:
A random array with the specified shape and boolean dtype. | def bernoulli(key, mean=np.float32(0.5), shape=None):
"""Sample Bernoulli random values with given shape and mean.
Args:
key: the RNG key.
mean: optional, an array_like broadcastable to `shape` for the mean of the
random variables (default 0.5).
shape: optional, a tuple of nonnegative integers representing the shape
(default to `mean`'s shape).
Returns:
A random array with the specified shape and boolean dtype.
"""
mean = tf_np.asarray(mean)
if shape is None:
shape = mean.shape
return uniform(key, shape) < mean |
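A sketch of drawing samples with the samplers above; reusing the same key reproduces the same draw, which is the point of the stateless design. Shapes and the seed are illustrative.
k0, k1, k2 = split(prng(0), 3)
u = uniform(k0, shape=(3,))    # floats in [0, 1)
n = normal(k1, shape=(3,))     # standard-normal floats
b = bernoulli(k2, shape=(3,))  # booleans with mean 0.5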
Converts a `tf.data.Dataset` to an iterable of ndarrays.
`dataset_as_numpy` converts a possibly nested structure of `tf.data.Dataset`s
and `tf.Tensor`s to iterables of ndarrays and ndarrays, respectively. This
function must be run in eager mode outside tf.function.
Args:
dataset: a possibly nested structure of `tf.data.Dataset`s and/or
`tf.Tensor`s.
Returns:
A structure matching `dataset` where `tf.data.Dataset`s are converted to
generators of ndarrays and `tf.Tensor`s are converted to ndarrays. | def dataset_as_numpy(dataset):
"""Converts a `tf.data.Dataset` to an iterable of ndarrays.
`dataset_as_numpy` converts a possibly nested structure of `tf.data.Dataset`s
and `tf.Tensor`s to iterables of ndarrays and ndarrays, respectively. This
function must be run in eager mode outside tf.function.
Args:
dataset: a possibly nested structure of `tf.data.Dataset`s and/or
`tf.Tensor`s.
Returns:
A structure matching `dataset` where `tf.data.Dataset`s are converted to
generators of ndarrays and `tf.Tensor`s are converted to ndarrays.
"""
if not tf.executing_eagerly():
raise ValueError(
"dataset_as_numpy must be run in eager mode outside tf.function")
nested_ds = dataset
del dataset
# Flatten
flat_ds = tf.nest.flatten(nested_ds)
flat_np = []
# Type check for Tensors and Datasets
for ds_el in flat_ds:
if not isinstance(ds_el, (tf.Tensor, tf.data.Dataset)):
types = tf.nest.map_structure(type, nested_ds)
raise ValueError("Arguments to dataset_as_numpy must be (possibly nested "
"structure of) tf.Tensors or tf.data.Datasets. Got: %s" %
types)
for ds_el in flat_ds:
if isinstance(ds_el, tf.Tensor):
np_el = tf_np.asarray(ds_el)
elif isinstance(ds_el, tf.data.Dataset):
np_el = _eager_dataset_iterator(ds_el)
else:
assert False
flat_np.append(np_el)
return tf.nest.pack_sequence_as(nested_ds, flat_np) |
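A sketch of dataset_as_numpy in eager mode; a dataset leaf becomes a generator of ndarrays. The toy dataset is illustrative.
ds = tf.data.Dataset.from_tensor_slices([1, 2, 3])
for x in dataset_as_numpy(ds):
  print(x)  # prints the ndarrays 1, 2 and 3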
Records axis_name and devices for this context. | def pmap_config(axis_name, devices):
"""Records axis_name and devices for this context."""
old_axis_name = _pmap_config.axis_name()
old_devices = _pmap_config.devices()
_pmap_config.set_axis_name(axis_name)
_pmap_config.set_devices(devices)
try:
yield
finally:
_pmap_config.set_axis_name(old_axis_name)
_pmap_config.set_devices(old_devices) |
Sum all-reduction.
Args:
tensor: A tensor.
axis_name: The axis name to reduce. Must equal to that of the surrounding
pmap.
Returns:
The sum of the `tensor` replicas on each participating device. | def _psum(tensor, axis_name=None):
"""Sum all-reduction.
Args:
tensor: A tensor.
axis_name: The axis name to reduce. Must equal to that of the surrounding
pmap.
Returns:
    The sum of the `tensor` replicas on each participating device.
"""
if axis_name != _pmap_config.axis_name():
raise ValueError("axis_name (%s) is not equal to that of the surrounding "
"pmap (%s)" % (axis_name, _pmap_config.axis_name()))
devices = _pmap_config.devices()
if devices is None:
raise ValueError("Can't retrieve the device list from the surrounding pmap")
tensor = tf_np.asarray(tensor)
if tpu_devices(devices):
# TODO(b/170895907): Remove this workaround when tpu.cross_replica_sum
# supports int64/float64.
is_int64 = False
is_float64 = False
if tensor.dtype == np.int64:
is_int64 = True
tensor = tensor.astype(np.int32)
elif tensor.dtype == np.float64:
is_float64 = True
tensor = tensor.astype(np.float32)
# TODO(wangpeng): Supply the `group_assignment` argument to
# tpu.cross_replica_sum, calculated from `devices`.
tensor = tf.compat.v1.tpu.cross_replica_sum(tensor)
if is_int64:
tensor = tf.cast(tensor, tf.int64)
elif is_float64:
tensor = tf.cast(tensor, tf.float64)
else:
tensor = tf.raw_ops.CollectiveReduce(
input=tensor,
group_size=len(devices),
group_key=_GROUP_KEY,
instance_key=_get_instance_key(),
merge_op="Add",
final_op="Id",
subdiv_offsets=(0,))
return tf_np.asarray(tensor) |
Mean all-reduction.
Args:
tensor: A tensor.
axis_name: The axis name to reduce. Must equal to that of the surrounding
pmap.
Returns:
The mean of the `tensor` replicas on each participating device. | def pmean(tensor, axis_name=None):
"""Mean all-reduction.
Args:
tensor: A tensor.
axis_name: The axis name to reduce. Must equal to that of the surrounding
pmap.
Returns:
    The mean of the `tensor` replicas on each participating device.
"""
if axis_name != _pmap_config.axis_name():
raise ValueError("axis_name (%s) is not equal to that of the surrounding "
"pmap (%s)" % (axis_name, _pmap_config.axis_name()))
devices = _pmap_config.devices()
if devices is None:
raise ValueError("Can't retrieve the device list from the surrounding pmap")
if tpu_devices(devices):
# TODO(wangpeng): Implement this.
raise ValueError("pmean for TPU is not supported yet.")
else:
return tf.raw_ops.CollectiveReduce(
input=tensor,
group_size=len(devices),
group_key=_GROUP_KEY,
instance_key=_get_instance_key(),
merge_op="Add",
final_op="Div",
subdiv_offsets=(0,)) |
This is a helper function to return the pmap impl.
Args:
f: a function that takes ndarrays and returns ndarrays.
devices: a list of strings; the device list.
has_tpu: boolean; whether `devices` contains TPU devices.
Returns:
A function that takes tensors and returns tensors. | def _get_pmap_impl(f, devices, has_tpu):
"""This is a helper function to return the pmap impl.
Args:
f: a function that takes ndarrays and returns ndarrays.
devices: a list of strings; the device list.
has_tpu: boolean; whether `devices` contains TPU devices.
Returns:
A function that takes tensors and returns tensors.
"""
if has_tpu:
# Workaround b/121383831
output_is_list = [False] # Use list for mutability
def recorder(args, kwargs, res):
del args, kwargs
output_is_list[0] = isinstance(res, list)
return res
f = _record_result_type(recorder, f)
def tf_f(*tf_args):
"""A wrapper for `f` that takes/returns tensors."""
np_args = _tf_to_np(tf_args)
np_out = f(*np_args)
return np_out
if has_tpu:
@tf.function(autograph=False)
def fn(inputs):
# TODO(wangpeng): Supply the `device_assignment` argument to
# tpu.replicate, calculated from `devices`.
res = tf.compat.v1.tpu.replicate(tf_f, inputs)
# Workaround b/121383831
if (res and isinstance(res[0], list) and len(res[0]) == 1 and
not output_is_list[0]):
res = [x[0] for x in res]
return res
return fn
else:
# This is run in a tf.function so that the various underlying functions can
# be run in parallel.
# The trace happens on the client, so any devices should not depend on any
# side effects.
jit_tf_f = tf.function(tf_f, autograph=False)
@tf.function(autograph=False)
def fn(all_per_device_args):
"""Multi-device function with calls placed on the correct device."""
results = []
for per_device_args, device in zip(all_per_device_args, devices):
with tf.device(device):
results.append(jit_tf_f(*per_device_args))
return results
return fn |
Transforms a function into a multi-device function.
The semantics are similar to JAX's pmap.
Args:
f: The function to be converted.
axis_name: Used for nested pmap, which is not supported yet.
devices: The devices over which the returned function will run.
Returns:
A function that runs the underlying function `f` on `devices`. Its arguments
can be `ShardedNdArray`s, tensors or other Python objects, and its return
values are all `ShardedNdArray`s. If an input is a tensor, the length of its
first dimension must equal the number of devices, and the tensor will be
split along its first dimension among the devices. If an input is an
unknown Python object, it will be replicated among the devices. | def pmap(f, axis_name=None, devices=None):
"""Transforms a function into a multi-device function.
The semantics are similar to JAX's pmap.
Args:
f: The function to be converted.
axis_name: Used for nested pmap, which is not supported yet.
devices: The devices over which the returned function will run.
Returns:
A function that runs the underlying function `f` on `devices`. Its arguments
can be `ShardedNdArray`s, tensors or other Python objects, and its return
values are all `ShardedNdArray`s. If an input is a tensor, the length of its
first dimension must equal the number of devices, and the tensor will be
    split along its first dimension among the devices. If an input is an
unknown Python object, it will be replicated among the devices.
"""
if devices is None:
devices = accelerators()
if not isinstance(devices, (list, tuple)):
raise ValueError("Must pass a list or tuple of devices")
num_devices = len(devices)
if not num_devices:
raise ValueError("There must be at least 1 device")
has_tpu = bool(tpu_devices(devices))
pmap_fn = _get_pmap_impl(f, devices, has_tpu)
def wrapper(*args):
"""Wrapper that wraps/unwraps args, retvals, and runs the function."""
if _pmap_config.devices() is not None:
raise ValueError("Found a surrounding pmap. Nested pmap is not supported "
"yet.")
# TODO(wangpeng): Maybe we should use `asarray` to convert everything
# to ndarray first.
flattened_input_args = tf.nest.flatten(args)
flattened_per_device_args = [[] for _ in devices]
for arg in flattened_input_args:
if isinstance(arg, tf.Tensor):
# TODO(nareshmodi): Try and use the dynamic shape instead.
if (not arg.shape.rank) or arg.shape[0] != len(devices):
# TODO(nareshmodi): Fix this restriction
raise ValueError(
"Input tensors need to have a first dimension equal to "
"the number of devices; got tensor of shape %s and %s devices" %
(arg.shape, len(devices)))
# NOTE: Alternatively use tf.split, and place the split tensors on the
# appropriate device. The best solution for this is to have an API that
# splits a tensor across devices.
for j, device in enumerate(devices):
updated_arg = tf.gather(arg, j)
# TODO(wangpeng): Investigate whether we need a tf.identity for TPU.
if not has_tpu:
with tf.device(device):
updated_arg = tf.identity(updated_arg)
flattened_per_device_args[j].append(updated_arg)
elif isinstance(arg, ShardedNdArray):
for device_args, tensor in zip(flattened_per_device_args, arg.tensors):
device_args.append(tensor)
else:
for device_args in flattened_per_device_args:
device_args.append(arg)
all_per_device_args = [
tf.nest.pack_sequence_as(args, device_args)
for device_args in flattened_per_device_args
]
with pmap_config(axis_name, devices):
results = pmap_fn(all_per_device_args)
# Rewrap things. This can probably be written better.
flattened_results = [tf.nest.flatten(result) for result in results]
final_tree = []
# TODO(nareshmodi): assert all items in flattened_results have the same
# structures
for i in range(len(flattened_results[0])):
tensors = []
for j, device in enumerate(devices):
assert isinstance(
flattened_results[j][i],
tf.Tensor), ("currently only tensor return items are supported")
tensors.append(flattened_results[j][i])
final_tree.append(ShardedNdArray(tensors))
return tf.nest.pack_sequence_as(results[0], final_tree)
return wrapper |
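A sketch of pmap, under the assumption that at least two GPU devices are visible at runtime; the leading dimension of a tensor argument must equal the device count, and each shard of the result lives on one device.
devices = gpu_devices()
if len(devices) >= 2:
  add_one = pmap(lambda x: x + 1., devices=devices)
  x = tf.ones([len(devices), 3])  # leading dim == number of devices
  y = add_one(x)                  # a ShardedNdArray with one per-device shard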
Gets TPU devices out of `devices`.
Args:
devices: A device list (as a list of strings). If None, the list of all
available devices will be used for it.
Returns:
Those in `devices` that are TPUs. | def tpu_devices(devices=None):
"""Gets TPU devices out of `devices`.
Args:
devices: A device list (as a list of strings). If None, the list of all
available devices will be used for it.
Returns:
Those in `devices` that are TPUs.
"""
return find_devices("TPU", devices) |
Gets GPU devices out of `devices`.
Args:
devices: A device list (as a list of strings). If None, the list of all
available devices will be used for it.
Returns:
Those in `devices` that are GPUs. | def gpu_devices(devices=None):
"""Gets GPU devices out of `devices`.
Args:
devices: A device list (as a list of strings). If None, the list of all
available devices will be used for it.
Returns:
Those in `devices` that are GPUs.
"""
return find_devices("GPU", devices) |
Broadcasts `s` to the nested structure `to`. | def _tree_broadcast(to, s):
"""Broadcasts `s` to the nested structure `to`."""
if not isinstance(to, (list, tuple, dict)):
if not isinstance(s, (int, type(None))):
raise ValueError
return s
if isinstance(s, (int, type(None))):
return tf.nest.map_structure(lambda x: s, to)
if isinstance(to, (list, tuple)):
if len(to) != len(s):
raise ValueError
new_s = [_tree_broadcast(x, y) for x, y in zip(to, s)]
if isinstance(to, tuple):
new_s = tuple(new_s)
return new_s
elif isinstance(to, dict):
return {k: _tree_broadcast(to[k], s[k]) for k in to}
else:
raise TypeError("Unsupported type %s" % type(to)) |
Returns a function that maps `f` over the first dimension of inputs. | def vmap(f, in_axes=0, out_axes=0):
"""Returns a function that maps `f` over first dimension of inputs."""
in_axes_flat = tf.nest.flatten(in_axes)
if not all(isinstance(l, (type(None), int))
for l in in_axes_flat):
raise TypeError(
"vmap in_axes must be an int, None, or (nested) container with "
"those types as leaves, but got {}.".format(in_axes))
if all(isinstance(l, type(None)) for l in in_axes_flat):
raise ValueError("vmap must have at least one non-None value in in_axes")
out_axes_flat = tf.nest.flatten(out_axes)
if not all(isinstance(l, (type(None), int))
for l in out_axes_flat):
raise TypeError(
"vmap out_axes must be an int, None, or (nested) container with "
"those types as leaves, but got {}.".format(out_axes))
def _f(*args):
flat_args = tf.nest.flatten(args)
try:
f_in_axes = _tree_broadcast(args, in_axes)
except ValueError:
six.reraise(
ValueError,
ValueError(
"vmap in_axes specification must be a tree prefix of the "
r"corresponding value, got specification %s for value tree %s" % (
in_axes, args)),
sys.exc_info()[2])
f_in_axes_flat = tf.nest.flatten(f_in_axes)
def tf_f(tf_args):
"""Function passed to tf.vectorized_map call."""
      # Note that unbatched arguments are not passed to tf_f. Here we fill those
# arguments back before calling `f`.
tf_flat_args = []
j = 0
for arg, axis in zip(flat_args, f_in_axes_flat):
if axis is None:
tf_flat_args.append(arg)
else:
tf_flat_args.append(tf_args[j])
j += 1
unbatched_args = tf.nest.pack_sequence_as(args, tf_flat_args)
return f(*unbatched_args)
# Constructs arguments to pass to `tf_f`.
    # Unbatched arguments are skipped. Arguments with a non-zero axis are
# transposed.
tf_args = []
for arg, axis in zip(flat_args, f_in_axes_flat):
if axis is None:
continue
arg = tf_np.asarray(arg)
if axis != 0:
arg = tf_np.moveaxis(arg, axis, 0)
tf_args.append(arg)
# TODO(agarwal): consider creating a tf.function outside of _f and reusing
# that to avoid overheads of re-vectorizing the code when running eagerly.
outputs = tf.vectorized_map(tf_f, tf_args)
try:
f_out_axes = _tree_broadcast(outputs, out_axes)
except ValueError:
six.reraise(
ValueError,
ValueError(
"vmap out_axes specification must be a tree prefix of the "
r"corresponding value, got specification %s for value tree %s" % (
out_axes, outputs)),
sys.exc_info()[2])
def map_output(x, axis):
"""Maps output of tf.vectorized_map to the final output."""
x = tf_np.asarray(x)
if axis is None:
        # Note that `tf.vectorized_map` always batches the outputs.
# Here we unbatch it again.
return x[0, ...]
elif axis == 0:
return x
else:
# Need to transpose the output.
return tf_np.moveaxis(x, 0, axis)
new_outputs = [map_output(output, axis) for output, axis in zip(
tf.nest.flatten(outputs), tf.nest.flatten(f_out_axes))]
return tf.nest.pack_sequence_as(outputs, new_outputs)
return _f |
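A sketch of vmap batching a per-example dot product; with the default in_axes/out_axes of 0, both inputs and the output are mapped over their leading dimension. The helper name and shapes are illustrative.
def _dot(a, b):
  # Per-example dot product of two vectors.
  return tf_np.sum(a * b)

batched_dot = vmap(_dot)
a = tf_np.ones([4, 3])
b = tf_np.ones([4, 3])
out = batched_dot(a, b)  # shape (4,), each entry equal to 3.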
Read an environment variable and interpret it as a boolean.
True values are (case insensitive): 'y', 'yes', 't', 'true', 'on', and '1';
false values are 'n', 'no', 'f', 'false', 'off', and '0'.
Args:
varname: the name of the variable
default: the default boolean value
Raises: ValueError if the environment variable is anything else. | def bool_env(varname: str, default: bool) -> bool:
"""Read an environment variable and interpret it as a boolean.
True values are (case insensitive): 'y', 'yes', 't', 'true', 'on', and '1';
false values are 'n', 'no', 'f', 'false', 'off', and '0'.
Args:
varname: the name of the variable
default: the default boolean value
Raises: ValueError if the environment variable is anything else.
"""
val = os.getenv(varname, str(default))
val = val.lower()
if val in ('y', 'yes', 't', 'true', 'on', '1'):
return True
elif val in ('n', 'no', 'f', 'false', 'off', '0'):
return False
else:
raise ValueError("invalid truth value %r for environment %r" % (val, varname)) |
Returns all shapes that broadcast to `shape`. | def _broadcastable_shapes(shape):
"""Returns all shapes that broadcast to `shape`."""
def f(rshape):
yield []
if rshape:
for s in f(rshape[1:]):
yield rshape[0:1] + s
if rshape[0] != 1:
for s in f(rshape[1:]):
yield [1] + s
for x in f(list(reversed(shape))):
yield list(reversed(x)) |
Decorator that promotes the arguments of `fun` to `lnp.result_type(*args)`.
lnp and onp have different type promotion semantics; this decorator allows
tests make an onp reference implementation act more like an lnp
implementation. | def _promote_like_lnp(fun, inexact=False):
"""Decorator that promotes the arguments of `fun` to `lnp.result_type(*args)`.
lnp and onp have different type promotion semantics; this decorator allows
  tests to make an onp reference implementation act more like an lnp
implementation.
"""
def wrapper(*args, **kw):
flat_args = tf.nest.flatten(args)
if inexact and not any(
lnp.issubdtype(lnp.result_type(x).as_numpy_dtype, lnp.inexact)
for x in flat_args):
dtype = lnp.result_type(lnp.float_, *flat_args)
else:
dtype = lnp.result_type(*flat_args)
dtype = dtype.as_numpy_dtype
args = tf.nest.map_structure(lambda a: onp.asarray(a, dtype), args)
return fun(*args, **kw)
return wrapper |
A version that allows an empty param list. | def named_parameters(ls):
"""A version that allows an empty param list."""
def noop(_):
def wrapper(self, *args, **kwargs):
self.skipTest("Empty parameter list")
return wrapper
if isinstance(ls, (list, tuple)) and not ls:
return noop
if isinstance(ls, itertools.chain):
try:
first = next(ls)
except StopIteration:
return noop
else:
ls = itertools.chain([first], ls)
return parameterized.named_parameters(ls) |
Chooses `if_true` or `if_false` based on device_under_test. | def if_device_under_test(device_type: Union[str, Sequence[str]],
if_true, if_false):
"""Chooses `if_true` of `if_false` based on device_under_test."""
if device_under_test() in ([device_type] if isinstance(device_type, str)
else device_type):
return if_true
else:
return if_false |
A decorator for test methods to skip the test on certain devices. | def skip_on_devices(*disabled_devices):
"""A decorator for test methods to skip the test on certain devices."""
def skip(test_method):
@functools.wraps(test_method)
def test_method_wrapper(self, *args, **kwargs):
device = device_under_test()
if device in disabled_devices:
test_name = getattr(test_method, '__name__', '[unknown test]')
raise unittest.SkipTest(
f"{test_name} not supported on {device.upper()}.")
return test_method(self, *args, **kwargs)
return test_method_wrapper
return skip |
A decorator for test methods to skip the test when flags are set. | def skip_on_flag(flag_name, skip_value):
"""A decorator for test methods to skip the test when flags are set."""
def skip(test_method): # pylint: disable=missing-docstring
@functools.wraps(test_method)
def test_method_wrapper(self, *args, **kwargs):
flag_value = getattr(FLAGS, flag_name)
if flag_value == skip_value:
test_name = getattr(test_method, '__name__', '[unknown test]')
raise unittest.SkipTest(
f"{test_name} not supported when FLAGS.{flag_name} is {flag_value}")
return test_method(self, *args, **kwargs)
return test_method_wrapper
return skip |
Converts `shape` to a tuple of dimensions. | def _dims_of_shape(shape):
"""Converts `shape` to a tuple of dimensions."""
if type(shape) in (list, tuple):
return shape
elif isinstance(shape, ScalarShape):
return ()
else:
raise TypeError(type(shape)) |
Casts `value` to the correct Python type for `shape` and `dtype`. | def _cast_to_shape(value, shape, dtype):
"""Casts `value` to the correct Python type for `shape` and `dtype`."""
if shape is NUMPY_SCALAR_SHAPE:
# explicitly cast to NumPy scalar in case `value` is a Python scalar.
return onp.dtype(dtype).type(value)
elif shape is PYTHON_SCALAR_SHAPE:
# explicitly cast to Python scalar via https://stackoverflow.com/a/11389998
return onp.asarray(value).item()
elif type(shape) in (list, tuple):
assert onp.shape(value) == tuple(shape)
return value
else:
raise TypeError(type(shape)) |
Produce random values given shape, dtype, scale, and post-processor.
Args:
rand: a function for producing random values of a given shape, e.g. a
bound version of either onp.RandomState.randn or onp.RandomState.rand.
shape: a shape value as a tuple of positive integers.
dtype: a numpy dtype.
scale: optional, a multiplicative scale for the random values (default 1).
post: optional, a callable for post-processing the random values (default
identity).
Returns:
An ndarray of the given shape and dtype using random values based on a call
to rand but scaled, converted to the appropriate dtype, and post-processed. | def _rand_dtype(rand, shape, dtype, scale=1., post=lambda x: x):
"""Produce random values given shape, dtype, scale, and post-processor.
Args:
rand: a function for producing random values of a given shape, e.g. a
bound version of either onp.RandomState.randn or onp.RandomState.rand.
shape: a shape value as a tuple of positive integers.
dtype: a numpy dtype.
scale: optional, a multiplicative scale for the random values (default 1).
post: optional, a callable for post-processing the random values (default
identity).
Returns:
An ndarray of the given shape and dtype using random values based on a call
to rand but scaled, converted to the appropriate dtype, and post-processed.
"""
r = lambda: onp.asarray(scale * rand(*_dims_of_shape(shape)), dtype)
if onp.issubdtype(dtype, onp.complexfloating):
vals = r() + 1.0j * r()
else:
vals = r()
return _cast_to_shape(onp.asarray(post(vals), dtype), shape, dtype) |
Return a random sampler that produces infinities in floating types. | def rand_some_inf():
"""Return a random sampler that produces infinities in floating types."""
rng = npr.RandomState(1)
base_rand = rand_default()
"""
TODO: Complex numbers are not correctly tested
If blocks should be switched in order, and relevant tests should be fixed
"""
def rand(shape, dtype):
"""The random sampler function."""
if not onp.issubdtype(dtype, onp.floating):
# only float types have inf
return base_rand(shape, dtype)
if onp.issubdtype(dtype, onp.complexfloating):
base_dtype = onp.real(onp.array(0, dtype=dtype)).dtype
out = (rand(shape, base_dtype) +
onp.array(1j, dtype) * rand(shape, base_dtype))
return _cast_to_shape(out, shape, dtype)
dims = _dims_of_shape(shape)
posinf_flips = rng.rand(*dims) < 0.1
neginf_flips = rng.rand(*dims) < 0.1
vals = base_rand(shape, dtype)
vals = onp.where(posinf_flips, onp.array(onp.inf, dtype=dtype), vals)
vals = onp.where(neginf_flips, onp.array(-onp.inf, dtype=dtype), vals)
return _cast_to_shape(onp.asarray(vals, dtype=dtype), shape, dtype)
return rand |
Return a random sampler that produces nans in floating types. | def rand_some_nan():
"""Return a random sampler that produces nans in floating types."""
rng = npr.RandomState(1)
base_rand = rand_default()
def rand(shape, dtype):
"""The random sampler function."""
if onp.issubdtype(dtype, onp.complexfloating):
base_dtype = onp.real(onp.array(0, dtype=dtype)).dtype
out = (rand(shape, base_dtype) +
onp.array(1j, dtype) * rand(shape, base_dtype))
return _cast_to_shape(out, shape, dtype)
if not onp.issubdtype(dtype, onp.floating):
      # only float types have nan
return base_rand(shape, dtype)
dims = _dims_of_shape(shape)
nan_flips = rng.rand(*dims) < 0.1
vals = base_rand(shape, dtype)
vals = onp.where(nan_flips, onp.array(onp.nan, dtype=dtype), vals)
return _cast_to_shape(onp.asarray(vals, dtype=dtype), shape, dtype)
return rand |
Return a random sampler that produces infinities and nans in floating types. | def rand_some_inf_and_nan():
  """Return a random sampler that produces infinities and nans in floating types."""
rng = npr.RandomState(1)
base_rand = rand_default()
"""
TODO: Complex numbers are not correctly tested
If blocks should be switched in order, and relevant tests should be fixed
"""
def rand(shape, dtype):
"""The random sampler function."""
if not onp.issubdtype(dtype, onp.floating):
      # only float types have inf and nan
return base_rand(shape, dtype)
if onp.issubdtype(dtype, onp.complexfloating):
base_dtype = onp.real(onp.array(0, dtype=dtype)).dtype
out = (rand(shape, base_dtype) +
onp.array(1j, dtype) * rand(shape, base_dtype))
return _cast_to_shape(out, shape, dtype)
dims = _dims_of_shape(shape)
posinf_flips = rng.rand(*dims) < 0.1
neginf_flips = rng.rand(*dims) < 0.1
nan_flips = rng.rand(*dims) < 0.1
vals = base_rand(shape, dtype)
vals = onp.where(posinf_flips, onp.array(onp.inf, dtype=dtype), vals)
vals = onp.where(neginf_flips, onp.array(-onp.inf, dtype=dtype), vals)
vals = onp.where(nan_flips, onp.array(onp.nan, dtype=dtype), vals)
return _cast_to_shape(onp.asarray(vals, dtype=dtype), shape, dtype)
return rand |
Return a random sampler that produces some zeros. | def rand_some_zero():
"""Return a random sampler that produces some zeros."""
rng = npr.RandomState(1)
base_rand = rand_default()
def rand(shape, dtype):
"""The random sampler function."""
dims = _dims_of_shape(shape)
zeros = rng.rand(*dims) < 0.5
vals = base_rand(shape, dtype)
vals = onp.where(zeros, onp.array(0, dtype=dtype), vals)
return _cast_to_shape(onp.asarray(vals, dtype=dtype), shape, dtype)
return rand |
Returns an empty array with the specified shape and dtype.
Args:
shape: A fully defined shape. Could be a NumPy array or a python scalar, a
  list or tuple of integers, or a TensorFlow tensor/ndarray of integer type
  with rank <= 1.
dtype: Optional, defaults to float. The type of the resulting ndarray. Could
be a python type, a NumPy type or a TensorFlow `DType`.
Returns:
An ndarray. | def empty(shape, dtype=float): # pylint: disable=redefined-outer-name
"""Returns an empty array with the specified shape and dtype.
Args:
    shape: A fully defined shape. Could be a NumPy array or a python scalar, a
      list or tuple of integers, or a TensorFlow tensor/ndarray of integer type
      with rank <= 1.
dtype: Optional, defaults to float. The type of the resulting ndarray. Could
be a python type, a NumPy type or a TensorFlow `DType`.
Returns:
An ndarray.
"""
return zeros(shape, dtype) |
Returns an empty array with the shape and possibly type of the input array.
Args:
a: array_like. Could be an ndarray, a Tensor or any object that can be
converted to a Tensor using `tf.convert_to_tensor`.
dtype: Optional, defaults to dtype of the input array. The type of the
resulting ndarray. Could be a python type, a NumPy type or a TensorFlow
`DType`.
Returns:
An ndarray. | def empty_like(a, dtype=None):
"""Returns an empty array with the shape and possibly type of the input array.
Args:
a: array_like. Could be an ndarray, a Tensor or any object that can be
converted to a Tensor using `tf.convert_to_tensor`.
dtype: Optional, defaults to dtype of the input array. The type of the
resulting ndarray. Could be a python type, a NumPy type or a TensorFlow
`DType`.
Returns:
An ndarray.
"""
return zeros_like(a, dtype) |
Returns an ndarray with the given shape and type filled with zeros.
Args:
shape: A fully defined shape. Could be a NumPy array or a python scalar, a
  list or tuple of integers, or a TensorFlow tensor/ndarray of integer type
  with rank <= 1.
dtype: Optional, defaults to float. The type of the resulting ndarray. Could
be a python type, a NumPy type or a TensorFlow `DType`.
Returns:
An ndarray. | def zeros(shape, dtype=float): # pylint: disable=redefined-outer-name
"""Returns an ndarray with the given shape and type filled with zeros.
Args:
    shape: A fully defined shape. Could be a NumPy array or a python scalar, a
      list or tuple of integers, or a TensorFlow tensor/ndarray of integer type
      with rank <= 1.
dtype: Optional, defaults to float. The type of the resulting ndarray. Could
be a python type, a NumPy type or a TensorFlow `DType`.
Returns:
An ndarray.
"""
if dtype:
dtype = utils.result_type(dtype)
if isinstance(shape, arrays_lib.ndarray):
shape = shape.data
return arrays_lib.tensor_to_ndarray(tf.zeros(shape, dtype=dtype)) |
Returns an array of zeros with the shape and type of the input array.
Args:
a: array_like. Could be an ndarray, a Tensor or any object that can be
converted to a Tensor using `tf.convert_to_tensor`.
dtype: Optional, defaults to dtype of the input array. The type of the
resulting ndarray. Could be a python type, a NumPy type or a TensorFlow
`DType`.
Returns:
An ndarray. | def zeros_like(a, dtype=None):
"""Returns an array of zeros with the shape and type of the input array.
Args:
a: array_like. Could be an ndarray, a Tensor or any object that can be
converted to a Tensor using `tf.convert_to_tensor`.
dtype: Optional, defaults to dtype of the input array. The type of the
resulting ndarray. Could be a python type, a NumPy type or a TensorFlow
`DType`.
Returns:
An ndarray.
"""
if isinstance(a, arrays_lib.ndarray):
a = a.data
if dtype is None:
# We need to let utils.result_type decide the dtype, not tf.zeros_like
dtype = utils.result_type(a)
else:
# TF and numpy has different interpretations of Python types such as
# `float`, so we let `utils.result_type` decide.
dtype = utils.result_type(dtype)
dtype = tf.as_dtype(dtype) # Work around b/149877262
return arrays_lib.tensor_to_ndarray(tf.zeros_like(a, dtype)) |
Returns an ndarray with the given shape and type filled with ones.
Args:
shape: A fully defined shape. Could be a NumPy array or a python scalar, a
  list or tuple of integers, or a TensorFlow tensor/ndarray of integer type
  with rank <= 1.
dtype: Optional, defaults to float. The type of the resulting ndarray. Could
be a python type, a NumPy type or a TensorFlow `DType`.
Returns:
An ndarray. | def ones(shape, dtype=float): # pylint: disable=redefined-outer-name
"""Returns an ndarray with the given shape and type filled with ones.
Args:
    shape: A fully defined shape. Could be a NumPy array or a python scalar, a
      list or tuple of integers, or a TensorFlow tensor/ndarray of integer type
      with rank <= 1.
dtype: Optional, defaults to float. The type of the resulting ndarray. Could
be a python type, a NumPy type or a TensorFlow `DType`.
Returns:
An ndarray.
"""
if dtype:
dtype = utils.result_type(dtype)
if isinstance(shape, arrays_lib.ndarray):
shape = shape.data
return arrays_lib.tensor_to_ndarray(tf.ones(shape, dtype=dtype)) |
Returns an array of ones with the shape and type of the input array.
Args:
a: array_like. Could be an ndarray, a Tensor or any object that can be
converted to a Tensor using `tf.convert_to_tensor`.
dtype: Optional, defaults to dtype of the input array. The type of the
resulting ndarray. Could be a python type, a NumPy type or a TensorFlow
`DType`.
Returns:
An ndarray. | def ones_like(a, dtype=None):
"""Returns an array of ones with the shape and type of the input array.
Args:
a: array_like. Could be an ndarray, a Tensor or any object that can be
converted to a Tensor using `tf.convert_to_tensor`.
dtype: Optional, defaults to dtype of the input array. The type of the
resulting ndarray. Could be a python type, a NumPy type or a TensorFlow
`DType`.
Returns:
An ndarray.
"""
if isinstance(a, arrays_lib.ndarray):
a = a.data
if dtype is None:
dtype = utils.result_type(a)
else:
dtype = utils.result_type(dtype)
return arrays_lib.tensor_to_ndarray(tf.ones_like(a, dtype)) |
Returns a square array with ones on the main diagonal and zeros elsewhere.
Args:
n: number of rows/cols.
dtype: Optional, defaults to float. The type of the resulting ndarray. Could
be a python type, a NumPy type or a TensorFlow `DType`.
Returns:
An ndarray of shape (n, n) and requested type. | def identity(n, dtype=float):
"""Returns a square array with ones on the main diagonal and zeros elsewhere.
Args:
n: number of rows/cols.
dtype: Optional, defaults to float. The type of the resulting ndarray. Could
be a python type, a NumPy type or a TensorFlow `DType`.
Returns:
An ndarray of shape (n, n) and requested type.
"""
return eye(N=n, M=n, dtype=dtype) |
Returns an array with given shape and dtype filled with `fill_value`.
Args:
shape: A valid shape object. Could be a native python object or an object
of type ndarray, numpy.ndarray or tf.TensorShape.
fill_value: array_like. Could be an ndarray, a Tensor or any object that can
be converted to a Tensor using `tf.convert_to_tensor`.
dtype: Optional, defaults to dtype of the `fill_value`. The type of the
resulting ndarray. Could be a python type, a NumPy type or a TensorFlow
`DType`.
Returns:
An ndarray.
Raises:
ValueError: if `fill_value` can not be broadcast to shape `shape`. | def full(shape, fill_value, dtype=None): # pylint: disable=redefined-outer-name
"""Returns an array with given shape and dtype filled with `fill_value`.
Args:
shape: A valid shape object. Could be a native python object or an object
of type ndarray, numpy.ndarray or tf.TensorShape.
fill_value: array_like. Could be an ndarray, a Tensor or any object that can
be converted to a Tensor using `tf.convert_to_tensor`.
dtype: Optional, defaults to dtype of the `fill_value`. The type of the
resulting ndarray. Could be a python type, a NumPy type or a TensorFlow
`DType`.
Returns:
An ndarray.
Raises:
ValueError: if `fill_value` can not be broadcast to shape `shape`.
"""
fill_value = asarray(fill_value, dtype=dtype)
if utils.isscalar(shape):
shape = tf.reshape(shape, [1])
return arrays_lib.tensor_to_ndarray(tf.broadcast_to(fill_value.data, shape)) |
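A short sketch of the creation helpers above; they mirror their NumPy counterparts but return TensorFlow-backed ndarrays. Shapes and fill values are illustrative.
z = zeros((2, 3))            # float zeros of shape (2, 3)
o = ones_like(z, dtype=int)  # integer ones with the same shape
i3 = identity(3)             # 3x3 identity matrix
f7 = full((2, 2), 7)         # 2x2 array filled with 7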
The order, subok and shape arguments must be left at their default values. | def full_like(a, fill_value, dtype=None, order='K', subok=True, shape=None):  # pylint: disable=missing-docstring,redefined-outer-name
"""order, subok and shape arguments mustn't be changed."""
if order != 'K':
raise ValueError('Non-standard orders are not supported.')
if not subok:
raise ValueError('subok being False is not supported.')
if shape:
raise ValueError('Overriding the shape is not supported.')
a = asarray(a).data
dtype = dtype or utils.result_type(a)
fill_value = asarray(fill_value, dtype=dtype)
return arrays_lib.tensor_to_ndarray(
tf.broadcast_to(fill_value.data, tf.shape(a))) |
Creates an ndarray with the contents of val.
Args:
val: array_like. Could be an ndarray, a Tensor or any object that can be
converted to a Tensor using `tf.convert_to_tensor`.
dtype: Optional, defaults to dtype of the `val`. The type of the resulting
ndarray. Could be a python type, a NumPy type or a TensorFlow `DType`.
copy: Determines whether to create a copy of the backing buffer. Since
Tensors are immutable, a copy is made only if val is placed on a different
device than the current one. Even if `copy` is False, a new Tensor may
need to be built to satisfy `dtype` and `ndmin`. This is used only if `val`
is an ndarray or a Tensor.
ndmin: The minimum rank of the returned array.
Returns:
An ndarray. | def array(val, dtype=None, copy=True, ndmin=0): # pylint: disable=redefined-outer-name
"""Creates an ndarray with the contents of val.
Args:
val: array_like. Could be an ndarray, a Tensor or any object that can be
converted to a Tensor using `tf.convert_to_tensor`.
dtype: Optional, defaults to dtype of the `val`. The type of the resulting
ndarray. Could be a python type, a NumPy type or a TensorFlow `DType`.
copy: Determines whether to create a copy of the backing buffer. Since
Tensors are immutable, a copy is made only if val is placed on a different
device than the current one. Even if `copy` is False, a new Tensor may
need to be built to satisfy `dtype` and `ndmin`. This is used only if `val`
is an ndarray or a Tensor.
ndmin: The minimum rank of the returned array.
Returns:
An ndarray.
"""
if dtype:
dtype = utils.result_type(dtype)
if isinstance(val, arrays_lib.ndarray):
result_t = val.data
else:
result_t = val
if copy and isinstance(result_t, tf.Tensor):
# Note: In eager mode, a copy of `result_t` is made only if it is not on
# the context device.
result_t = tf.identity(result_t)
if not isinstance(result_t, tf.Tensor):
if not dtype:
dtype = utils.result_type(result_t)
# We can't call `convert_to_tensor(result_t, dtype=dtype)` here because
# convert_to_tensor doesn't allow incompatible arguments such as (5.5, int)
# while np.array allows them. We need to convert-then-cast.
def maybe_data(x):
if isinstance(x, arrays_lib.ndarray):
return x.data
return x
# Handles lists of ndarrays
result_t = tf.nest.map_structure(maybe_data, result_t)
result_t = arrays_lib.convert_to_tensor(result_t)
result_t = tf.cast(result_t, dtype=dtype)
elif dtype:
result_t = tf.cast(result_t, dtype)
ndims = tf.rank(result_t)
def true_fn():
old_shape = tf.shape(result_t)
new_shape = tf.concat([tf.ones(ndmin - ndims, tf.int32), old_shape], axis=0)
return tf.reshape(result_t, new_shape)
result_t = utils.cond(utils.greater(ndmin, ndims), true_fn, lambda: result_t)
return arrays_lib.tensor_to_ndarray(result_t) |
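A sketch illustrating the convert-then-cast behaviour and the `ndmin`/`copy` arguments of `array` (import path assumed):
from trax.tf_numpy import numpy as tf_np  # assumed import path

a = tf_np.array([1, 2, 3])           # int array from a Python list
b = tf_np.array(5.5, dtype=int)      # convert-then-cast: 5.5 is converted, then cast to int
c = tf_np.array(5.5, ndmin=2)        # scalar promoted to shape (1, 1)
d = tf_np.array(a, copy=True)        # goes through tf.identity; cheap since Tensors are immutable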
Returns `step`-separated values in the range [start, stop).
Args:
start: Start of the interval. Included in the range.
stop: End of the interval. If not specified, `start` is treated as 0 and
`start` value is used as `stop`. If specified, it is not included in the
range if `step` is integer. When `step` is floating point, it may or may
not be included.
step: The difference between 2 consecutive values in the output range. It is
recommended to use `linspace` instead of using non-integer values for
`step`.
dtype: Optional. Type of the resulting ndarray. Could be a python type, a
NumPy type or a TensorFlow `DType`. If not provided, the largest type of
`start`, `stop`, `step` is used.
Raises:
ValueError: If step is zero. | def arange(start, stop=None, step=1, dtype=None):
"""Returns `step`-separated values in the range [start, stop).
Args:
start: Start of the interval. Included in the range.
stop: End of the interval. If not specified, `start` is treated as 0 and
`start` value is used as `stop`. If specified, it is not included in the
range if `step` is integer. When `step` is floating point, it may or may
not be included.
step: The difference between 2 consecutive values in the output range. It is
recommended to use `linspace` instead of using non-integer values for
`step`.
dtype: Optional. Type of the resulting ndarray. Could be a python type, a
NumPy type or a TensorFlow `DType`. If not provided, the largest type of
`start`, `stop`, `step` is used.
Raises:
ValueError: If step is zero.
"""
if not step:
raise ValueError('step must be non-zero.')
if dtype:
dtype = utils.result_type(dtype)
else:
if stop is None:
dtype = utils.result_type(start, step)
else:
dtype = utils.result_type(start, step, stop)
if step > 0 and ((stop is not None and start > stop) or
(stop is None and start < 0)):
return array([], dtype=dtype)
if step < 0 and ((stop is not None and start < stop) or
(stop is None and start > 0)):
return array([], dtype=dtype)
# TODO(srbs): There are some bugs when start or stop is float type and dtype
# is integer type.
return arrays_lib.tensor_to_ndarray(
tf.cast(tf.range(start, limit=stop, delta=step), dtype=dtype)) |
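A few representative `arange` calls, matching the empty-range rules in the code above (import path assumed):
from trax.tf_numpy import numpy as tf_np  # assumed import path

tf_np.arange(5)            # [0, 1, 2, 3, 4]
tf_np.arange(2, 10, 3)     # [2, 5, 8]; stop is excluded for integer steps
tf_np.arange(5, 2)         # empty: start > stop with a positive step
tf_np.arange(0, 1, 0.25)   # float steps work, but linspace is recommended instead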
Raises an error if input is not 1- or 2-d. | def diag(v, k=0): # pylint: disable=missing-docstring
"""Raises an error if input is not 1- or 2-d."""
v = asarray(v).data
v_rank = tf.rank(v)
v.shape.with_rank_at_most(2)
# TODO(nareshmodi): Consider a utils.Assert version that will fail during
# tracing time if the shape is known.
tf.debugging.Assert(
utils.logical_or(tf.equal(v_rank, 1), tf.equal(v_rank, 2)), [v_rank])
def _diag(v, k):
return utils.cond(
tf.equal(tf.size(v), 0),
lambda: tf.zeros([abs(k), abs(k)], dtype=v.dtype),
lambda: tf.linalg.diag(v, k=k))
def _diag_part(v, k):
v_shape = tf.shape(v)
v, k = utils.cond(
utils.logical_or(
utils.less_equal(k, -1 * utils.getitem(v_shape, 0)),
utils.greater_equal(k, utils.getitem(v_shape, 1)),
), lambda: (tf.zeros([0, 0], dtype=v.dtype), 0), lambda: (v, k))
result = tf.linalg.diag_part(v, k=k)
return result
result = utils.cond(
tf.equal(v_rank, 1), lambda: _diag(v, k), lambda: _diag_part(v, k))
return utils.tensor_to_ndarray(result) |
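A sketch showing the two modes of `diag`: building a matrix from a 1-d input and extracting a diagonal from a 2-d input (import path assumed):
from trax.tf_numpy import numpy as tf_np  # assumed import path

v = tf_np.array([1, 2, 3])
m = tf_np.diag(v)        # 1-d input: 3x3 matrix with v on the main diagonal
d = tf_np.diag(m, k=1)   # 2-d input: extract the diagonal just above the main one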
Returns a 2-d array with flattened `v` as diagonal.
Args:
v: array_like of any rank. Gets flattened when setting as diagonal. Could be
an ndarray, a Tensor or any object that can be converted to a Tensor using
`tf.convert_to_tensor`.
k: Position of the diagonal. Defaults to 0, the main diagonal. Positive
values refer to diagonals shifted right, negative values refer to
diagonals shifted left.
Returns:
2-d ndarray. | def diagflat(v, k=0):
"""Returns a 2-d array with flattened `v` as diagonal.
Args:
v: array_like of any rank. Gets flattened when setting as diagonal. Could be
an ndarray, a Tensor or any object that can be converted to a Tensor using
`tf.convert_to_tensor`.
k: Position of the diagonal. Defaults to 0, the main diagonal. Positive
values refer to diagonals shifted right, negative values refer to
diagonals shifted left.
Returns:
2-d ndarray.
"""
v = asarray(v)
return diag(tf.reshape(v.data, [-1]), k) |
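A brief `diagflat` sketch; the input is flattened before being placed on the requested diagonal (import path assumed):
from trax.tf_numpy import numpy as tf_np  # assumed import path

m = tf_np.diagflat([[1, 2], [3, 4]])  # flattened to [1, 2, 3, 4], so the result is 4x4
s = tf_np.diagflat([1, 2], k=1)       # values land one diagonal above the main one (3x3 result)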
Whether all array elements or those along an axis evaluate to true.
Casts the array to bool type if it is not already and uses `tf.reduce_all` to
compute the result.
Args:
a: array_like. Could be an ndarray, a Tensor or any object that can
be converted to a Tensor using `tf.convert_to_tensor`.
axis: Optional. Could be an int or a tuple of integers. If not specified,
the reduction is performed over all array indices.
keepdims: If true, retains reduced dimensions with length 1.
Returns:
An ndarray. Note that unlike NumPy this does not return a scalar bool if
`axis` is None. | def all(a, axis=None, keepdims=None): # pylint: disable=redefined-builtin
"""Whether all array elements or those along an axis evaluate to true.
Casts the array to bool type if it is not already and uses `tf.reduce_all` to
compute the result.
Args:
a: array_like. Could be an ndarray, a Tensor or any object that can
be converted to a Tensor using `tf.convert_to_tensor`.
axis: Optional. Could be an int or a tuple of integers. If not specified,
the reduction is performed over all array indices.
keepdims: If true, retains reduced dimensions with length 1.
Returns:
An ndarray. Note that unlike NumPy this does not return a scalar bool if
`axis` is None.
"""
a = asarray(a, dtype=bool)
return utils.tensor_to_ndarray(
tf.reduce_all(input_tensor=a.data, axis=axis, keepdims=keepdims)) |
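A sketch of `all`, including the keepdims behaviour noted in the docstring (import path assumed):
from trax.tf_numpy import numpy as tf_np  # assumed import path

x = tf_np.array([[1, 0], [1, 1]])
tf_np.all(x)                         # rank-0 ndarray holding False, not a Python bool
tf_np.all(x, axis=1)                 # per-row reduction: [False, True]
tf_np.all(x, axis=1, keepdims=True)  # shape (2, 1) instead of (2,)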
Whether any element in the entire array or in an axis evaluates to true.
Casts the array to bool type if it is not already and uses `tf.reduce_any` to
compute the result.
Args:
a: array_like. Could be an ndarray, a Tensor or any object that can
be converted to a Tensor using `tf.convert_to_tensor`.
axis: Optional. Could be an int or a tuple of integers. If not specified,
the reduction is performed over all array indices.
keepdims: If true, retains reduced dimensions with length 1.
Returns:
An ndarray. Note that unlike NumPy this does not return a scalar bool if
`axis` is None. | def any(a, axis=None, keepdims=None): # pylint: disable=redefined-builtin
"""Whether any element in the entire array or in an axis evaluates to true.
Casts the array to bool type if it is not already and uses `tf.reduce_any` to
compute the result.
Args:
a: array_like. Could be an ndarray, a Tensor or any object that can
be converted to a Tensor using `tf.convert_to_tensor`.
axis: Optional. Could be an int or a tuple of integers. If not specified,
the reduction is performed over all array indices.
keepdims: If true, retains reduced dimensions with length 1.
Returns:
An ndarray. Note that unlike NumPy this does not return a scalar bool if
`axis` is None.
"""
a = asarray(a, dtype=bool)
return utils.tensor_to_ndarray(
tf.reduce_any(input_tensor=a.data, axis=axis, keepdims=keepdims)) |
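The companion sketch for `any` (import path assumed):
from trax.tf_numpy import numpy as tf_np  # assumed import path

x = tf_np.array([[0, 0], [0, 1]])
tf_np.any(x)           # rank-0 ndarray holding True
tf_np.any(x, axis=0)   # column-wise reduction: [False, True]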
Compresses `a` by selecting values along `axis` with `condition` true.
Uses `tf.boolean_mask`.
Args:
condition: 1-d array of bools. If `condition` is shorter than the array
axis (or the flattened array if axis is None), it is padded with False.
a: array_like. Could be an ndarray, a Tensor or any object that can
be converted to a Tensor using `tf.convert_to_tensor`.
axis: Optional. Axis along which to select elements. If None, `condition` is
applied on flattened array.
Returns:
An ndarray.
Raises:
ValueError: if `condition` is not of rank 1. | def compress(condition, a, axis=None):
"""Compresses `a` by selecting values along `axis` with `condition` true.
Uses `tf.boolean_mask`.
Args:
condition: 1-d array of bools. If `condition` is shorter than the array
axis (or the flattened array if axis is None), it is padded with False.
a: array_like. Could be an ndarray, a Tensor or any object that can
be converted to a Tensor using `tf.convert_to_tensor`.
axis: Optional. Axis along which to select elements. If None, `condition` is
applied on flattened array.
Returns:
An ndarray.
Raises:
ValueError: if `condition` is not of rank 1.
"""
condition = asarray(condition, dtype=bool)
a = asarray(a)
if condition.ndim != 1:
raise ValueError('condition must be a 1-d array.')
# `np.compress` treats scalars as 1-d arrays.
if a.ndim == 0:
a = ravel(a)
if axis is None:
a = ravel(a)
axis = 0
if axis < 0:
axis += a.ndim
assert axis >= 0 and axis < a.ndim
# `tf.boolean_mask` requires the first dimensions of array and condition to
# match. `np.compress` pads condition with False when it is shorter.
condition_t = condition.data
a_t = a.data
if condition.shape[0] < a.shape[axis]:
padding = tf.fill([a.shape[axis] - condition.shape[0]], False)
condition_t = tf.concat([condition_t, padding], axis=0)
return utils.tensor_to_ndarray(tf.boolean_mask(tensor=a_t, mask=condition_t,
axis=axis)) |
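A sketch of `compress`, including the False-padding of a short condition described above (import path assumed):
from trax.tf_numpy import numpy as tf_np  # assumed import path

a = tf_np.array([[1, 2], [3, 4], [5, 6]])
tf_np.compress([True, False, True], a, axis=0)  # keep rows 0 and 2
tf_np.compress([False, True], a, axis=1)        # keep only column 1
tf_np.compress([True], a, axis=0)               # short condition padded with False: row 0 only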
Returns a copy of the array. | def copy(a):
"""Returns a copy of the array."""
return array(a, copy=True) |