<SYSTEM_TASK:>
Apply a sequence of functions to the input or output of a layer.
<END_TASK>
<USER_TASK:>
Description:
def layer_prepostprocess(previous_value,
x,
sequence,
dropout_rate,
norm_type,
depth,
epsilon,
default_name,
name=None,
dropout_broadcast_dims=None,
layer_collection=None):
"""Apply a sequence of functions to the input or output of a layer.
The sequence is specified as a string which may contain the following
characters:
a: add previous_value
n: apply normalization
d: apply dropout
z: zero add
For example, if sequence=="dna", then the output is
previous_value + normalize(dropout(x))
Args:
previous_value: A Tensor, to be added as a residual connection ('a')
x: A Tensor to be transformed.
sequence: a string.
dropout_rate: a float
norm_type: a string (see apply_norm())
depth: an integer (size of last dimension of x).
epsilon: a float (parameter for normalization)
default_name: a string
name: a string
dropout_broadcast_dims: an optional list of integers less than 3
specifying in which dimensions to broadcast the dropout decisions.
saves memory.
layer_collection: A tensorflow_kfac.LayerCollection. Only used by the
KFAC optimizer. Default is None.
Returns:
a Tensor
""" |
with tf.variable_scope(name, default_name=default_name):
if sequence == "none":
return x
for c in sequence:
if c == "a":
x += previous_value
elif c == "z":
x = zero_add(previous_value, x)
elif c == "n":
x = apply_norm(
x, norm_type, depth, epsilon, layer_collection=layer_collection)
else:
assert c == "d", ("Unknown sequence step %s" % c)
x = dropout_with_broadcast_dims(
x, 1.0 - dropout_rate, broadcast_dims=dropout_broadcast_dims)
return x |
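To make the sequence-string semantics concrete, here is a minimal NumPy sketch (not the library code; layer norm and dropout are simplified stand-ins) showing how "dna" composes into previous_value + normalize(dropout(x)):

import numpy as np

def toy_prepostprocess(previous_value, x, sequence, keep_prob=0.9, seed=0):
    """Toy mirror of the 'a'/'n'/'d' sequence semantics above."""
    rng = np.random.default_rng(seed)
    for c in sequence:
        if c == "a":                                   # residual add
            x = x + previous_value
        elif c == "n":                                 # layer norm over last axis
            mean = x.mean(axis=-1, keepdims=True)
            var = x.var(axis=-1, keepdims=True)
            x = (x - mean) / np.sqrt(var + 1e-6)
        elif c == "d":                                 # inverted dropout
            mask = rng.random(x.shape) < keep_prob
            x = np.where(mask, x / keep_prob, 0.0)
    return x

out = toy_prepostprocess(np.ones((2, 4)), np.random.randn(2, 4), "dna")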
<SYSTEM_TASK:>
Apply layer preprocessing.
<END_TASK>
<USER_TASK:>
Description:
def layer_preprocess(layer_input, hparams, layer_collection=None):
"""Apply layer preprocessing.
See layer_prepostprocess() for details.
A hyperparameters object is passed for convenience. The hyperparameters
that may be used are:
layer_preprocess_sequence
layer_prepostprocess_dropout
norm_type
hidden_size
norm_epsilon
Args:
layer_input: a Tensor
hparams: a hyperparameters object.
layer_collection: A tensorflow_kfac.LayerCollection. Only used by the
KFAC optimizer. Default is None.
Returns:
a Tensor
""" |
assert "a" not in hparams.layer_preprocess_sequence, (
"No residual connections allowed in hparams.layer_preprocess_sequence")
assert "z" not in hparams.layer_preprocess_sequence, (
"No residual connections allowed in hparams.layer_preprocess_sequence")
return layer_prepostprocess(
None,
layer_input,
sequence=hparams.layer_preprocess_sequence,
dropout_rate=hparams.layer_prepostprocess_dropout,
norm_type=hparams.norm_type,
depth=None,
epsilon=hparams.norm_epsilon,
dropout_broadcast_dims=comma_separated_string_to_integer_list(
getattr(hparams, "layer_prepostprocess_dropout_broadcast_dims", "")),
default_name="layer_prepostprocess",
layer_collection=layer_collection) |
<SYSTEM_TASK:>
Apply layer postprocessing.
<END_TASK>
<USER_TASK:>
Description:
def layer_postprocess(layer_input, layer_output, hparams):
"""Apply layer postprocessing.
See layer_prepostprocess() for details.
A hyperparameters object is passed for convenience. The hyperparameters
that may be used are:
layer_postprocess_sequence
layer_prepostprocess_dropout
norm_type
hidden_size
norm_epsilon
Args:
layer_input: a Tensor
layer_output: a Tensor
hparams: a hyperparameters object.
Returns:
a Tensor
""" |
return layer_prepostprocess(
layer_input,
layer_output,
sequence=hparams.layer_postprocess_sequence,
dropout_rate=hparams.layer_prepostprocess_dropout,
norm_type=hparams.norm_type,
depth=None,
epsilon=hparams.norm_epsilon,
dropout_broadcast_dims=comma_separated_string_to_integer_list(
getattr(hparams, "layer_prepostprocess_dropout_broadcast_dims", "")),
default_name="layer_postprocess") |
<SYSTEM_TASK:>
A block of convolutions.
<END_TASK>
<USER_TASK:>
Description:
def conv_block_internal(conv_fn,
inputs,
filters,
dilation_rates_and_kernel_sizes,
first_relu=True,
use_elu=False,
separabilities=None,
**kwargs):
"""A block of convolutions.
Args:
conv_fn: convolution function, e.g. conv or separable_conv.
inputs: a Tensor
filters: an Integer
dilation_rates_and_kernel_sizes: a list of tuples (dilation, (k_w, k_h))
first_relu: whether to do a relu at start (defaults to True)
use_elu: whether to use ELUs instead of ReLUs (defaults to False)
separabilities: list of separability factors (per-layer).
**kwargs: additional arguments (e.g., pooling)
Returns:
a Tensor.
""" |
name = kwargs.pop("name") if "name" in kwargs else None
mask = kwargs.pop("mask") if "mask" in kwargs else None
# Usage for normalizer_fn kwarg:
# if not specified, use layer norm
# if given normalizer_fn=None, don't use any normalization
# if given normalizer_fn=norm, use the specified norm function
use_layer_norm = "normalizer_fn" not in kwargs
norm = kwargs.pop("normalizer_fn", None)
use_normalizer_fn = use_layer_norm or norm
if use_layer_norm:
norm = lambda x, name: layer_norm(x, filters, name=name)
with tf.variable_scope(name, "conv_block", [inputs]):
cur, counter = inputs, -1
for dilation_rate, kernel_size in dilation_rates_and_kernel_sizes:
counter += 1
if first_relu or counter > 0:
cur = tf.nn.elu(cur) if use_elu else tf.nn.relu(cur)
if mask is not None:
cur *= mask
if separabilities:
cur = conv_fn(
cur,
filters,
kernel_size,
dilation_rate=dilation_rate,
name="conv_block_%d" % counter,
use_bias=norm is None,
separability=separabilities[counter],
**kwargs)
else:
cur = conv_fn(
cur,
filters,
kernel_size,
dilation_rate=dilation_rate,
name="conv_block_%d" % counter,
use_bias=norm is None,
**kwargs)
if use_normalizer_fn:
cur = norm(cur, name="conv_block_norm_%d" % counter)
return cur |
<SYSTEM_TASK:>
A block of standard 2d convolutions.
<END_TASK>
<USER_TASK:>
Description:
def conv_block(inputs, filters, dilation_rates_and_kernel_sizes, **kwargs):
"""A block of standard 2d convolutions.""" |
return conv_block_internal(conv, inputs, filters,
dilation_rates_and_kernel_sizes, **kwargs) |
<SYSTEM_TASK:>
Implements a downwards-striding conv block, like Xception exit flow.
<END_TASK>
<USER_TASK:>
Description:
def conv_block_downsample(x,
kernel,
strides,
padding,
separability=0,
name=None,
reuse=None):
"""Implements a downwards-striding conv block, like Xception exit flow.""" |
with tf.variable_scope(
name, default_name="conv_block_downsample", values=[x], reuse=reuse):
hidden_size = int(x.get_shape()[-1])
res = conv_block(
x,
int(1.25 * hidden_size), [((1, 1), kernel)],
padding=padding,
strides=strides,
name="res_conv")
x = subseparable_conv_block(
x,
hidden_size, [((1, 1), kernel)],
padding=padding,
separability=separability,
name="conv0")
x = subseparable_conv_block(
x,
int(1.25 * hidden_size), [((1, 1), kernel)],
padding=padding,
separability=separability,
name="conv1")
x = pool(x, kernel, "MAX", padding, strides=strides)
x += res
x = subseparable_conv_block(
x,
2 * hidden_size, [((1, 1), kernel)],
first_relu=False,
padding=padding,
separability=separability,
name="conv2")
x = subseparable_conv_block(
x,
int(2.5 * hidden_size), [((1, 1), kernel)],
padding=padding,
separability=separability,
name="conv3")
return x |
<SYSTEM_TASK:>
Create Tensor of sinusoids of different frequencies.
<END_TASK>
<USER_TASK:>
Description:
def get_timing_signal(length,
min_timescale=1,
max_timescale=1e4,
num_timescales=16):
"""Create Tensor of sinusoids of different frequencies.
Args:
length: Length of the Tensor to create, i.e. Number of steps.
min_timescale: a float
max_timescale: a float
num_timescales: an int
Returns:
Tensor of shape (length, 2*num_timescales)
""" |
positions = to_float(tf.range(length))
log_timescale_increment = (
math.log(max_timescale / min_timescale) / (num_timescales - 1))
inv_timescales = min_timescale * tf.exp(
to_float(tf.range(num_timescales)) * -log_timescale_increment)
scaled_time = tf.expand_dims(positions, 1) * tf.expand_dims(inv_timescales, 0)
return tf.concat([tf.sin(scaled_time), tf.cos(scaled_time)], axis=1) |
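For intuition, a NumPy sketch of the same computation (same defaults assumed); the output has shape (length, 2 * num_timescales):

import math
import numpy as np

def np_timing_signal(length, min_timescale=1.0, max_timescale=1e4, num_timescales=16):
    positions = np.arange(length, dtype=np.float64)
    log_inc = math.log(max_timescale / min_timescale) / (num_timescales - 1)
    inv_timescales = min_timescale * np.exp(np.arange(num_timescales) * -log_inc)
    scaled_time = positions[:, None] * inv_timescales[None, :]
    return np.concatenate([np.sin(scaled_time), np.cos(scaled_time)], axis=1)

signal = np_timing_signal(10)  # shape (10, 32)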
<SYSTEM_TASK:>
Input embeddings -> padding mask.
<END_TASK>
<USER_TASK:>
Description:
def mask_from_embedding(emb):
"""Input embeddings -> padding mask.
We have hacked symbol_modality to return all-zero embeddings for padding.
Returns a mask with 0.0 in the padding positions and 1.0 elsewhere.
Args:
emb: a Tensor with shape [batch, width, height, depth].
Returns:
a 0.0/1.0 Tensor with shape [batch, width, height, 1].
""" |
return weights_nonzero(tf.reduce_sum(tf.abs(emb), axis=3, keepdims=True)) |
<SYSTEM_TASK:>
Compute the length of each sequence in the batch.
<END_TASK>
<USER_TASK:>
Description:
def length_from_embedding(emb):
"""Compute the length of each sequence in the batch.
Args:
emb: a sequence embedding Tensor with shape [batch, max_time, 1, depth].
Returns:
a Tensor with shape [batch].
""" |
return tf.cast(tf.reduce_sum(mask_from_embedding(emb), [1, 2, 3]), tf.int32) |
<SYSTEM_TASK:>
If necessary, zero out inputs to a conv for padding positions.
<END_TASK>
<USER_TASK:>
Description:
def maybe_zero_out_padding(inputs, kernel_size, nonpadding_mask):
"""If necessary, zero out inputs to a conv for padding positions.
Args:
inputs: a Tensor with shape [batch, length, ...]
kernel_size: an integer or pair of integers
nonpadding_mask: a Tensor with shape [batch, length]
Returns:
Tensor of the same shape as inputs.
""" |
if (kernel_size != 1 and kernel_size != (1, 1) and
nonpadding_mask is not None):
while nonpadding_mask.get_shape().ndims < inputs.get_shape().ndims:
nonpadding_mask = tf.expand_dims(nonpadding_mask, -1)
return inputs * nonpadding_mask
return inputs |
<SYSTEM_TASK:>
Dense layer with dropconnect.
<END_TASK>
<USER_TASK:>
Description:
def dense_dropconnect(inputs,
output_size,
dropconnect_dropout=0.0,
name="dense_dropconnect",
**kwargs):
"""Dense layer with dropconnect.""" |
if dropconnect_dropout != 0.0:
tf.logging.info("Applying dropconnect as the kernel regularization.")
kwargs["kernel_regularizer"] = functools.partial(
tf.nn.dropout, keep_prob=1.0 - dropconnect_dropout)
return dense(inputs, output_size, use_bias=True, name=name, **kwargs) |
<SYSTEM_TASK:>
Position-wise feed-forward GRU gates following the MPNN.
<END_TASK>
<USER_TASK:>
Description:
def gru_feedfwd(a_t, h_prev, filters, name=None):
"""position-wise Feed-fwd GRU gates following the MPNN.
Args:
a_t: Tensor of shape [batch, length, depth] of current input
h_prev: Tensor of shape [batch, length, depth] of prev input
filters: an integer specifying number of dimensions of the filters
name: A string
Returns:
h_t: [batch, length, filters] hidden state
""" |
with tf.variable_scope(name, default_name="GRU", values=[a_t, h_prev]):
# we use right matrix multiplication to handle batches
# W_z and W_r have shape 2d, d. U_z U_r have shape d,d
z_t = (
tf.sigmoid(
tpu_conv1d(a_t, filters, 1, padding="SAME", name="W_z") +
tpu_conv1d(h_prev, filters, 1, padding="SAME", name="U_z")))
r_t = (
tf.sigmoid(
tpu_conv1d(a_t, filters, 1, padding="SAME", name="W_r") +
tpu_conv1d(h_prev, filters, 1, padding="SAME", name="U_r")))
h_tilde = (
tf.tanh(
tpu_conv1d(a_t, filters, 1, padding="SAME", name="W") +
tpu_conv1d(r_t * h_prev, filters, 1, padding="SAME", name="U")))
h_t = (1. - z_t) * h_prev + z_t * h_tilde
return h_t |
<SYSTEM_TASK:>
Pad tensors x and y on axis 1 so that they have the same length.
<END_TASK>
<USER_TASK:>
Description:
def pad_to_same_length(x, y, final_length_divisible_by=1, axis=1):
"""Pad tensors x and y on axis 1 so that they have the same length.""" |
if axis not in [1, 2]:
raise ValueError("Only axis=1 and axis=2 supported for now.")
with tf.name_scope("pad_to_same_length", values=[x, y]):
x_length = shape_list(x)[axis]
y_length = shape_list(y)[axis]
if (isinstance(x_length, int) and isinstance(y_length, int) and
x_length == y_length and final_length_divisible_by == 1):
return x, y
max_length = tf.maximum(x_length, y_length)
if final_length_divisible_by > 1:
# Find the nearest larger-or-equal integer divisible by given number.
max_length += final_length_divisible_by - 1
max_length //= final_length_divisible_by
max_length *= final_length_divisible_by
length_diff1 = max_length - x_length
length_diff2 = max_length - y_length
def padding_list(length_diff, arg):
if axis == 1:
return [[[0, 0], [0, length_diff]],
tf.zeros([tf.rank(arg) - 2, 2], dtype=tf.int32)]
return [[[0, 0], [0, 0], [0, length_diff]],
tf.zeros([tf.rank(arg) - 3, 2], dtype=tf.int32)]
paddings1 = tf.concat(padding_list(length_diff1, x), axis=0)
paddings2 = tf.concat(padding_list(length_diff2, y), axis=0)
res_x = tf.pad(x, paddings1)
res_y = tf.pad(y, paddings2)
# Static shapes are the same except for axis=1.
x_shape = x.shape.as_list()
x_shape[axis] = None
res_x.set_shape(x_shape)
y_shape = y.shape.as_list()
y_shape[axis] = None
res_y.set_shape(y_shape)
return res_x, res_y |
<SYSTEM_TASK:>
Pad labels on the length dimension to match logits length.
<END_TASK>
<USER_TASK:>
Description:
def pad_with_zeros(logits, labels):
"""Pad labels on the length dimension to match logits length.""" |
with tf.name_scope("pad_with_zeros", values=[logits, labels]):
logits, labels = pad_to_same_length(logits, labels)
if len(labels.shape) == 3: # 2-d labels.
logits, labels = pad_to_same_length(logits, labels, axis=2)
return logits, labels |
<SYSTEM_TASK:>
Check that the value is nonnegative.
<END_TASK>
<USER_TASK:>
Description:
def check_nonnegative(value):
"""Check that the value is nonnegative.""" |
if isinstance(value, tf.Tensor):
with tf.control_dependencies([tf.assert_greater_equal(value, 0)]):
value = tf.identity(value)
elif value < 0:
raise ValueError("Value must be non-negative.")
return value |
<SYSTEM_TASK:>
Assign weight 1.0 to only examples from the given task.
<END_TASK>
<USER_TASK:>
Description:
def weights_multi_problem_all(labels, taskid=-1):
"""Assign weight 1.0 to only examples from the given task.""" |
taskid = check_nonnegative(taskid)
weights = to_float(tf.not_equal(labels, 0))
past_taskid = tf.cumsum(to_float(tf.equal(labels, taskid)), axis=1)
# Additionally zero out the task id location
past_taskid *= to_float(tf.not_equal(labels, taskid))
non_taskid = to_float(labels)
example_mask = to_float(tf.not_equal(past_taskid * non_taskid, 0))
example_mask = tf.reduce_sum(example_mask, axis=1)
example_mask = to_float(
tf.greater(example_mask, tf.zeros_like(example_mask)))
return weights * tf.expand_dims(example_mask, axis=-1) |
<SYSTEM_TASK:>
Assign weight 1.0 to only the inputs for the given task.
<END_TASK>
<USER_TASK:>
Description:
def weights_multi_problem_input(labels, taskid=-1):
"""Assign weight 1.0 to only the inputs for the given task.""" |
taskid = check_nonnegative(taskid)
weights_all_tokens = weights_multi_problem_all(labels, taskid)
weights_target = weights_multi_problem(labels, taskid)
return weights_all_tokens - weights_target |
<SYSTEM_TASK:>
Assign weight 1.0 to the "target" part of the concatenated labels.
<END_TASK>
<USER_TASK:>
Description:
def weights_concatenated(labels):
"""Assign weight 1.0 to the "target" part of the concatenated labels.
The labels look like:
source English I love you . ID1 target French Je t'aime . ID1 source
English the cat ID1 target French le chat ID1 source English ...
We want to assign weight 1.0 to all words in the target text (including the
ID1 end symbol), but not to the source text or the boilerplate. In the
above example, the target words that get positive weight are:
Je t'aime . ID1 le chat ID1
Args:
labels: a Tensor
Returns:
a Tensor
""" |
eos_mask = tf.to_int32(tf.equal(labels, 1))
sentence_num = tf.cumsum(eos_mask, axis=1, exclusive=True)
in_target = tf.equal(tf.mod(sentence_num, 2), 1)
# first two tokens of each sentence are boilerplate.
sentence_num_plus_one = sentence_num + 1
shifted = tf.pad(sentence_num_plus_one,
[[0, 0], [2, 0], [0, 0], [0, 0]])[:, :-2, :, :]
nonboilerplate = tf.equal(sentence_num_plus_one, shifted)
ret = to_float(tf.logical_and(nonboilerplate, in_target))
return ret |
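A NumPy walk-through of the docstring's example (a sketch only; the real labels tensor is 4-D [batch, length, 1, 1], flattened here to 2-D, with ID1 == 1 and all other ids arbitrary):

import numpy as np

# "source English I love you . ID1 target French Je t'aime . ID1"
labels = np.array([[7, 8, 20, 21, 22, 23, 1, 9, 10, 30, 31, 32, 1]])

eos_mask = (labels == 1).astype(np.int32)
sentence_num = np.cumsum(eos_mask, axis=1) - eos_mask        # exclusive cumsum
in_target = (sentence_num % 2) == 1
sentence_num_plus_one = sentence_num + 1
shifted = np.pad(sentence_num_plus_one, [[0, 0], [2, 0]])[:, :-2]
nonboilerplate = sentence_num_plus_one == shifted
weights = (nonboilerplate & in_target).astype(np.float32)
# weights is 1.0 exactly on "Je t'aime . ID1" (the last four positions).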
<SYSTEM_TASK:>
Discretized mixture of logistics loss.
<END_TASK>
<USER_TASK:>
Description:
def dml_loss(pred, labels, weights_fn=_weights_one_third, reduce_sum=True):
"""Discretized mixture of logistics loss.
Args:
pred: A [batch, height, width, num_mixtures*10] tensor of floats
comprising one unconstrained mixture probability, three means
(one per channel), three standard deviations (one per channel),
and three coefficients which linearly parameterize dependence across
channels.
labels: A [batch, height, width, channels] tensor of 8-bit pixel
intensities. The computation assumes channels is 3.
weights_fn: A function of labels, returning a Tensor of shape
[batch, height, width] which weights each loss term. Default is to scale
each loss term by 1/3 so that they capture the average across channels.
reduce_sum: A boolean, to return scalar loss instead of per position.
Returns:
Tuple of loss tensors for numerator and denominator, each a scalar if
reduce_sum else of shape [batch, height, width]. The sum of their divisions
is the number of nats for each pixel in labels.
""" |
real_labels = convert_rgb_to_symmetric_real(labels)
dml_loss_value = discretized_mix_logistic_loss(pred=pred, labels=real_labels)
weights = weights_fn(labels)
loss_num = weights * dml_loss_value
loss_den = weights_nonzero(weights)
if reduce_sum:
loss_num = tf.reduce_sum(loss_num)
loss_den = tf.reduce_sum(loss_den)
return loss_num, loss_den |
<SYSTEM_TASK:>
Splits input tensor into parameters of discretized mixture logistic.
<END_TASK>
<USER_TASK:>
Description:
def split_to_discretized_mix_logistic_params(inputs):
"""Splits input tensor into parameters of discretized mixture logistic.
Args:
inputs: A [batch, height, width, num_mixtures*10] tensor of floats
comprising one unconstrained mixture probability, three means
(one per channel), three standard deviations (one per channel),
and three coefficients which linearly parameterize dependence across
channels.
Returns:
Tuple of unconstrained mixture probabilities, locations, scales, and
coefficient parameters of the distribution. The mixture probability has
shape [batch, height, width, num_mixtures]. Other parameters have shape
[batch, height, width, num_mixtures, 3].
""" |
batch, height, width, output_dim = shape_list(inputs) # pylint: disable=unbalanced-tuple-unpacking
num_mixtures = output_dim // 10
logits, locs, log_scales, coeffs = tf.split(
inputs,
num_or_size_splits=[
num_mixtures, num_mixtures * 3, num_mixtures * 3, num_mixtures * 3
],
axis=-1)
split_shape = [batch, height, width, num_mixtures, 3]
locs = tf.reshape(locs, split_shape)
log_scales = tf.reshape(log_scales, split_shape)
log_scales = tf.maximum(log_scales, -7.)
coeffs = tf.reshape(coeffs, split_shape)
coeffs = tf.tanh(coeffs)
return logits, locs, log_scales, coeffs |
<SYSTEM_TASK:>
Computes negative log probability for the discretized mixture of logistics.
<END_TASK>
<USER_TASK:>
Description:
def discretized_mix_logistic_loss(pred, labels):
"""Computes negative log probability for the discretized mixture of logistics.
The distribution of a whole pixel is a mixture of 3-dimensional discretized
logistic distributions. The 3-D discretized logistic factorizes as 3 1-D
discretized logistic distributions, one for each channel. It defines
```none
P(X = x)
= sum_{k=1}^K probs[k] * P(X = x | locs[k], scales[k])
= sum_{k=1}^K probs[k] * [
prod_{c=1}^3 DiscretizedLogistic(X[c] = x[c] | means[k][c], scales[k]) ]
```
The means tensor is a linear combination of location parameters and previous
channels. The discretized logistic distribution assigns probability mass to an
event P(X=x) via logistic CDFs: P(X <= x + 0.5) - P(X < x - 0.5) for 1 < x <
254; P(X <= 0.5) for x = 0; and 1 - P(X < 254.5) for x = 255. Instead of
8-bit inputs, this implementation assumes the events are rescaled to [-1, 1].
Args:
pred: A [batch, height, width, num_mixtures*10] tensor of floats
comprising one unconstrained mixture probability, three means
(one per channel), three standard deviations (one per channel),
and three coefficients which linearly parameterize dependence across
channels.
labels: A [batch, height, width, channels] tensor of true pixel intensities
rescaled to [-1, 1]. The computation assumes channels is 3.
Returns:
A [batch, height, width] tensor of the negative log conditional probability
of each pixel given all previous pixels.
""" |
logits, locs, log_scales, coeffs = split_to_discretized_mix_logistic_params(
pred)
# Tile labels to broadcast compute across the mixture dimension.
batch, height, width, num_mixtures = shape_list(logits) # pylint: disable=unbalanced-tuple-unpacking
labels = tf.tile(
tf.reshape(labels, [batch, height, width, 1, 3]),
[1, 1, 1, num_mixtures, 1])
# p(x) = sigmoid((x - means_i + 1/255.)/scale_i) -
# sigmoid((x - means_i - 1/255.)/scale_i)
# for each channel i. The means are linearly parameterized.
means_0 = locs[..., 0]
means_1 = locs[..., 1] + coeffs[..., 0] * labels[..., 0]
means_2 = (
locs[..., 2] + coeffs[..., 1] * labels[..., 0] +
coeffs[..., 2] * labels[..., 1])
means = tf.stack([means_0, means_1, means_2], axis=-1)
centered_labels = labels - means
inv_stdv = tf.exp(-log_scales)
plus_in = inv_stdv * (centered_labels + 1. / 255.)
min_in = inv_stdv * (centered_labels - 1. / 255.)
cdf_plus = tf.nn.sigmoid(plus_in)
cdf_min = tf.nn.sigmoid(min_in)
# Compute log probability for edge case of 0 (before scaling), 255 (before
# scaling), and all other cases respectively.
log_prob_0 = plus_in - tf.nn.softplus(plus_in)
log_prob_255 = -tf.nn.softplus(min_in)
prob_event = tf.maximum(cdf_plus - cdf_min, 1e-12)
log_prob_event = tf.log(prob_event)
# Robustly select log-prob based on numerical edge-cases: (a) [-1, -1+eps);
# (b) (1-eps, 1]; (c) NaNs during `tf.gradients` of `tf.select`, which may
# cause `tf.log(0.)`; (d) p(x) < 1e-5.
mid_in = inv_stdv * centered_labels
log_prob_event_approx = (
mid_in - log_scales - 2. * tf.nn.softplus(mid_in) - np.log(127.5))
log_probs = tf.where(
labels < -0.999, log_prob_0,
tf.where(
labels > 0.999, log_prob_255,
tf.where(prob_event > 1e-5, log_prob_event, log_prob_event_approx)))
# Sum over channels and compute log-probability of each mixture.
log_probs = tf.reduce_sum(log_probs, -1) + tf.nn.log_softmax(logits, axis=-1)
output = -tf.reduce_logsumexp(log_probs, axis=-1)
return output |
<SYSTEM_TASK:>
Sampling from a discretized mixture of logistics.
<END_TASK>
<USER_TASK:>
Description:
def sample_from_discretized_mix_logistic(pred, seed=None):
"""Sampling from a discretized mixture of logistics.
Args:
pred: A [batch, height, width, num_mixtures*10] tensor of floats
comprising one unconstrained mixture probability, three means
(one per channel), three standard deviations (one per channel),
and three coefficients which linearly parameterize dependence across
channels.
seed: Random seed.
Returns:
A tensor of shape [batch, height, width, 3] with real intensities scaled
between -1 and 1.
""" |
logits, locs, log_scales, coeffs = split_to_discretized_mix_logistic_params(
pred)
# Sample mixture indicator given logits using the gumbel max trick.
num_mixtures = shape_list(logits)[-1]
gumbel_noise = -tf.log(-tf.log(
tf.random_uniform(
tf.shape(logits), minval=1e-5, maxval=1. - 1e-5, seed=seed)))
sel = tf.one_hot(
tf.argmax(logits + gumbel_noise, -1),
depth=num_mixtures,
dtype=tf.float32)
# Select mixture component's parameters.
sel = tf.expand_dims(sel, -1)
locs = tf.reduce_sum(locs * sel, 3)
log_scales = tf.reduce_sum(log_scales * sel, 3)
coeffs = tf.reduce_sum(coeffs * sel, 3)
# Sample from 3-D logistic & clip to interval. Note we don't round to the
# nearest 8-bit value when sampling.
uniform_noise = tf.random_uniform(
tf.shape(locs), minval=1e-5, maxval=1. - 1e-5, seed=seed)
logistic_noise = tf.log(uniform_noise) - tf.log1p(-uniform_noise)
x = locs + tf.exp(log_scales) * logistic_noise
x0 = x[..., 0]
x1 = x[..., 1] + coeffs[..., 0] * x0
x2 = x[..., 2] + coeffs[..., 1] * x0 + coeffs[..., 2] * x1
x = tf.stack([x0, x1, x2], axis=-1)
x = tf.clip_by_value(x, -1., 1.)
return x |
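The mixture indicator above is drawn with the Gumbel-max trick; a standalone NumPy sketch of just that step (hypothetical logits):

import numpy as np

rng = np.random.default_rng(0)
logits = np.log(np.array([0.2, 0.5, 0.3]))                    # mixture logits
uniform = rng.uniform(1e-5, 1.0 - 1e-5, size=logits.shape)
gumbel_noise = -np.log(-np.log(uniform))
component = int(np.argmax(logits + gumbel_noise))             # sampled mixture index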
<SYSTEM_TASK:>
Cross entropy with label smoothing to limit over-confidence.
<END_TASK>
<USER_TASK:>
Description:
def smoothing_cross_entropy(logits,
labels,
vocab_size,
confidence,
gaussian=False):
"""Cross entropy with label smoothing to limit over-confidence.
Args:
logits: Tensor of shape [batch_size, ?, ?, ?, vocab_size].
labels: Tensor of shape [batch_size, ?, ?, ?].
vocab_size: Tensor representing the size of the vocabulary.
confidence: Used to determine on and off values for label smoothing.
If `gaussian` is true, `confidence` is the variance to the Gaussian
distribution.
gaussian: Uses a Gaussian distribution for label smoothing
Returns:
Tensor of shape [batch_size, ?, ?, ?].
""" |
with tf.name_scope("smoothing_cross_entropy", values=[logits, labels]):
# Low confidence is given to all non-true labels, uniformly.
low_confidence = (1.0 - confidence) / to_float(vocab_size - 1)
# Normalizing constant is the best cross-entropy value with soft targets.
# We subtract it just for readability, makes no difference on learning.
normalizing = -(
confidence * tf.log(confidence) + to_float(vocab_size - 1) *
low_confidence * tf.log(low_confidence + 1e-20))
if gaussian and confidence > 0.0:
labels = tf.cast(labels, tf.float32)
normal_dist = tfp.distributions.Normal(loc=labels, scale=confidence)
# Locations to evaluate the probability distributions.
soft_targets = normal_dist.prob(
tf.cast(tf.range(vocab_size), tf.float32)[:, None, None, None, None])
# Reordering soft_targets from [vocab_size, batch_size, ?, ?, ?] to match
# logits: [batch_size, ?, ?, ?, vocab_size]
soft_targets = tf.transpose(soft_targets, perm=[1, 2, 3, 4, 0])
else:
soft_targets = tf.one_hot(
tf.cast(labels, tf.int32),
depth=vocab_size,
on_value=confidence,
off_value=low_confidence)
xentropy = tf.nn.softmax_cross_entropy_with_logits_v2(
logits=logits, labels=soft_targets)
return xentropy - normalizing |
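As a sketch of the non-Gaussian branch: with vocab_size=4 and confidence=0.9 (toy values), the soft target for a label of 2 is built as follows:

import numpy as np

vocab_size, confidence, label = 4, 0.9, 2
low_confidence = (1.0 - confidence) / (vocab_size - 1)
soft_target = np.full(vocab_size, low_confidence)
soft_target[label] = confidence
# soft_target == [0.0333..., 0.0333..., 0.9, 0.0333...]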
<SYSTEM_TASK:>
Pool elements across the last dimension.
<END_TASK>
<USER_TASK:>
Description:
def global_pool_1d(inputs, pooling_type="MAX", mask=None):
"""Pool elements across the last dimension.
Useful to convert a list of vectors into a single vector so as
to get a representation of a set.
Args:
inputs: A tensor of shape [batch_size, sequence_length, input_dims]
containing the sequences of input vectors.
pooling_type: the pooling type to use, MAX or AVR
mask: A tensor of shape [batch_size, sequence_length] containing a
mask for the inputs with 1's for existing elements, and 0's elsewhere.
Returns:
A tensor of shape [batch_size, input_dims] containing the sequences of
transformed vectors.
""" |
with tf.name_scope("global_pool", values=[inputs]):
if mask is not None:
mask = tf.expand_dims(mask, axis=2)
inputs = tf.multiply(inputs, mask)
if pooling_type == "MAX":
# A tf.pool can be used here, but reduce is cleaner
output = tf.reduce_max(inputs, axis=1)
elif pooling_type == "AVR":
if mask is not None:
# Some elems are dummy elems so we can't just reduce the average.
output = tf.reduce_sum(inputs, axis=1)
num_elems = tf.reduce_sum(mask, axis=1, keepdims=True)
output = tf.div(output, tf.maximum(num_elems, 1))
else:
output = tf.reduce_mean(inputs, axis=1)
return output |
<SYSTEM_TASK:>
Same global pool, but only for the elements up to the current element.
<END_TASK>
<USER_TASK:>
Description:
def running_global_pool_1d(inputs, pooling_type="MAX"):
"""Same global pool, but only for the elements up to the current element.
Useful for outputs where the state of future elements is not known.
Takes no mask as all elements up to the current element are assumed to exist.
Currently only supports maximum. Equivalent to using a lower triangle bias.
Args:
inputs: A tensor of shape [batch_size, sequence_length, input_dims]
containing the sequences of input vectors.
pooling_type: Pooling type to use. Currently only supports 'MAX'.
Returns:
A tensor of shape [batch_size, sequence_length, input_dims] containing the
running 'totals'.
""" |
del pooling_type
with tf.name_scope("running_global_pool", values=[inputs]):
scan_fct = tf.maximum
# Permute inputs so seq_length is first.
elems = tf.transpose(inputs, [1, 0, 2])
# Perform scan.
cumulatives = tf.scan(scan_fct, elems, swap_memory=True)
# Permute output to get back to original order.
output = tf.transpose(cumulatives, [1, 0, 2])
return output |
<SYSTEM_TASK:>
Basic layer type for doing funky things with sets.
<END_TASK>
<USER_TASK:>
Description:
def linear_set_layer(layer_size,
inputs,
context=None,
activation_fn=tf.nn.relu,
dropout=0.0,
name=None):
"""Basic layer type for doing funky things with sets.
Applies a linear transformation to each element in the input set.
If a context is supplied, it is concatenated with the inputs.
e.g. One can use global_pool_1d to get a representation of the set which
can then be used as the context for the next layer.
TODO: Add bias add (or control the biases used).
Args:
layer_size: Dimension to transform the input vectors to.
inputs: A tensor of shape [batch_size, sequence_length, input_dims]
containing the sequences of input vectors.
context: A tensor of shape [batch_size, context_dims] containing a global
statistic about the set.
activation_fn: The activation function to use.
dropout: Dropout probability.
name: name.
Returns:
Tensor of shape [batch_size, sequence_length, output_dims] containing the
sequences of transformed vectors.
""" |
with tf.variable_scope(
name, default_name="linear_set_layer", values=[inputs]):
# Apply 1D convolution to apply linear filter to each element
# along the 2nd dimension.
outputs = conv1d(inputs, layer_size, 1, activation=None, name="set_conv")
# Apply the context if it exists.
if context is not None:
# Unfortunately tf doesn't support broadcasting via concat, but we can
# simply add the transformed context to get the same effect.
if len(context.get_shape().as_list()) == 2:
context = tf.expand_dims(context, axis=1)
cont_tfm = conv1d(
context, layer_size, 1, activation=None, name="cont_conv")
outputs += cont_tfm
if activation_fn is not None:
outputs = activation_fn(outputs)
if dropout != 0.0:
outputs = tf.nn.dropout(outputs, 1.0 - dropout)
return outputs |
<SYSTEM_TASK:>
State container for fn_device_dependency.
<END_TASK>
<USER_TASK:>
Description:
def fn_device_dependency_dict():
"""State container for fn_device_dependency.""" |
default_graph = tf.get_default_graph()
if not hasattr(default_graph, "dependency_dict"):
default_graph.dependency_dict = collections.defaultdict(list)
return default_graph.dependency_dict |
<SYSTEM_TASK:>
Add control deps for name and device.
<END_TASK>
<USER_TASK:>
Description:
def fn_device_dependency(name, device=""):
"""Add control deps for name and device.""" |
key = name + "_" + device
outs = []
def body():
with tf.control_dependencies(fn_device_dependency_dict()[key]):
yield outs
assert outs
deps = outs
if isinstance(outs[0], (list, tuple)):
assert len(outs) == 1
deps = outs[0]
fn_device_dependency_dict()[key] = deps
if device:
with tf.device(device):
return body()
else:
return body() |
<SYSTEM_TASK:>
Find the underlying variable ref.
<END_TASK>
<USER_TASK:>
Description:
def underlying_variable_ref(t):
"""Find the underlying variable ref.
Traverses through Identity, ReadVariableOp, and Enter ops.
Stops when op type has Variable or VarHandle in name.
Args:
t: a Tensor
Returns:
a Tensor that is a variable ref, or None on error.
""" |
while t.op.type in ["Identity", "ReadVariableOp", "Enter"]:
t = t.op.inputs[0]
op_type = t.op.type
if "Variable" in op_type or "VarHandle" in op_type:
return t
else:
return None |
<SYSTEM_TASK:>
Find the underlying tf.Variable object.
<END_TASK>
<USER_TASK:>
Description:
def underlying_variable(t):
"""Find the underlying tf.Variable object.
Args:
t: a Tensor
Returns:
tf.Variable.
""" |
t = underlying_variable_ref(t)
assert t is not None
# make sure that the graph has a variable index and that it is up-to-date
if not hasattr(tf.get_default_graph(), "var_index"):
tf.get_default_graph().var_index = {}
var_index = tf.get_default_graph().var_index
for v in tf.global_variables()[len(var_index):]:
var_index[v.name] = v
return var_index[t.name] |
<SYSTEM_TASK:>
Split approximately equally into num_splits parts.
<END_TASK>
<USER_TASK:>
Description:
def approximate_split(x, num_splits, axis=0):
"""Split approximately equally into num_splits parts.
Args:
x: a Tensor
num_splits: an integer
axis: an integer.
Returns:
a list of num_splits Tensors.
""" |
size = shape_list(x)[axis]
size_splits = [tf.div(size + i, num_splits) for i in range(num_splits)]
return tf.split(x, size_splits, axis=axis) |
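A quick check of why the split is "approximately equal": the per-part sizes (size + i) // num_splits always sum back to size, e.g.:

size, num_splits = 10, 3
size_splits = [(size + i) // num_splits for i in range(num_splits)]  # [3, 3, 4]
assert sum(size_splits) == size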
<SYSTEM_TASK:>
Decorator to create a subgraph with a custom gradient function.
<END_TASK>
<USER_TASK:>
Description:
def fn_with_custom_grad(grad_fn, use_global_vars=False):
"""Decorator to create a subgraph with a custom gradient function.
The subgraph created by the decorated function is NOT put in a Defun and so
does not suffer from the limitations of the Defun (all subgraph ops on the
same device, no summaries).
Args:
grad_fn: function with signature
(inputs, variables, outputs, output_grads) -> (grad_inputs, grad_vars),
all of which are lists of Tensors.
use_global_vars: if True, variables will be the global variables created.
If False, will be the trainable variables.
Returns:
Decorator for function such that the gradient is defined by grad_fn.
""" |
def dec(fn):
@functools.wraps(fn)
def wrapped(*args):
return _fn_with_custom_grad(
fn, args, grad_fn, use_global_vars=use_global_vars)
return wrapped
return dec |
<SYSTEM_TASK:>
Create a subgraph with a custom gradient.
<END_TASK>
<USER_TASK:>
Description:
def _fn_with_custom_grad(fn, inputs, grad_fn, use_global_vars=False):
"""Create a subgraph with a custom gradient.
Args:
fn: function that takes inputs as arguments and produces 1 or more Tensors.
inputs: list<Tensor>, will be passed as fn(*inputs).
grad_fn: function with signature
(inputs, vars, outputs, output_grads) -> (grad_inputs, grad_vars),
all of which are lists of Tensors.
use_global_vars: if True, variables will be the global variables created.
If False, will be the trainable variables.
Returns:
fn(*inputs)
""" |
vs = tf.get_variable_scope()
get_vars_fn = (
vs.global_variables if use_global_vars else vs.trainable_variables)
len_before_vars = len(get_vars_fn())
inputs = list(inputs)
outputs = fn(*inputs)
train_vars = get_vars_fn()[len_before_vars:]
if grad_fn is None:
return outputs
if not isinstance(outputs, (tuple, list)):
outputs = [outputs]
outputs = list(outputs)
defun_inputs = [inputs, train_vars, outputs]
def custom_grad_fn(op, *dys):
"""Custom grad fn applying grad_fn for identity Defun."""
fn_inputs, fn_vars, fn_outputs = tf.contrib.framework.nest.pack_sequence_as(
defun_inputs, list(op.inputs))
dys = list(dys)
assert len(fn_outputs) == len(outputs)
assert len(fn_outputs) == len(dys)
grad_inputs, grad_vars = grad_fn(fn_inputs, fn_vars, fn_outputs, dys)
grad_outputs = [None] * len(fn_outputs)
return tuple(grad_inputs + grad_vars + grad_outputs)
# The Defun takes as input the original inputs, the trainable variables
# created in fn, and the outputs. In the forward it passes through the
# outputs. In the backwards, it produces gradients for the original inputs
# and the trainable variables.
in_types = [t.dtype for t in inputs]
out_types = [t.dtype for t in outputs]
var_types = [t.dtype for t in train_vars]
@function.Defun(
*(in_types + var_types + out_types),
func_name="identity_custom_grad%d" % ops.uid(),
python_grad_func=custom_grad_fn,
shape_func=lambda _: [t.get_shape() for t in outputs])
def identity(*args):
_, _, outs = tf.contrib.framework.nest.pack_sequence_as(defun_inputs, args)
return tuple([tf.identity(t) for t in outs])
flat_inputs = tf.contrib.framework.nest.flatten(defun_inputs)
id_out = identity(*flat_inputs)
return id_out |
<SYSTEM_TASK:>
Return list of dims, statically where possible.
<END_TASK>
<USER_TASK:>
Description:
def shape_list(x):
"""Return list of dims, statically where possible.""" |
x = tf.convert_to_tensor(x)
# If unknown rank, return dynamic shape
if x.get_shape().dims is None:
return tf.shape(x)
static = x.get_shape().as_list()
shape = tf.shape(x)
ret = []
for i, dim in enumerate(static):
if dim is None:
dim = shape[i]
ret.append(dim)
return ret |
<SYSTEM_TASK:>
Matrix band part of ones.
<END_TASK>
<USER_TASK:>
Description:
def ones_matrix_band_part(rows, cols, num_lower, num_upper, out_shape=None):
"""Matrix band part of ones.
Args:
rows: int determining number of rows in output
cols: int
num_lower: int, maximum distance backward. Negative values indicate
unlimited.
num_upper: int, maximum distance forward. Negative values indicate
unlimited.
out_shape: shape to reshape output by.
Returns:
Tensor of size rows * cols reshaped into shape out_shape.
""" |
if all([isinstance(el, int) for el in [rows, cols, num_lower, num_upper]]):
# Needed info is constant, so we construct in numpy
if num_lower < 0:
num_lower = rows - 1
if num_upper < 0:
num_upper = cols - 1
lower_mask = np.tri(cols, rows, num_lower).T
upper_mask = np.tri(rows, cols, num_upper)
band = np.ones((rows, cols)) * lower_mask * upper_mask
if out_shape:
band = band.reshape(out_shape)
band = tf.constant(band, tf.float32)
else:
band = tf.matrix_band_part(
tf.ones([rows, cols]), tf.cast(num_lower, tf.int64),
tf.cast(num_upper, tf.int64))
if out_shape:
band = tf.reshape(band, out_shape)
return band |
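For example, the constant branch above reduces to the following NumPy computation; with num_lower=-1 and num_upper=0 it yields the lower-triangular (causal) mask commonly used for self-attention:

import numpy as np

def np_ones_band_part(rows, cols, num_lower, num_upper):
    if num_lower < 0:
        num_lower = rows - 1
    if num_upper < 0:
        num_upper = cols - 1
    lower_mask = np.tri(cols, rows, num_lower).T
    upper_mask = np.tri(rows, cols, num_upper)
    return np.ones((rows, cols)) * lower_mask * upper_mask

print(np_ones_band_part(4, 4, -1, 0))
# [[1. 0. 0. 0.]
#  [1. 1. 0. 0.]
#  [1. 1. 1. 0.]
#  [1. 1. 1. 1.]]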
<SYSTEM_TASK:>
Reshapes a to match the shape of b.
<END_TASK>
<USER_TASK:>
Description:
def reshape_like_all_dims(a, b):
"""Reshapes a to match the shape of b.""" |
ret = tf.reshape(a, tf.shape(b))
if not tf.executing_eagerly():
ret.set_shape(b.get_shape())
return ret |
<SYSTEM_TASK:>
Decorator that recomputes the function on the backwards pass.
<END_TASK>
<USER_TASK:>
Description:
def recompute_grad(fn):
"""Decorator that recomputes the function on the backwards pass.
Args:
fn: a function that takes Tensors (all as positional arguments) and returns
a tuple of Tensors.
Returns:
A wrapped fn that is identical to fn when called, but its activations will
be discarded and recomputed on the backwards pass (i.e. on a call to
tf.gradients).
""" |
@functools.wraps(fn)
def wrapped(*args):
return _recompute_grad(fn, args)
return wrapped |
<SYSTEM_TASK:>
Multiply a batch of input matrices by a batch of parameter matrices.
<END_TASK>
<USER_TASK:>
Description:
def batch_dense(inputs,
units,
activation=None,
kernel_initializer=None,
reuse=None,
name=None):
"""Multiply a batch of input matrices by a batch of parameter matrices.
Each input matrix is multiplied by the corresponding parameter matrix.
This is useful in a mixture-of-experts where the batch represents different
experts with different inputs.
Args:
inputs: a Tensor with shape [batch, length, input_units]
units: an integer
activation: an optional activation function to apply to the output
kernel_initializer: an optional initializer
reuse: whether to reuse the variable scope
name: an optional string
Returns:
a Tensor with shape [batch, length, units]
Raises:
ValueError: if the "batch" or "input_units" dimensions of inputs are not
statically known.
""" |
inputs_shape = shape_list(inputs)
if len(inputs_shape) != 3:
raise ValueError("inputs must have 3 dimensions")
batch = inputs_shape[0]
input_units = inputs_shape[2]
if not isinstance(batch, int) or not isinstance(input_units, int):
raise ValueError("inputs must have static dimensions 0 and 2")
with tf.variable_scope(
name,
default_name="batch_dense",
values=[inputs],
reuse=reuse,
dtype=inputs.dtype):
if kernel_initializer is None:
kernel_initializer = tf.random_normal_initializer(
stddev=input_units**-0.5)
w = tf.get_variable(
"w", [batch, input_units, units],
initializer=kernel_initializer,
dtype=inputs.dtype)
y = tf.matmul(inputs, w)
if activation is not None:
y = activation(y)
return y |
<SYSTEM_TASK:>
Mix x1 and x2, starting with x2 and gradually moving towards x1.
<END_TASK>
<USER_TASK:>
Description:
def mix(x1,
x2,
steps,
is_training,
min_prob=0.0,
max_prob=1.0,
mode="lin",
simple=False,
broadcast_last=False):
"""Mix starting with x2, mixing mixing, going towards x1.""" |
with tf.name_scope("mix"):
if not is_training:
if max_prob >= 1.0:
return x1
alpha_shape = shape_list(x1)
if broadcast_last:
alpha_shape = alpha_shape[:-1] + [1]
alpha = tf.random_uniform(alpha_shape)
alpha = to_float(tf.less(alpha, max_prob))
return alpha * x1 + (1.0 - alpha) * x2
def get_res():
"""Create the result.
Separate function to speed it up later (see below).
Returns:
Tensor of mixed inputs.
"""
if mode == "lin":
alpha_p = inverse_lin_decay(steps)
else:
alpha_p = inverse_exp_decay(steps)
alpha_p = alpha_p * (max_prob - min_prob) + min_prob
if simple:
return alpha_p * x1 + (1.0 - alpha_p) * x2
alpha_shape = shape_list(x1)
if broadcast_last:
alpha_shape = alpha_shape[:-1] + [1]
alpha = tf.random_uniform(alpha_shape)
alpha = to_float(tf.less(alpha, alpha_p))
return alpha * x1 + (1.0 - alpha) * x2
if max_prob < 1.0:
return get_res()
# Prevent sampling after steps is passed to speed it up.
if is_xla_compiled():
return get_res()
else:
cur_step = tf.train.get_global_step()
if cur_step is None:
return x1 # Step not available, probably eval mode, don't mix.
return tf.cond(tf.less(cur_step, steps), get_res, lambda: x1) |
<SYSTEM_TASK:>
Gaussian Error Linear Unit.
<END_TASK>
<USER_TASK:>
Description:
def gelu(x):
"""Gaussian Error Linear Unit.
This is a smoother version of the RELU.
Original paper: https://arxiv.org/abs/1606.08415
Args:
x: float Tensor to perform activation.
Returns:
x with the GELU activation applied.
""" |
cdf = 0.5 * (1.0 + tf.tanh(
(np.sqrt(2 / np.pi) * (x + 0.044715 * tf.pow(x, 3)))))
return x * cdf |
<SYSTEM_TASK:>
Argmax along with the value.
<END_TASK>
<USER_TASK:>
Description:
def argmax_with_score(logits, axis=None):
"""Argmax along with the value.""" |
axis = axis or len(logits.get_shape()) - 1
predictions = tf.argmax(logits, axis=axis)
logits_shape = shape_list(logits)
prefix_shape, vocab_size = logits_shape[:-1], logits_shape[-1]
prefix_size = 1
for d in prefix_shape:
prefix_size *= d
# Flatten to extract scores
flat_logits = tf.reshape(logits, [prefix_size, vocab_size])
flat_predictions = tf.reshape(predictions, [prefix_size])
flat_indices = tf.stack(
[tf.range(tf.to_int64(prefix_size)),
tf.to_int64(flat_predictions)],
axis=1)
flat_scores = tf.gather_nd(flat_logits, flat_indices)
# Unflatten
scores = tf.reshape(flat_scores, prefix_shape)
return predictions, scores |
<SYSTEM_TASK:>
Compute the k-th top element of x on the last axis iteratively.
<END_TASK>
<USER_TASK:>
Description:
def top_kth_iterative(x, k):
"""Compute the k-th top element of x on the last axis iteratively.
This assumes values in x are non-negative, rescale if needed.
It is often faster than tf.nn.top_k for small k, especially if k < 30.
Note: this does not support back-propagation, it stops gradients!
Args:
x: a Tensor of non-negative numbers of type float.
k: a python integer.
Returns:
a float tensor of the same shape as x but with 1 on the last axis
that contains the k-th largest number in x.
""" |
# The iterative computation is as follows:
#
# cur_x = x
# for _ in range(k):
# top_x = maximum of elements of cur_x on the last axis
# cur_x = cur_x where cur_x < top_x and 0 everywhere else (top elements)
#
# We encode this computation in a TF graph using tf.foldl, so the inner
# part of the above loop is called "next_x" and tf.foldl does the loop.
def next_x(cur_x, _):
top_x = tf.reduce_max(cur_x, axis=-1, keep_dims=True)
return cur_x * to_float(cur_x < top_x)
# We only do k-1 steps of the loop and compute the final max separately.
fin_x = tf.foldl(next_x, tf.range(k - 1), initializer=tf.stop_gradient(x),
parallel_iterations=2, back_prop=False)
return tf.stop_gradient(tf.reduce_max(fin_x, axis=-1, keep_dims=True)) |
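The loop described in the comment above can be mirrored in a few lines of NumPy (a sketch, assuming non-negative inputs as the docstring requires):

import numpy as np

def np_top_kth(x, k):
    cur = np.asarray(x, dtype=np.float32)
    for _ in range(k - 1):
        top = cur.max(axis=-1, keepdims=True)
        cur = cur * (cur < top)            # zero out the current maxima
    return cur.max(axis=-1, keepdims=True)

print(np_top_kth([[5., 2., 9., 7.]], 2))   # [[7.]] -- the 2nd largest value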
<SYSTEM_TASK:>
Find max and argmax over the last dimension.
<END_TASK>
<USER_TASK:>
Description:
def top_1_tpu(inputs):
"""find max and argmax over the last dimension.
Works well on TPU
Args:
inputs: A tensor with shape [..., depth]
Returns:
values: a Tensor with shape [...]
indices: a Tensor with shape [...]
""" |
inputs_max = tf.reduce_max(inputs, axis=-1, keepdims=True)
mask = tf.to_int32(tf.equal(inputs_max, inputs))
index = tf.range(tf.shape(inputs)[-1]) * mask
return tf.squeeze(inputs_max, -1), tf.reduce_max(index, axis=-1) |
<SYSTEM_TASK:>
Use indices to index into the last axis of x.
<END_TASK>
<USER_TASK:>
Description:
def index_last_dim_with_indices(x, indices):
"""Use indices to index into the last axis of x.
This can be useful for recovering the actual probabilities of a sample from a
probability distribution.
Args:
x: Tensor, n-d.
indices: Tensor, (n-1)-d, where the dimension sizes match the first (n-1)
dimensions of x. The values of indices will be used to index into the last
axis of x.
Returns:
Tensor, (n-1)-d.
""" |
assert len(x.shape) == len(indices.shape) + 1
x_shape = shape_list(x)
vocab_size = x_shape[-1]
flat_x = tf.reshape(x, [list_product(x_shape[:-1]), vocab_size])
flat_indices = tf.reshape(indices, [list_product(x_shape[:-1])])
idx = tf.stack(
[
tf.range(tf.to_int64(shape_list(flat_indices)[0])),
tf.to_int64(flat_indices)
],
axis=1)
flat_x_idx = tf.gather_nd(flat_x, idx)
x_idx = tf.reshape(flat_x_idx, x_shape[:-1])
return x_idx |
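In NumPy terms the function computes a batched gather along the last axis; a sketch of the equivalent operation for recovering sampled probabilities (toy values):

import numpy as np

probs = np.array([[0.1, 0.7, 0.2],
                  [0.5, 0.3, 0.2]])                  # x: [batch, vocab]
samples = np.array([1, 0])                           # indices: [batch]
picked = probs[np.arange(len(samples)), samples]     # [0.7, 0.5]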
<SYSTEM_TASK:>
Is this an appropriate context to generate summaries.
<END_TASK>
<USER_TASK:>
Description:
def should_generate_summaries():
"""Is this an appropriate context to generate summaries.
Returns:
a boolean
""" |
name_scope = tf.contrib.framework.get_name_scope()
if name_scope and "while/" in name_scope:
# Summaries don't work well within tf.while_loop()
return False
if tf.get_variable_scope().reuse:
# Avoid generating separate summaries for different data shards
return False
return True |
<SYSTEM_TASK:>
Reshapes a to match the shape of b in all but the last dimension.
<END_TASK>
<USER_TASK:>
Description:
def reshape_like(a, b):
"""Reshapes a to match the shape of b in all but the last dimension.""" |
ret = tf.reshape(a, tf.concat([tf.shape(b)[:-1], tf.shape(a)[-1:]], 0))
if not tf.executing_eagerly():
ret.set_shape(b.get_shape().as_list()[:-1] + a.get_shape().as_list()[-1:])
return ret |
<SYSTEM_TASK:>
Summarize the video using image summaries starting with prefix.
<END_TASK>
<USER_TASK:>
Description:
def summarize_video(video, prefix, max_outputs=1):
"""Summarize the video using image summaries starting with prefix.""" |
video_shape = shape_list(video)
if len(video_shape) != 5:
raise ValueError("Assuming videos given as tensors in the format "
"[batch, time, height, width, channels] but got one "
"of shape: %s" % str(video_shape))
if tf.executing_eagerly():
return
if video.get_shape().as_list()[1] is None:
tf.summary.image(
"%s_last_frame" % prefix,
tf.cast(video[:, -1, :, :, :], tf.uint8),
max_outputs=max_outputs)
else:
for k in range(video_shape[1]):
tf.summary.image(
"%s_frame_%d" % (prefix, k),
tf.cast(video[:, k, :, :, :], tf.uint8),
max_outputs=max_outputs) |
<SYSTEM_TASK:>
Pad x to be even-sized on axis 1 and 2, but only if necessary.
<END_TASK>
<USER_TASK:>
Description:
def make_even_size(x):
"""Pad x to be even-sized on axis 1 and 2, but only if necessary.""" |
x_shape = x.get_shape().as_list()
assert len(x_shape) > 2, "Only 3+-dimensional tensors supported."
shape = [dim if dim is not None else -1 for dim in x_shape]
new_shape = x_shape # To make sure constant shapes remain constant.
if x_shape[1] is not None:
new_shape[1] = 2 * int(math.ceil(x_shape[1] * 0.5))
if x_shape[2] is not None:
new_shape[2] = 2 * int(math.ceil(x_shape[2] * 0.5))
if shape[1] % 2 == 0 and shape[2] % 2 == 0:
return x
if shape[1] % 2 == 0:
x, _ = pad_to_same_length(x, x, final_length_divisible_by=2, axis=2)
x.set_shape(new_shape)
return x
if shape[2] % 2 == 0:
x, _ = pad_to_same_length(x, x, final_length_divisible_by=2, axis=1)
x.set_shape(new_shape)
return x
x, _ = pad_to_same_length(x, x, final_length_divisible_by=2, axis=1)
x, _ = pad_to_same_length(x, x, final_length_divisible_by=2, axis=2)
x.set_shape(new_shape)
return x |
<SYSTEM_TASK:>
Generalized convolution layer.
<END_TASK>
<USER_TASK:>
Description:
def general_conv(x,
num_filters=64,
filter_size=7,
stride=1,
stddev=0.02,
padding="VALID",
name="conv",
do_norm="instance",
do_relu=True,
relufactor=0):
"""Generalized convolution layer.""" |
with tf.variable_scope(name):
x = layers().Conv2D(
num_filters,
filter_size,
stride,
padding,
activation=None,
kernel_initializer=tf.truncated_normal_initializer(stddev=stddev),
bias_initializer=tf.constant_initializer(0.0))(x)
if do_norm == "layer":
x = layer_norm(x)
elif do_norm == "instance":
x = instance_norm(x)
if do_relu:
if relufactor == 0:
x = tf.nn.relu(x, "relu")
else:
x = lrelu(x, leak=relufactor)
return x |
<SYSTEM_TASK:>
Patch discriminator.
<END_TASK>
<USER_TASK:>
Description:
def patch_discriminator(x, filters=64, filter_size=5, n=4,
name="patch_discrim"):
"""Patch descriminator.""" |
with tf.variable_scope(name):
x_shape = shape_list(x)
spatial_dims = [x_shape[1] // 4, x_shape[2] // 4]
x = tf.random_crop(x, [x_shape[0]] + spatial_dims + [x_shape[3]])
for i in range(n):
x = general_conv(
x=x,
num_filters=filters * 2**i,
filter_size=filter_size,
stride=2 if i != n - 1 else 1,
stddev=0.02,
padding="SAME",
name="c%d" % i,
do_norm="instance" if i != 0 else False,
do_relu=i != n - 1,
relufactor=0.2)
x = tf.reduce_mean(x, [1, 2])
return x |
<SYSTEM_TASK:>
Mean and attention to reduce spatial dimensions.
<END_TASK>
<USER_TASK:>
Description:
def mean_with_attention(x, name, num_heads=4):
"""Mean and attention to reduce spatial dimensions.""" |
with tf.variable_scope(name):
shape = shape_list(x)
m = tf.reduce_mean(x, [1, 2])
a = layers().Dense(num_heads, name="mean_attn")(x)
s = tf.reshape(a, [shape[0], -1, num_heads])
s = tf.nn.softmax(s, axis=1)
s = tf.reshape(s, shape[:-1] + [1, num_heads])
am = tf.reduce_mean(tf.expand_dims(x, axis=-1) * s, [1, 2])
l = tf.concat([am, tf.expand_dims(m, axis=-1)], axis=-1)
return layers().Dense(2 * shape[-1], name="mean_attn_final")(
tf.reshape(l, [shape[0], (num_heads+1) * shape[-1]])) |
<SYSTEM_TASK:>
A simple single-layer convolutional discriminator.
<END_TASK>
<USER_TASK:>
Description:
def single_discriminator(x, filters=128, kernel_size=8,
strides=4, pure_mean=False):
"""A simple single-layer convolutional discriminator.""" |
with tf.variable_scope("discriminator"):
net = layers().Conv2D(
filters, kernel_size, strides=strides, padding="SAME", name="conv1")(x)
if pure_mean:
net = tf.reduce_mean(net, [1, 2])
else:
net = mean_with_attention(net, "mean_with_attention")
return net |
<SYSTEM_TASK:>
A convolutional discriminator with 2 layers and concatenated output.
<END_TASK>
<USER_TASK:>
Description:
def double_discriminator(x, filters1=128, filters2=None,
kernel_size=8, strides=4, pure_mean=False):
"""A convolutional discriminator with 2 layers and concatenated output.""" |
if filters2 is None:
filters2 = 4 * filters1
with tf.variable_scope("discriminator"):
batch_size = shape_list(x)[0]
net = layers().Conv2D(
filters1, kernel_size, strides=strides, padding="SAME", name="conv1")(x)
if pure_mean:
net1 = tf.reduce_mean(net, [1, 2])
else:
net1 = mean_with_attention(net, "mean_with_attention1")
tf.reshape(net, [batch_size, -1])  # Result is unused.
net = tf.nn.relu(net)
net = layers().Conv2D(
filters2, kernel_size, strides=strides, padding="SAME", name="conv2")(x)
if pure_mean:
net2 = tf.reduce_mean(net, [1, 2])
else:
net2 = mean_with_attention(net, "mean_with_attention2")
return tf.concat([net1, net2], axis=-1) |
<SYSTEM_TASK:>
Upscaling the image by a factor of f.
<END_TASK>
<USER_TASK:>
Description:
def upscale(inputs, f, method=tf.image.ResizeMethod.NEAREST_NEIGHBOR):
"""Upscaling the image by a factor of f.""" |
height, width = shape_list(inputs)[1:3] # pylint: disable=unbalanced-tuple-unpacking
return tf.image.resize_images(inputs, (height * f, width * f), method) |
<SYSTEM_TASK:>
Upsamples the given inputs.
<END_TASK>
<USER_TASK:>
Description:
def cyclegan_upsample(net, num_outputs, stride, method="conv2d_transpose"):
"""Upsamples the given inputs.
Args:
net: A Tensor of size [batch_size, height, width, filters].
num_outputs: The number of output filters.
stride: A list of 2 scalars or a 1x2 Tensor indicating the scale,
relative to the inputs, of the output dimensions. For example, if kernel
size is [2, 3], then the output height and width will be twice and three
times the input size.
method: The upsampling method: 'nn_upsample_conv',
'bilinear_upsample_conv', or 'conv2d_transpose'.
Returns:
A Tensor which was upsampled using the specified method.
Raises:
ValueError: if `method` is not recognized.
""" |
with tf.variable_scope("upconv"):
net_shape = tf.shape(net)
height = net_shape[1]
width = net_shape[2]
# Reflection pad by 1 in spatial dimensions (axes 1, 2 = h, w) to make a
# 3x3 "valid" convolution produce an output with the same dimension as the
# input.
spatial_pad_1 = np.array([[0, 0], [1, 1], [1, 1], [0, 0]])
if method == "nn_upsample_conv":
net = tf.image.resize_nearest_neighbor(
net, [stride[0] * height, stride[1] * width])
net = tf.pad(net, spatial_pad_1, "REFLECT")
net = layers().Conv2D(
num_outputs, (3, 3), activation=tf.nn.relu)(net)
elif method == "bilinear_upsample_conv":
net = tf.image.resize_bilinear(net,
[stride[0] * height, stride[1] * width])
net = tf.pad(net, spatial_pad_1, "REFLECT")
net = layers().Conv2D(
num_outputs, (3, 3), activation=tf.nn.relu)(net)
elif method == "conv2d_transpose":
# This corrects 1 pixel offset for images with even width and height.
# conv2d is left aligned and conv2d_transpose is right aligned for even
# sized images (while doing "SAME" padding).
# Note: This doesn't reflect the actual model in the paper.
net = layers().Conv2DTranspose(
num_outputs, (3, 3), strides=stride, activation=tf.nn.relu)(net)
net = net[:, 1:, 1:, :]
else:
raise ValueError("Unknown method: [%s]" % method)
return net |
<SYSTEM_TASK:>
Apply targeted dropout to the weights of a convolution.
<END_TASK>
<USER_TASK:>
Description:
def td_conv(inputs,
filters,
kernel_size,
targeting_count,
targeting_fn,
keep_prob,
is_training,
do_prune=True,
strides=(1, 1),
padding="valid",
data_format="channels_last",
dilation_rate=(1, 1),
activation=None,
use_bias=True,
kernel_initializer=None,
bias_initializer=tf.zeros_initializer(),
name=None,
reuse=None):
"""Apply targeted dropout to the weights of a convolution.""" |
with tf.variable_scope(name, default_name="td_conv", reuse=reuse):
nhwc = data_format == "channels_last"
in_dim = shape_list(inputs)[-1] if nhwc else shape_list(inputs)[1]
kernel_shape = [kernel_size, kernel_size, in_dim, filters]
w = tf.get_variable(
"DW", shape=kernel_shape, initializer=kernel_initializer)
if use_bias:
b = tf.get_variable("b", shape=[filters], initializer=bias_initializer)
if keep_prob < 1.0:
w = targeted_dropout(
w,
targeting_count,
keep_prob,
targeting_fn,
is_training,
do_prune=do_prune)
if isinstance(strides, int):
strides = [strides, strides]
if isinstance(dilation_rate, int):
dilation_rate = [dilation_rate, dilation_rate]
if nhwc:
strides = [1, strides[0], strides[1], 1]
dilation_rate = [1, dilation_rate[0], dilation_rate[1], 1]
else:
strides = [1, 1, strides[0], strides[1]]
dilation_rate = [1, 1, dilation_rate[0], dilation_rate[1]]
y = tf.nn.conv2d(
inputs,
w,
strides,
padding,
data_format="NHWC" if nhwc else "NCHW",
dilations=dilation_rate,
name=None)
if use_bias:
y += b
if activation:
y = activation(y)
return y |
<SYSTEM_TASK:>
Applies targeted dropout.
<END_TASK>
<USER_TASK:>
Description:
def targeted_dropout(inputs,
k,
keep_prob,
targeting_fn,
is_training,
do_prune=False):
"""Applies targeted dropout.
Applies dropout at a rate of `1 - keep_prob` to only those elements of
`inputs` marked by `targeting_fn`. See below and paper for more detail:
"Targeted Dropout for Posthoc Pruning" Aidan N. Gomez, Ivan Zhang,
Kevin Swersky, Yarin Gal, and Geoffrey E. Hinton.
Args:
inputs: Tensor, inputs to apply targeted dropout to.
k: Scalar Tensor or python scalar, sets the number of elements to target in
`inputs`. Must be within `[0, tf.shape(x)[-1]]` and compatible with
second argument of `targeting_fn`.
keep_prob: Scalar Tensor, passed as `tf.nn.dropout`'s `keep_prob` argument.
targeting_fn: callable `fn(inputs, k) -> Boolean Tensor`, produces a
boolean mask the same shape as `inputs` where True indicates an element
will be dropped, and False not.
is_training: bool, indicates whether currently training.
do_prune: bool, indicates whether to prune the `k * (1 - keep_prob)`
elements of `inputs` expected to be dropped each forwards pass.
Returns:
Tensor, same shape and dtype as `inputs`.
""" |
if not is_training and do_prune:
k = tf.round(to_float(k) * to_float(1. - keep_prob))
mask = targeting_fn(inputs, k)
mask = tf.cast(mask, inputs.dtype)
if is_training:
return inputs * (1 - mask) + tf.nn.dropout(inputs, keep_prob) * mask
elif do_prune:
return inputs * (1 - mask)
else:
return inputs |
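A hypothetical `targeting_fn` (not part of this module) that marks the k smallest-magnitude entries along the last axis as drop candidates could look like this sketch:

import tensorflow as tf

def smallest_magnitude_targeting_fn(weights, k):
    """Marks the k entries with the smallest |value| along the last axis."""
    neg_magnitude = -tf.abs(weights)
    # The k-th largest of the negated magnitudes is the k-th smallest magnitude.
    threshold = tf.nn.top_k(neg_magnitude, k=k).values[..., -1:]
    return neg_magnitude >= threshold  # True where the entry may be dropped.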
<SYSTEM_TASK:>
Generate weights with normalization.
<END_TASK>
<USER_TASK:>
Description:
def _compute_weights(self):
"""Generate weights with normalization.""" |
with tf.variable_scope("compute_weights"):
self.layer.kernel = tf.nn.l2_normalize(
self.layer.v, axis=self.norm_axes) * self.layer.g |
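# For intuition, this computes weight normalization, w = g * v / ||v||_2, with
# the norm taken over self.norm_axes (tf.nn.l2_normalize also adds a small
# epsilon for numerical stability). A NumPy sketch of the same arithmetic;
# the shapes and axes below are assumed for illustration only.
import numpy as np
v = np.random.randn(3, 3, 16, 32)      # e.g. a conv kernel (direction)
g = np.random.randn(32)                # per-output-channel scale
norm_axes = (0, 1, 2)                  # all axes except the output axis
w = g * v / np.sqrt(np.sum(v ** 2, axis=norm_axes, keepdims=True))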
<SYSTEM_TASK:>
Set the norm of the weight vector.
<END_TASK>
<USER_TASK:>
Description:
def _init_norm(self, weights):
"""Set the norm of the weight vector.""" |
with tf.variable_scope("init_norm"):
flat = tf.reshape(weights, [-1, self.layer_depth])
return tf.reshape(tf.norm(flat, axis=0), (self.layer_depth,)) |
<SYSTEM_TASK:>
Data dependent initialization for eager execution.
<END_TASK>
<USER_TASK:>
Description:
def _data_dep_init(self, inputs):
"""Data dependent initialization for eager execution.""" |
with tf.variable_scope("data_dep_init"):
# Generate data dependent init values
activation = self.layer.activation
self.layer.activation = None
x_init = self.layer.call(inputs)
m_init, v_init = tf.moments(x_init, self.norm_axes)
scale_init = 1. / tf.sqrt(v_init + 1e-10)
# Assign data dependent init values
self.layer.g = self.layer.g * scale_init
self.layer.bias = (-m_init * scale_init)
self.layer.activation = activation
self.initialized = True |
<SYSTEM_TASK:>
Calculate mean rewards from given epoch.
<END_TASK>
<USER_TASK:>
Description:
def compute_mean_reward(rollouts, clipped):
"""Calculate mean rewards from given epoch.""" |
reward_name = "reward" if clipped else "unclipped_reward"
rewards = []
for rollout in rollouts:
if rollout[-1].done:
rollout_reward = sum(getattr(frame, reward_name) for frame in rollout)
rewards.append(rollout_reward)
if rewards:
mean_rewards = np.mean(rewards)
else:
mean_rewards = 0
return mean_rewards |
<SYSTEM_TASK:>
Evaluate the PPO agent in the real environment.
<END_TASK>
<USER_TASK:>
Description:
def evaluate_single_config(
hparams, sampling_temp, max_num_noops, agent_model_dir,
eval_fn=_eval_fn_with_learner
):
"""Evaluate the PPO agent in the real environment.""" |
tf.logging.info("Evaluating metric %s", get_metric_name(
sampling_temp, max_num_noops, clipped=False
))
eval_hparams = trainer_lib.create_hparams(hparams.base_algo_params)
env = setup_env(
hparams, batch_size=hparams.eval_batch_size, max_num_noops=max_num_noops,
rl_env_max_episode_steps=hparams.eval_rl_env_max_episode_steps,
env_name=hparams.rl_env_name)
env.start_new_epoch(0)
eval_fn(env, hparams, eval_hparams, agent_model_dir, sampling_temp)
rollouts = env.current_epoch_rollouts()
env.close()
return tuple(
compute_mean_reward(rollouts, clipped) for clipped in (True, False)
) |
<SYSTEM_TASK:>
Evaluate the agent with multiple eval configurations.
<END_TASK>
<USER_TASK:>
Description:
def evaluate_all_configs(
hparams, agent_model_dir, eval_fn=_eval_fn_with_learner
):
"""Evaluate the agent with multiple eval configurations.""" |
metrics = {}
# Iterate over all combinations of sampling temperatures and whether to do
# initial no-ops.
for sampling_temp in hparams.eval_sampling_temps:
# Iterate over a set so if eval_max_num_noops == 0 then it's 1 iteration.
for max_num_noops in set([hparams.eval_max_num_noops, 0]):
scores = evaluate_single_config(
hparams, sampling_temp, max_num_noops, agent_model_dir, eval_fn
)
for (score, clipped) in zip(scores, (True, False)):
metric_name = get_metric_name(sampling_temp, max_num_noops, clipped)
metrics[metric_name] = score
return metrics |
<SYSTEM_TASK:>
CamelCase game name with mode suffix.
<END_TASK>
<USER_TASK:>
Description:
def full_game_name(short_name):
"""CamelCase game name with mode suffix.
Args:
    short_name: snake_case name without mode, e.g. "crazy_climber"
Returns:
full game name e.g. "CrazyClimberNoFrameskip-v4"
""" |
camel_game_name = misc_utils.snakecase_to_camelcase(short_name)
full_name = camel_game_name + ATARI_GAME_MODE
return full_name |
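# Quick illustration, assuming ATARI_GAME_MODE == "NoFrameskip-v4" as in the
# docstring example:
full_game_name("crazy_climber")   # -> "CrazyClimberNoFrameskip-v4"
full_game_name("pong")            # -> "PongNoFrameskip-v4"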
<SYSTEM_TASK:>
Chooses a random frame sequence of given length from a set of rollouts.
<END_TASK>
<USER_TASK:>
Description:
def random_rollout_subsequences(rollouts, num_subsequences, subsequence_length):
"""Chooses a random frame sequence of given length from a set of rollouts.""" |
def choose_subsequence():
# TODO(koz4k): Weigh rollouts by their lengths so sampling is uniform over
# frames and not rollouts.
rollout = random.choice(rollouts)
try:
from_index = random.randrange(len(rollout) - subsequence_length + 1)
except ValueError:
# Rollout too short; repeat.
return choose_subsequence()
return rollout[from_index:(from_index + subsequence_length)]
return [choose_subsequence() for _ in range(num_subsequences)] |
<SYSTEM_TASK:>
Make frame chooser.
<END_TASK>
<USER_TASK:>
Description:
def make_initial_frame_chooser(
real_env, frame_stack_size, simulation_random_starts,
simulation_flip_first_random_for_beginning,
split=tf.estimator.ModeKeys.TRAIN,
):
"""Make frame chooser.
Args:
real_env: T2TEnv to take initial frames from.
frame_stack_size (int): Number of consecutive frames to extract.
simulation_random_starts (bool): Whether to choose frames at random.
simulation_flip_first_random_for_beginning (bool): Whether to flip the first
frame stack in every batch for the frames at the beginning.
split (tf.estimator.ModeKeys or None): Data split to take the frames from,
None means use all frames.
Returns:
Function batch_size -> initial_frames.
""" |
initial_frame_rollouts = real_env.current_epoch_rollouts(
split=split, minimal_rollout_frames=frame_stack_size,
)
def initial_frame_chooser(batch_size):
"""Frame chooser."""
deterministic_initial_frames =\
initial_frame_rollouts[0][:frame_stack_size]
if not simulation_random_starts:
# Deterministic starts: repeat first frames from the first rollout.
initial_frames = [deterministic_initial_frames] * batch_size
else:
# Random starts: choose random initial frames from random rollouts.
initial_frames = random_rollout_subsequences(
initial_frame_rollouts, batch_size, frame_stack_size
)
if simulation_flip_first_random_for_beginning:
# Flip first entry in the batch for deterministic initial frames.
initial_frames[0] = deterministic_initial_frames
return np.stack([
[frame.observation.decode() for frame in initial_frame_stack] # pylint: disable=g-complex-comprehension
for initial_frame_stack in initial_frames
])
return initial_frame_chooser |
<SYSTEM_TASK:>
Point-wise, hinge loss-like, difference between arrays.
<END_TASK>
<USER_TASK:>
Description:
def absolute_hinge_difference(arr1, arr2, min_diff=10, dtype=np.uint8):
"""Point-wise, hinge loss-like, difference between arrays.
Args:
arr1: integer array to compare.
arr2: integer array to compare.
min_diff: minimal difference taken into consideration.
dtype: dtype of returned array.
Returns:
array
""" |
diff = np.abs(arr1.astype(np.int) - arr2, dtype=np.int)
return np.maximum(diff - min_diff, 0).astype(dtype) |
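# Worked example: absolute differences at or below min_diff collapse to zero,
# larger ones are reduced by min_diff (the values are chosen for illustration).
arr1 = np.array([0, 50, 200], dtype=np.uint8)
arr2 = np.array([5, 30, 100], dtype=np.uint8)
absolute_hinge_difference(arr1, arr2, min_diff=10)
# |diff| = [5, 20, 100]  ->  max(diff - 10, 0)  ->  array([0, 10, 90], dtype=uint8)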
<SYSTEM_TASK:>
Runs a batch of rollouts from given initial observations.
<END_TASK>
<USER_TASK:>
Description:
def run_rollouts(
env, agent, initial_observations, step_limit=None, discount_factor=1.0,
log_every_steps=None, video_writers=(), color_bar=False,
many_rollouts_from_each_env=False
):
"""Runs a batch of rollouts from given initial observations.""" |
assert step_limit is not None or not many_rollouts_from_each_env, (
"When collecting many rollouts from each environment, time limit must "
"be set."
)
num_dones = 0
first_dones = np.array([False] * env.batch_size)
observations = initial_observations
step_index = 0
cum_rewards = np.zeros(env.batch_size)
for (video_writer, obs_stack) in zip(video_writers, initial_observations):
for (i, ob) in enumerate(obs_stack):
debug_frame = augment_observation(
ob, reward=0, cum_reward=0, frame_index=(-len(obs_stack) + i + 1),
bar_color=((0, 255, 0) if color_bar else None)
)
video_writer.write(debug_frame)
def proceed():
if step_index < step_limit:
return num_dones < env.batch_size or many_rollouts_from_each_env
else:
return False
while proceed():
act_kwargs = {}
if agent.needs_env_state:
act_kwargs["env_state"] = env.state
actions = agent.act(observations, **act_kwargs)
(observations, rewards, dones) = env.step(actions)
observations = list(observations)
now_done_indices = []
for (i, done) in enumerate(dones):
if done and (not first_dones[i] or many_rollouts_from_each_env):
now_done_indices.append(i)
first_dones[i] = True
num_dones += 1
if now_done_indices:
# Unless many_rollouts_from_each_env, reset only envs done the first time
# in this timestep to ensure that we collect exactly 1 rollout from each
# env.
reset_observations = env.reset(now_done_indices)
for (i, observation) in zip(now_done_indices, reset_observations):
observations[i] = observation
observations = np.array(observations)
cum_rewards[~first_dones] = (
cum_rewards[~first_dones] * discount_factor + rewards[~first_dones]
)
step_index += 1
for (video_writer, obs_stack, reward, cum_reward, done) in zip(
video_writers, observations, rewards, cum_rewards, first_dones
):
if done:
continue
ob = obs_stack[-1]
debug_frame = augment_observation(
ob, reward=reward, cum_reward=cum_reward,
frame_index=step_index, bar_color=((255, 0, 0) if color_bar else None)
)
video_writer.write(debug_frame)
# TODO(afrozm): Clean this up with tf.logging.log_every_n
if log_every_steps is not None and step_index % log_every_steps == 0:
tf.logging.info("Step %d, mean_score: %f", step_index, cum_rewards.mean())
return (observations, cum_rewards) |
<SYSTEM_TASK:>
Download corpora if necessary and unzip them.
<END_TASK>
<USER_TASK:>
Description:
def _maybe_download_corpora(tmp_dir, dataset_split):
"""Download corpora if necessary and unzip them.
Args:
tmp_dir: directory containing dataset.
dataset_split: whether we're in train/dev/test mode.
Returns:
List of all files generated and path to file containing
train/dev/test split info.
""" |
cnn_filename = "cnn_stories.tgz"
cnn_finalpath = os.path.join(tmp_dir, "cnn/stories/")
dailymail_filename = "dailymail_stories.tgz"
dailymail_finalpath = os.path.join(tmp_dir, "dailymail/stories/")
if not tf.gfile.Exists(cnn_finalpath):
cnn_file = generator_utils.maybe_download_from_drive(
tmp_dir, cnn_filename, _CNN_STORIES_DRIVE_URL)
with tarfile.open(cnn_file, "r:gz") as cnn_tar:
cnn_tar.extractall(tmp_dir)
if not tf.gfile.Exists(dailymail_finalpath):
dailymail_file = generator_utils.maybe_download_from_drive(
tmp_dir, dailymail_filename, _DAILYMAIL_STORIES_DRIVE_URL)
with tarfile.open(dailymail_file, "r:gz") as dailymail_tar:
dailymail_tar.extractall(tmp_dir)
cnn_files = tf.gfile.Glob(cnn_finalpath + "*")
dailymail_files = tf.gfile.Glob(dailymail_finalpath + "*")
all_files = cnn_files + dailymail_files
if dataset_split == problem.DatasetSplit.TRAIN:
urls_path = generator_utils.maybe_download(tmp_dir, "all_train.txt",
_TRAIN_URLS)
elif dataset_split == problem.DatasetSplit.EVAL:
urls_path = generator_utils.maybe_download(tmp_dir, "all_val.txt",
_DEV_URLS)
else:
urls_path = generator_utils.maybe_download(tmp_dir, "all_test.txt",
_TEST_URLS)
return all_files, urls_path |
<SYSTEM_TASK:>
Generate splits of the data.
<END_TASK>
<USER_TASK:>
Description:
def example_splits(url_file, all_files):
"""Generate splits of the data.""" |
def generate_hash(inp):
"""Generate a sha1 hash to match the raw url to the filename extracted."""
h = hashlib.sha1()
h.update(inp)
return h.hexdigest()
all_files_map = {f.split("/")[-1]: f for f in all_files}
urls = [line.strip().encode("utf-8") for line in tf.gfile.Open(url_file)]
filelist = []
for url in urls:
url_hash = generate_hash(url)
filename = url_hash + ".story"
if filename not in all_files_map:
tf.logging.info("Missing file: %s" % url)
continue
filelist.append(all_files_map[filename])
tf.logging.info("Found %d examples" % len(filelist))
return filelist |
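# The URL-to-filename mapping can be reproduced directly; the URL below is
# hypothetical and only illustrates the sha1 naming convention used above.
import hashlib
url = b"http://www.example.com/2015/01/01/some-article/index.html"
story_filename = hashlib.sha1(url).hexdigest() + ".story"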
<SYSTEM_TASK:>
Infer highest epoch number from file names in data_dir.
<END_TASK>
<USER_TASK:>
Description:
def infer_last_epoch_num(data_dir):
"""Infer highest epoch number from file names in data_dir.""" |
names = os.listdir(data_dir)
epochs_str = [re.findall(pattern=r".*\.(-?\d+)$", string=name)
for name in names]
epochs_str = sum(epochs_str, [])
return max([int(epoch_str) for epoch_str in epochs_str]) |
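# Small demonstration of the regex on hypothetical file names: the trailing
# ".<epoch>" suffix is extracted and the maximum epoch number is returned.
import re
names = ["rollouts.tfrecord.0", "rollouts.tfrecord.1", "frames.tfrecord.-1"]
epochs_str = sum([re.findall(r".*\.(-?\d+)$", name) for name in names], [])
max(int(e) for e in epochs_str)   # -> 1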
<SYSTEM_TASK:>
Load T2TGymEnv with data from one epoch.
<END_TASK>
<USER_TASK:>
Description:
def setup_and_load_epoch(hparams, data_dir, which_epoch_data=None):
"""Load T2TGymEnv with data from one epoch.
Args:
hparams: hparams.
data_dir: data directory.
which_epoch_data: data from which epoch to load.
Returns:
env.
""" |
t2t_env = rl_utils.setup_env(
hparams, batch_size=hparams.real_batch_size,
max_num_noops=hparams.max_num_noops
)
# Load data.
if which_epoch_data is not None:
if which_epoch_data == "last":
which_epoch_data = infer_last_epoch_num(data_dir)
assert isinstance(which_epoch_data, int), \
"{}".format(type(which_epoch_data))
t2t_env.start_new_epoch(which_epoch_data, data_dir)
else:
t2t_env.start_new_epoch(-999)
return t2t_env |
<SYSTEM_TASK:>
Wrap environment with gym.Monitor.
<END_TASK>
<USER_TASK:>
Description:
def wrap_with_monitor(env, video_dir):
"""Wrap environment with gym.Monitor.
    Video recording provided by Monitor requires:
    1) both height and width of the observation to be even numbers,
    2) rendering of the environment.
Args:
env: environment.
video_dir: video directory.
Returns:
wrapped environment.
""" |
env = ExtendToEvenDimentions(env)
env = RenderObservations(env) # pylint: disable=redefined-variable-type
env = gym.wrappers.Monitor(env, video_dir, force=True,
video_callable=lambda idx: True,
write_upon_reset=True)
return env |
<SYSTEM_TASK:>
Create SimulatedEnv with minimal subset of hparams.
<END_TASK>
<USER_TASK:>
Description:
def create_simulated_env(
output_dir, grayscale, resize_width_factor, resize_height_factor,
frame_stack_size, generative_model, generative_model_params,
random_starts=True, which_epoch_data="last", **other_hparams
):
""""Create SimulatedEnv with minimal subset of hparams.""" |
# We need these, to initialize T2TGymEnv, but these values (hopefully) have
# no effect on player.
a_bit_risky_defaults = {
"game": "pong", # assumes that T2TGymEnv has always reward_range (-1,1)
"real_batch_size": 1,
"rl_env_max_episode_steps": -1,
"max_num_noops": 0
}
for key in a_bit_risky_defaults:
if key not in other_hparams:
other_hparams[key] = a_bit_risky_defaults[key]
hparams = hparam.HParams(
grayscale=grayscale,
resize_width_factor=resize_width_factor,
resize_height_factor=resize_height_factor,
frame_stack_size=frame_stack_size,
generative_model=generative_model,
generative_model_params=generative_model_params,
**other_hparams
)
return load_data_and_make_simulated_env(
output_dir, wm_dir=None, hparams=hparams,
which_epoch_data=which_epoch_data,
random_starts=random_starts) |
<SYSTEM_TASK:>
Infers standard paths to policy and model directories.
<END_TASK>
<USER_TASK:>
Description:
def infer_paths(output_dir, **subdirs):
"""Infers standard paths to policy and model directories.
Example:
>>> infer_paths("/some/output/dir/", policy="", model="custom/path")
{"policy": "/some/output/dir/policy", "model": "custom/path",
"output_dir":"/some/output/dir/"}
Args:
output_dir: output directory.
**subdirs: sub-directories.
Returns:
a dictionary with the directories.
""" |
directories = {}
for name, path in six.iteritems(subdirs):
directories[name] = path if path else os.path.join(output_dir, name)
directories["output_dir"] = output_dir
return directories |
<SYSTEM_TASK:>
Add new observation to frame stack and infer policy.
<END_TASK>
<USER_TASK:>
Description:
def infer(self, ob):
"""Add new observation to frame stack and infer policy.
Args:
ob: array of shape (height, width, channels)
Returns:
logits and vf.
""" |
self._add_to_stack(ob)
logits, vf = self.infer_from_frame_stack(self._frame_stack)
return logits, vf |
<SYSTEM_TASK:>
Infer policy from stack of observations.
<END_TASK>
<USER_TASK:>
Description:
def infer_from_frame_stack(self, ob_stack):
"""Infer policy from stack of observations.
Args:
ob_stack: array of shape (1, frame_stack_size, height, width, channels)
Returns:
logits and vf.
""" |
logits, vf = self.sess.run([self.logits_t, self.value_function_t],
feed_dict={self.obs_t: ob_stack})
return logits, vf |
<SYSTEM_TASK:>
Normalizes the string using tokenizer.encode.
<END_TASK>
<USER_TASK:>
Description:
def _normalize_string(raw_str):
"""Normalizes the string using tokenizer.encode.
Args:
raw_str: the input string
Returns:
A string which is ready to be tokenized using split()
""" |
return " ".join(
token.strip()
for token in tokenizer.encode(text_encoder.native_to_unicode(raw_str))) |
<SYSTEM_TASK:>
It dynamically instantiates a class for each babi subset-task pair.
<END_TASK>
<USER_TASK:>
Description:
def _register_babi_problems():
"""It dynamically instantiates a class for each babi subsets-tasks.
@registry.register_problem
class BabiQaConcatAllTasks_10k(EditSequenceRegexProblem):
@property
def babi_task_id(self):
return "qa0"
@property
def babi_subset(self):
return "en-10k"
    It does not put the classes into the global namespace, so to access the class
    we rely on the registry or this module's REGISTERED_PROBLEMS list.
It will be available as
registry.problem("babi_qa_concat_all_tasks_10k")
i.e., change camel case to snake case. Numbers are considered lower case
characters for these purposes.
""" |
for (subset, subset_suffix) in [("en", "_1k"), ("en-10k", "_10k")]:
for problem_name, babi_task_id in six.iteritems(_problems_to_register()):
problem_class = type("BabiQaConcat" + problem_name + subset_suffix,
(BabiQaConcat,), {
"babi_task_id": babi_task_id,
"babi_subset": subset
})
registry.register_problem(problem_class)
REGISTERED_PROBLEMS.append(problem_class.name) |
<SYSTEM_TASK:>
Builds encoder for the given class labels.
<END_TASK>
<USER_TASK:>
Description:
def get_labels_encoder(self, data_dir):
"""Builds encoder for the given class labels.
Args:
data_dir: data directory
Returns:
An encoder for class labels.
""" |
label_filepath = os.path.join(data_dir, self.vocab_filename)
return text_encoder.TokenTextEncoder(label_filepath) |
<SYSTEM_TASK:>
A generator that generates samples that are encoded.
<END_TASK>
<USER_TASK:>
Description:
def generate_encoded_samples(self, data_dir, tmp_dir, dataset_split):
"""A generator that generates samples that are encoded.
Args:
data_dir: data directory
tmp_dir: temp directory
dataset_split: dataset split
Yields:
A dict.
""" |
generator = self.generate_samples(data_dir, tmp_dir, dataset_split)
encoder = self.get_or_create_vocab(data_dir, tmp_dir)
label_encoder = self.get_labels_encoder(data_dir)
for sample in generator:
inputs = encoder.encode(sample["inputs"])
inputs.append(text_encoder.EOS_ID)
context = encoder.encode(sample["context"])
context.append(text_encoder.EOS_ID)
targets = label_encoder.encode(sample["targets"])
sample["targets"] = targets
yield {"inputs": inputs, "context": context, "targets": targets} |
<SYSTEM_TASK:>
Splits of data to produce and number of output shards for each.
<END_TASK>
<USER_TASK:>
Description:
def dataset_splits(self):
"""Splits of data to produce and number the output shards for each.""" |
return [{
"split": problem.DatasetSplit.TRAIN,
"shards": self.num_train_shards,
}, {
"split": problem.DatasetSplit.EVAL,
"shards": self.num_eval_shards,
}, {
"split": problem.DatasetSplit.TEST,
"shards": self.num_test_shards,
}] |
<SYSTEM_TASK:>
Adding to base hparams the attributes for librispeech.
<END_TASK>
<USER_TASK:>
Description:
def add_librispeech_hparams(hparams):
"""Adding to base hparams the attributes for for librispeech.""" |
hparams.batch_size = 36
hparams.audio_compression = 8
hparams.hidden_size = 2048
hparams.max_input_seq_length = 600000
hparams.max_target_seq_length = 350
hparams.max_length = hparams.max_input_seq_length
hparams.min_length_bucket = hparams.max_input_seq_length // 2
hparams.learning_rate = 0.05
hparams.train_steps = 5000000
hparams.num_hidden_layers = 4
return hparams |
<SYSTEM_TASK:>
Generates linearized trees and tokens from the wsj tree format.
<END_TASK>
<USER_TASK:>
Description:
def words_and_tags_from_wsj_tree(tree_string):
"""Generates linearized trees and tokens from the wsj tree format.
It uses the linearized algorithm described in https://arxiv.org/abs/1412.7449.
Args:
tree_string: tree in wsj format
Returns:
tuple: (words, linearized tree)
""" |
stack, tags, words = [], [], []
for tok in tree_string.strip().split():
if tok[0] == "(":
symbol = tok[1:]
tags.append(symbol)
stack.append(symbol)
else:
assert tok[-1] == ")"
stack.pop() # Pop the POS-tag.
while tok[-2] == ")":
tags.append("/" + stack.pop())
tok = tok[:-1]
words.append(tok[:-1])
return str.join(" ", words), str.join(" ", tags[1:-1]) |
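# Worked trace on a small tree (the bracketing is illustrative, not taken from
# the WSJ corpus); the outermost "S" tag and its closing "/S" are stripped by
# tags[1:-1].
tree = "(S (NP (DT the) (NN dog)) (VP (VBZ barks)))"
words_and_tags_from_wsj_tree(tree)
# -> ("the dog barks", "NP DT NN /NP VP VBZ /VP")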
<SYSTEM_TASK:>
Map filename to the task id that created it assuming 1k tasks.
<END_TASK>
<USER_TASK:>
Description:
def filename_to_task_id(fname):
"""Map filename to the task id that created it assuming 1k tasks.""" |
# This matches the order and size in WikisumBase.out_filepaths
fname = os.path.basename(fname)
shard_id_increment = {
"train": 0,
"dev": 800,
"test": 900,
}
parts = fname.split("-")
split = parts[1]
shard_id = parts[2]
task_id = int(shard_id) + shard_id_increment[split]
return task_id |
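# Example, assuming the "<problem>-<split>-<shard>-of-<total>" file-name pattern
# produced by WikisumBase.out_filepaths; the names below are hypothetical.
filename_to_task_id("/data/wikisum_web-dev-00012-of-00100")     # -> 12 + 800 = 812
filename_to_task_id("/data/wikisum_web-train-00007-of-00800")   # -> 7 + 0 = 7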
<SYSTEM_TASK:>
Validate presence and minimum size of files.
<END_TASK>
<USER_TASK:>
Description:
def validate_data_files(problem, data_files, min_size):
"""Validate presence and minimum size of files.""" |
# Check that all files are present
data_dir = os.path.split(data_files[0])[0]
out_filepaths = problem.out_filepaths(data_dir)
missing_filepaths = set(out_filepaths) - set(data_files)
if missing_filepaths:
tf.logging.error("Missing %d data files", len(missing_filepaths))
# Check that each file is at least 100M
too_small = []
for data_file in data_files:
length = get_length(data_file)
if length < min_size:
too_small.append(data_file)
if too_small:
tf.logging.error("%d files too small", len(too_small))
bad_files = too_small + list(missing_filepaths)
return bad_files |
<SYSTEM_TASK:>
Downloading and preparing the dataset.
<END_TASK>
<USER_TASK:>
Description:
def _prepare_lambada_data(tmp_dir, data_dir, vocab_size, vocab_filename):
"""Downloading and preparing the dataset.
Args:
    tmp_dir: temp directory
data_dir: data directory
vocab_size: size of vocabulary
vocab_filename: name of vocab file
""" |
if not tf.gfile.Exists(data_dir):
tf.gfile.MakeDirs(data_dir)
file_path = generator_utils.maybe_download(tmp_dir, _TAR, _URL)
tar_all = tarfile.open(file_path)
tar_all.extractall(tmp_dir)
tar_all.close()
tar_train = tarfile.open(os.path.join(tmp_dir, "train-novels.tar"))
tar_train.extractall(tmp_dir)
tar_train.close()
vocab_path = os.path.join(data_dir, vocab_filename)
if not tf.gfile.Exists(vocab_path):
with tf.gfile.GFile(os.path.join(tmp_dir, _VOCAB), "r") as infile:
reader = csv.reader(infile, delimiter="\t")
words = [row[0] for row in reader]
words = [_UNK] + words[:vocab_size]
with tf.gfile.GFile(vocab_path, "w") as outfile:
outfile.write("\n".join(words)) |
<SYSTEM_TASK:>
Gives the file paths with regards to the given split.
<END_TASK>
<USER_TASK:>
Description:
def get_dataset_split(tmp_dir, split, use_control_set):
"""Gives the file paths with regards to the given split.
Args:
tmp_dir: temp directory
split: dataset split
use_control_set: uses control dataset if true.
Returns:
list of file paths.
""" |
if not use_control_set:
dataset_split = {
problem.DatasetSplit.TRAIN: [
f for f in tf.gfile.Glob(
os.path.join(tmp_dir, "train-novels/*/*.txt"))
],
problem.DatasetSplit.EVAL: [
os.path.join(tmp_dir, "lambada_development_plain_text.txt")
],
problem.DatasetSplit.TEST: [
os.path.join(tmp_dir, "lambada_test_plain_text.txt")
]
}
else:
dataset_split = {
problem.DatasetSplit.TRAIN: [
f for f in tf.gfile.Glob(
os.path.join(tmp_dir, "train-novels/*/*.txt"))
],
problem.DatasetSplit.EVAL: [
os.path.join(tmp_dir, "lambada_control_test_data_plain_text.txt")
],
}
return dataset_split[split] |
<SYSTEM_TASK:>
Determine the minimum sequence length given a dataset_split.
<END_TASK>
<USER_TASK:>
Description:
def min_sequence_length(self, dataset_split):
"""Determine the minimum sequence length given a dataset_split.
Args:
dataset_split: A problem.DatasetSplit.
Returns:
The minimum length that a sequence can be for this dataset_split.
""" |
return {
problem.DatasetSplit.TRAIN: 8,
problem.DatasetSplit.EVAL: 65,
problem.DatasetSplit.TEST: 65
}[dataset_split] |
<SYSTEM_TASK:>
Determine the maximum sequence length given a dataset_split.
<END_TASK>
<USER_TASK:>
Description:
def max_sequence_length(self, dataset_split):
"""Determine the maximum sequence length given a dataset_split.
Args:
dataset_split: A problem.DatasetSplit.
Returns:
The maximum length that a sequence can be for this dataset_split.
""" |
return {
problem.DatasetSplit.TRAIN: 64,
problem.DatasetSplit.EVAL: 128,
problem.DatasetSplit.TEST: 128
}[dataset_split] |
<SYSTEM_TASK:>
Determine the dataset size given a dataset_split.
<END_TASK>
<USER_TASK:>
Description:
def num_samples(self, dataset_split):
"""Determine the dataset sized given a dataset_split.
Args:
dataset_split: A problem.DatasetSplit.
Returns:
The desired number of samples for this dataset_split.
""" |
return {
problem.DatasetSplit.TRAIN: 1000000,
problem.DatasetSplit.EVAL: 10000,
problem.DatasetSplit.TEST: 10000
}[dataset_split] |
<SYSTEM_TASK:>
Create a T2T Estimator.
<END_TASK>
<USER_TASK:>
Description:
def create_estimator(model_name,
hparams,
run_config,
schedule="train_and_evaluate",
decode_hparams=None,
use_tpu=False,
use_tpu_estimator=False,
use_xla=False):
"""Create a T2T Estimator.""" |
model_fn = t2t_model.T2TModel.make_estimator_model_fn(
model_name, hparams, decode_hparams=decode_hparams, use_tpu=use_tpu)
del use_xla
if use_tpu or use_tpu_estimator:
problem = hparams.problem
batch_size = (
problem.tpu_batch_size_per_shard(hparams) *
run_config.tpu_config.num_shards)
mlperf_log.transformer_print(
key=mlperf_log.INPUT_BATCH_SIZE, value=batch_size)
if getattr(hparams, "mtf_mode", False):
batch_size = problem.tpu_batch_size_per_shard(hparams)
predict_batch_size = batch_size
if decode_hparams and decode_hparams.batch_size:
predict_batch_size = decode_hparams.batch_size
if decode_hparams and run_config.tpu_config:
decode_hparams.add_hparam("iterations_per_loop",
run_config.tpu_config.iterations_per_loop)
estimator = tf.contrib.tpu.TPUEstimator(
model_fn=model_fn,
model_dir=run_config.model_dir,
config=run_config,
use_tpu=use_tpu,
train_batch_size=batch_size,
eval_batch_size=batch_size if "eval" in schedule else None,
predict_batch_size=predict_batch_size,
experimental_export_device_assignment=True)
else:
estimator = tf.estimator.Estimator(
model_fn=model_fn,
model_dir=run_config.model_dir,
config=run_config,
)
return estimator |
<SYSTEM_TASK:>
Create train and eval hooks for Experiment.
<END_TASK>
<USER_TASK:>
Description:
def create_hooks(use_tfdbg=False,
use_dbgprofile=False,
dbgprofile_kwargs=None,
use_validation_monitor=False,
validation_monitor_kwargs=None,
use_early_stopping=False,
early_stopping_kwargs=None):
"""Create train and eval hooks for Experiment.""" |
train_hooks = []
eval_hooks = []
if use_tfdbg:
hook = debug.LocalCLIDebugHook()
train_hooks.append(hook)
eval_hooks.append(hook)
if use_dbgprofile:
# Recorded traces can be visualized with chrome://tracing/
# The memory/tensor lifetime is also profiled
tf.logging.info("Using ProfilerHook")
defaults = dict(save_steps=10, show_dataflow=True, show_memory=True)
defaults.update(dbgprofile_kwargs)
train_hooks.append(tf.train.ProfilerHook(**defaults))
if use_validation_monitor:
tf.logging.info("Using ValidationMonitor")
train_hooks.append(
tf.contrib.learn.monitors.ValidationMonitor(
hooks=eval_hooks, **validation_monitor_kwargs))
if use_early_stopping:
tf.logging.info("Using EarlyStoppingHook")
hook = metrics_hook.EarlyStoppingHook(**early_stopping_kwargs)
# Adding to both training and eval so that eval aborts as well
train_hooks.append(hook)
eval_hooks.append(hook)
return train_hooks, eval_hooks |
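# Minimal invocation sketch; the keyword-argument values are placeholders and
# the only hook enabled here is the profiler.
train_hooks, eval_hooks = create_hooks(
    use_tfdbg=False,
    use_dbgprofile=True,
    dbgprofile_kwargs={"save_steps": 100},
    use_validation_monitor=False,
    validation_monitor_kwargs=None,
    use_early_stopping=False,
    early_stopping_kwargs=None)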
<SYSTEM_TASK:>
Wrapper for canonical experiment_fn. See create_experiment.
<END_TASK>
<USER_TASK:>
Description:
def create_experiment_fn(*args, **kwargs):
"""Wrapper for canonical experiment_fn. See create_experiment.""" |
def experiment_fn(run_config, hparams):
return create_experiment(run_config, hparams, *args, **kwargs)
return experiment_fn |
<SYSTEM_TASK:>
Restore from a checkpoint.
<END_TASK>
<USER_TASK:>
Description:
def restore_checkpoint(ckpt_dir, saver, sess, must_restore=False):
"""Restore from a checkpoint.""" |
ckpt = tf.train.get_checkpoint_state(ckpt_dir)
if must_restore and not ckpt:
raise ValueError("No checkpoint found in %s" % ckpt_dir)
if not ckpt:
return 0
path = ckpt.model_checkpoint_path
tf.logging.info("Restoring checkpoint %s", path)
saver.restore(sess, path)
step = int(path.split("-")[-1])
return step |
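# Typical call pattern (paths are placeholders); assumes the model's variables
# have already been built so that a Saver can be constructed.
saver = tf.train.Saver()
with tf.Session() as sess:
  sess.run(tf.global_variables_initializer())
  start_step = restore_checkpoint("/tmp/model_dir", saver, sess)  # 0 if no ckpt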
<SYSTEM_TASK:>
Does eval and decode after training every eval_freq_in_steps.
<END_TASK>
<USER_TASK:>
Description:
def train_eval_and_decode(self):
"""Does eval and decode after training every eval_freq_in_steps.""" |
eval_steps = self._hparams.eval_freq_in_steps
packed_dataset = "_packed" in self._hparams.problem.name
mlperf_log.transformer_print(key=mlperf_log.TRAIN_LOOP)
for i in range(0, self._train_spec.max_steps, eval_steps):
mlperf_log.transformer_print(
key=mlperf_log.TRAIN_EPOCH, value=i // eval_steps)
if packed_dataset and i > 0:
problem = registry.problem(self._hparams.problem.name + "_packed")
p_hparams = problem.get_hparams(self._hparams)
self._hparams.problem = problem
self._hparams.problem_hparams = p_hparams
self._estimator.train(
self._train_spec.input_fn,
steps=eval_steps,
hooks=self._train_spec.hooks)
self._set_eval_dir_name("eval")
self._estimator.evaluate(
self._eval_spec.input_fn,
steps=self._eval_spec.steps,
hooks=self._eval_spec.hooks,
name="eval")
if packed_dataset:
problem = registry.problem(
self._hparams.problem.name.replace("_packed", ""))
p_hparams = problem.get_hparams(self._hparams)
self._hparams.problem = problem
self._hparams.problem_hparams = p_hparams
mlperf_log.transformer_print(key=mlperf_log.EVAL_START)
if self._hparams.mlperf_mode:
self._decode_hparams.mlperf_decode_step = i + eval_steps
self.decode(dataset_split=tf.estimator.ModeKeys.EVAL)
d_hparams = self._decode_hparams
if self._hparams.mlperf_mode and d_hparams.mlperf_success:
mlperf_log.transformer_print(
key=mlperf_log.RUN_STOP, value={"success": "true"})
break
d_hparams = self._decode_hparams
if self._hparams.mlperf_mode and not d_hparams.mlperf_success:
mlperf_log.transformer_print(
key=mlperf_log.RUN_STOP, value={"success": "false"}) |
<SYSTEM_TASK:>
Evaluate until checkpoints stop being produced.
<END_TASK>
<USER_TASK:>
Description:
def continuous_eval(self):
"""Evaluate until checkpoints stop being produced.""" |
for ckpt_path in next_checkpoint(self._hparams.model_dir,
self._hparams.eval_timeout_mins):
# Skip zero'th step.
train_step = decoding.get_step_from_ckpt_path(ckpt_path)
if train_step == 0:
tf.logging.info("Skipping evaluation at step 0")
continue
self.evaluate() |
<SYSTEM_TASK:>
Evaluate on train data until checkpoints stop being produced.
<END_TASK>
<USER_TASK:>
Description:
def continuous_eval_on_train_data(self):
"""Evaluate on train data until checkpoints stop being produced.""" |
for ckpt_path in next_checkpoint(self._hparams.model_dir,
self._hparams.eval_timeout_mins):
# Skip zero'th step.
train_step = decoding.get_step_from_ckpt_path(ckpt_path)
if train_step == 0:
tf.logging.info("Skipping evaluation at step 0")
continue
self.evaluate_on_train_data() |
<SYSTEM_TASK:>
Starts a TensorFlow server and joins the serving thread.
<END_TASK>
<USER_TASK:>
Description:
def run_std_server(self):
"""Starts a TensorFlow server and joins the serving thread.
Typically used for parameter servers.
Raises:
ValueError: if not enough information is available in the estimator's
config to create a server.
""" |
config = tf.estimator.RunConfig()
server = tf.train.Server(
config.cluster_spec,
job_name=config.task_type,
task_index=config.task_id,
protocol=config.protocol)
server.join() |
<SYSTEM_TASK:>
Decodes from dataset or file.
<END_TASK>
<USER_TASK:>
Description:
def decode(self,
dataset_split=None,
decode_from_file=False,
checkpoint_path=None):
"""Decodes from dataset or file.""" |
if decode_from_file:
decoding.decode_from_file(self._estimator,
self._decode_hparams.decode_from_file,
self._hparams,
self._decode_hparams,
self._decode_hparams.decode_to_file)
else:
decoding.decode_from_dataset(
self._estimator,
self._hparams.problem.name,
self._hparams,
self._decode_hparams,
dataset_split=dataset_split,
checkpoint_path=checkpoint_path) |