text_prompt | code_prompt
---|---|
<SYSTEM_TASK:>
Flatten dict of dicts into a single dict with appropriate prefixes.
<END_TASK>
<USER_TASK:>
Description:
def _flatten_dict(original_dict):
"""Flatten dict of dicts into a single dict with appropriate prefixes.
Handles only 2 levels of nesting in the original dict.
Args:
original_dict: Dict which may contain one or more dicts.
Returns:
flat_dict: Dict without any nesting. Any dicts in the original dict have
their keys as prefixes in the new dict.
Raises:
ValueError if the original dict has more than two levels of nesting.
""" |
flat_dict = {}
for key, value in original_dict.items():
if isinstance(value, dict):
for name, tensor in value.items():
if isinstance(tensor, dict):
raise ValueError("flatten_dict only handles 2 levels of nesting.")
flat_key = "__" + key + "_" + name
flat_dict[flat_key] = tensor
else:
flat_dict[key] = value
return flat_dict |
<SYSTEM_TASK:>
Returns a dict of dicts if any prefixes match keys in the flat dict.
<END_TASK>
<USER_TASK:>
Description:
def _unflatten_dict(flat_dict, prefixes):
"""Returns a dict of dicts if any prefixes match keys in the flat dict.
The function handles the case where the prefix may not be a dict.
Args:
flat_dict: A dict without any nesting.
prefixes: A list of strings which may have been dicts in the
original structure.
""" |
original_dict = {}
for key, value in flat_dict.items():
prefix_found = False
for prefix in prefixes:
full_prefix = "__" + prefix + "_"
if key.startswith(full_prefix):
# Add a dict to the original dict with key=prefix
if prefix not in original_dict:
original_dict[prefix] = {}
original_dict[prefix][key[len(full_prefix):]] = value
prefix_found = True
break
if not prefix_found:
# No key matched a prefix in the for loop.
original_dict[key] = value
return original_dict |
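As a quick illustration, here is a hedged usage sketch (hypothetical values, not part of the library) showing that _unflatten_dict recovers the structure produced by _flatten_dict when the original nested keys are passed as prefixes:
nested = {"inputs": 1, "losses": {"training": 2.0, "extra": 0.5}}
flat = _flatten_dict(nested)
# flat == {"inputs": 1, "__losses_training": 2.0, "__losses_extra": 0.5}
restored = _unflatten_dict(flat, prefixes=["losses"])
assert restored == nested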
<SYSTEM_TASK:>
Dummy vars for restore to work when not using TPU codepath.
<END_TASK>
<USER_TASK:>
Description:
def create_dummy_vars():
"""Dummy vars for restore to work when not using TPU codepath.""" |
var_names = set([v.name for v in tf.global_variables()])
if "losses_avg/problem_0/total_loss:0" in var_names:
return
with tf.variable_scope("losses_avg"):
with tf.variable_scope("problem_0"):
for var_name in ["total", "extra", "training"]:
tf.get_variable(
"%s_loss" % var_name, initializer=100.0, trainable=False)
with tf.variable_scope("train_stats"):
tf.get_variable("problem_0_steps", initializer=0, trainable=False) |
<SYSTEM_TASK:>
Construct a host_call writing scalar summaries.
<END_TASK>
<USER_TASK:>
Description:
def create_host_call(model_dir):
"""Construct a host_call writing scalar summaries.
Args:
model_dir: String containing the path to the train directory where summaries are written.
Returns:
(fn, args) Pair to be called by TPUEstimator as the host_call.
""" |
graph = tf.get_default_graph()
summaries = graph.get_collection(tf.GraphKeys.SUMMARIES)
gs_t = tf.reshape(tf.to_int32(tf.train.get_global_step()), [1])
summary_kwargs = collections.OrderedDict()
for t in summaries:
# TODO(aidangomez): enable ImageSummary support when we have a faster method
# see @shibow's comment in cl/202344570
if t.op.type not in ["ScalarSummary"]:
tf.logging.warn("Ignoring unsupported tf.Summary type %s" % t.op.type)
continue
name = t.op.name
tensor = t.op.inputs[1]
if t.op.type == "ScalarSummary":
assert tensor.shape.is_compatible_with([])
if tensor.dtype == tf.int64:
tensor = tf.to_int32(tensor)
summary_kwargs["ScalarSummary" + name] = tf.reshape(tensor, [1])
elif t.op.type == "ImageSummary":
# TODO(aidangomez): as we move to support more types, update
# common_layers.tpu_safe_image_summary
if tensor.dtype != tf.float32:
tf.logging.warn(
"Currently T2T on TPU only supports ImageSummary of "
"tf.float32-type Tensors. Skipping Tensor "
"%s with dtype %s..." % (tensor.name, tensor.dtype))
continue
# tensor = tf.to_float(tensor)
summary_kwargs["ImageSummary" + name] = tensor
# When no supported summaries are found, don't create host_call. Otherwise,
# the TPU outfeed queue would enqueue global_step while host_call doesn't
# dequeue it, eventually causing a hang.
if not summary_kwargs:
return None
summary_kwargs["global_step"] = gs_t
log_info("summary_kwargs %s" % str(summary_kwargs))
def host_call_fn(**kwargs):
"""Training host call. Creates summaries for training metrics.
Args:
**kwargs: Dict of {str: Tensor}, with `Tensor` of shape `[batch]`. Must
contain key "global_step" with value of current global_step Tensor.
Returns:
List of summary ops to run on the CPU host.
"""
gs = tf.to_int64(kwargs.pop("global_step")[0])
with tf.contrib.summary.create_file_writer(model_dir).as_default():
with tf.contrib.summary.always_record_summaries():
# We need to use tf.contrib.summary in order to feed the `step`.
for name, value in sorted(six.iteritems(kwargs)):
if name.startswith("ScalarSummary"):
name = name[len("ScalarSummary"):]
tf.contrib.summary.scalar(
name, tf.reduce_mean(tf.to_float(value)), step=gs)
elif name.startswith("ImageSummary"):
name = name[len("ImageSummary"):]
tf.contrib.summary.image(name, value, step=gs)
return tf.contrib.summary.all_summary_ops()
return (host_call_fn, summary_kwargs) |
<SYSTEM_TASK:>
Average losses across datashards.
<END_TASK>
<USER_TASK:>
Description:
def average_sharded_losses(sharded_losses):
"""Average losses across datashards.
Args:
sharded_losses: list<dict<str loss_name, Tensor loss>>. The loss
can be a single Tensor or a 2-tuple (numerator and denominator).
Returns:
losses: dict<str loss_name, Tensor avg_loss>
""" |
losses = {}
for loss_name in sorted(sharded_losses[0]):
all_shards = [shard_losses[loss_name] for shard_losses in sharded_losses]
if isinstance(all_shards[0], tuple):
sharded_num, sharded_den = zip(*all_shards)
mean_loss = (
tf.add_n(sharded_num) / tf.maximum(
tf.cast(1.0, sharded_den[0].dtype), tf.add_n(sharded_den)))
else:
mean_loss = tf.reduce_mean(all_shards)
losses[loss_name] = mean_loss
return losses |
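A minimal plain-Python sketch (illustrative numbers, no TensorFlow) of the two averaging modes: plain per-shard losses are averaged directly, while (numerator, denominator) pairs are combined as the weighted average sum(num) / sum(den):
sharded = [{"training": (6.0, 2.0)}, {"training": (3.0, 1.0)}]
nums, dens = zip(*[shard["training"] for shard in sharded])
weighted_avg = sum(nums) / max(1.0, sum(dens))   # (6 + 3) / (2 + 1) == 3.0
assert weighted_avg == 3.0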
<SYSTEM_TASK:>
Compose two custom getters.
<END_TASK>
<USER_TASK:>
Description:
def _compose_custom_getters(getter_a, getter_b):
"""Compose two custom getters.
Example use:
tf.get_variable_scope().set_custom_getter(
compose_custom_getters(tf.get_variable_scope().custom_getter, new_getter))
This composes getters in the same way as creating a new variable scope with
the new_getter, but it does not actually create a new variable scope.
Args:
getter_a: a custom getter - generally from the existing variable scope.
getter_b: a custom getter
Returns:
a custom getter
""" |
if not getter_a:
return getter_b
if not getter_b:
return getter_a
def getter_fn(getter, *args, **kwargs):
return getter_b(functools.partial(getter_a, getter), *args, **kwargs)
return getter_fn |
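A hedged TF1 sketch of the composition order (the getters below are illustrative, not from the library): getter_b receives getter_a already bound to the true getter, so both wrappers see every tf.get_variable call in the scope:
def getter_a(getter, name, *args, **kwargs):
  tf.logging.info("getter_a sees %s", name)
  return getter(name, *args, **kwargs)
def getter_b(getter, name, *args, **kwargs):
  tf.logging.info("getter_b sees %s", name)
  return getter(name, *args, **kwargs)
composed = _compose_custom_getters(getter_a, getter_b)
with tf.variable_scope("demo", custom_getter=composed):
  _ = tf.get_variable("w", shape=[2, 2])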
<SYSTEM_TASK:>
Set a custom getter in the current variable scope.
<END_TASK>
<USER_TASK:>
Description:
def set_custom_getter_compose(custom_getter):
"""Set a custom getter in the current variable scope.
Do not overwrite the existing custom getter - rather compose with it.
Args:
custom_getter: a custom getter.
""" |
tf.get_variable_scope().set_custom_getter(
_compose_custom_getters(tf.get_variable_scope().custom_getter,
custom_getter)) |
<SYSTEM_TASK:>
Initialize variables from given directory.
<END_TASK>
<USER_TASK:>
Description:
def initialize_from_ckpt(ckpt_dir, hparams):
"""Initialize variables from given directory.""" |
model_dir = hparams.get("model_dir", None)
already_has_ckpt = (
model_dir and tf.train.latest_checkpoint(model_dir) is not None)
if already_has_ckpt:
return
tf.logging.info("Checkpoint dir: %s", ckpt_dir)
reader = tf.contrib.framework.load_checkpoint(ckpt_dir)
variable_map = {}
for var in tf.contrib.framework.get_trainable_variables():
var_name = var.name.split(":")[0]
if reader.has_tensor(var_name):
tf.logging.info("Loading variable from checkpoint: %s", var_name)
variable_map[var_name] = var
else:
tf.logging.info("Cannot find variable in checkpoint, skipping: %s",
var_name)
tf.train.init_from_checkpoint(ckpt_dir, variable_map) |
<SYSTEM_TASK:>
Whether the target modality is real-valued.
<END_TASK>
<USER_TASK:>
Description:
def _target_modality_is_real(self):
"""Whether the target modality is real-valued.""" |
vocab_size = self._problem_hparams.vocab_size["targets"]
if vocab_size is not None and hasattr(self._hparams, "vocab_divisor"):
vocab_size += (-vocab_size) % self._hparams.vocab_divisor
modality = self._problem_hparams.modality["targets"]
modality_name = self._hparams.name.get(
"targets",
modalities.get_name(modality))(self._hparams, vocab_size)
return modality_name.startswith("real") |
<SYSTEM_TASK:>
Estimator model_fn sharded along batch dimension.
<END_TASK>
<USER_TASK:>
Description:
def model_fn_sharded(self, sharded_features):
"""Estimator model_fn sharded along batch dimension.
Args:
sharded_features: {str: [Tensor]}. Features sharded along batch dimension.
Each list is the same length (== number of shards).
Returns:
sharded_logits: [Tensor]. Logits for each shard of examples.
losses: {str: 0-D Tensor}. Loss averaged across shards.
""" |
dp = self._data_parallelism
# [{str: Tensor}]. Transpose of 'sharded_features'.
datashard_to_features = self._to_features_per_datashard(sharded_features)
if self.use_body_sharded():
if self.hparams.scheduled_sampling_prob > 0.0:
raise NotImplementedError(
"Scheduled sampling for non-sharded body only.")
# MoE models override body_sharded
transformed_features = dp(self.bottom, datashard_to_features)
body_out = self.body_sharded(
self._to_single_features_dict(transformed_features))
body_out, losses = self._normalize_body_output(body_out)
if "training" in losses:
log_info("Skipping T2TModel top and loss because training loss "
"returned from body")
sharded_logits = body_out
else:
if isinstance(body_out, dict):
sharded_logits = collections.OrderedDict()
sharded_losses = collections.OrderedDict()
for k, v in sorted(six.iteritems(body_out)):
sharded_logits[k] = dp(self.top, v, datashard_to_features)
sharded_losses[k] = dp(self.loss, sharded_logits[k],
datashard_to_features)
training_loss_dict = average_sharded_losses([({
"training": l
} for l in loss) for loss in sharded_losses.values()])
losses.update(training_loss_dict)
else:
sharded_logits = dp(self.top, body_out, datashard_to_features)
sharded_losses = dp(self.loss, sharded_logits, datashard_to_features)
if isinstance(sharded_losses, tuple):
nums, dens = sharded_losses
sharded_losses = zip(nums, dens)
training_loss_dict = average_sharded_losses([{
"training": loss
} for loss in sharded_losses])
losses.update(training_loss_dict)
else:
sharded_logits, sharded_losses = dp(self.model_fn, datashard_to_features)
sharded_logits, sharded_losses = dp(
self.maybe_scheduled_sampling,
datashard_to_features, sharded_logits, sharded_losses)
if isinstance(sharded_logits[0], dict):
temp_dict = {k: [] for k, _ in six.iteritems(sharded_logits[0])}
for k, _ in six.iteritems(sharded_logits[0]):
for l in sharded_logits:
temp_dict[k].append(l[k])
sharded_logits = temp_dict
losses = average_sharded_losses(sharded_losses)
return sharded_logits, losses |
<SYSTEM_TASK:>
Transforms features to feed into body.
<END_TASK>
<USER_TASK:>
Description:
def bottom(self, features):
"""Transforms features to feed into body.
Args:
features: dict of str to Tensor. Typically it is the preprocessed data
batch after Problem's preprocess_example().
Returns:
transformed_features: dict of same key-value pairs as features. The value
Tensors are newly transformed.
""" |
if not self._problem_hparams:
log_warn("Without a Problem, T2TModel.bottom is a passthrough.")
return features
transformed_features = collections.OrderedDict()
all_previous_modalities = []
target_modality = _create_target_modality(self._problem_hparams.modality)
# Transform features via its corresponding modality.
for feature_name, modality in sorted(
six.iteritems(self._problem_hparams.modality)):
if feature_name not in features:
tf.logging.warning("Missing feature %s - ignoring." % feature_name)
continue
vocab_size = self._problem_hparams.vocab_size[feature_name]
if vocab_size is not None and hasattr(self._hparams, "vocab_divisor"):
vocab_size += (-vocab_size) % self._hparams.vocab_divisor
modality_name = self._hparams.name.get(
feature_name,
modalities.get_name(modality))(self._hparams, vocab_size)
# Use if-else clauses to preserve behavior of previous changes: namely,
# the variable scope name for the targets feature if there is only one
# target modality; and to reuse variable scopes for only input modalities.
if feature_name in target_modality:
if len(target_modality) > 1:
variable_scope_name = "%s/%s" % (modality_name, feature_name)
else:
variable_scope_name = modality_name
bottom = self._hparams.bottom.get(
feature_name,
modalities.get_targets_bottom(modality))
# TODO(aidangomez): share variables?
with tf.variable_scope(variable_scope_name) as vs:
self._add_variable_scope(variable_scope_name, vs)
log_info("Transforming feature '%s' with %s.targets_bottom",
feature_name,
modality_name)
transformed_features[feature_name] = bottom(features[feature_name],
self._hparams,
vocab_size)
else:
bottom = self._hparams.bottom.get(feature_name,
modalities.get_bottom(modality))
do_reuse = modality_name in all_previous_modalities
with tf.variable_scope(modality_name, reuse=do_reuse) as vs:
self._add_variable_scope(modality_name, vs)
log_info("Transforming feature '%s' with %s.bottom",
feature_name,
modality_name)
transformed_features[feature_name] = bottom(features[feature_name],
self._hparams,
vocab_size)
all_previous_modalities.append(modality_name)
for key in features:
if key not in transformed_features:
# For features without a modality, we pass them along as is
transformed_features[key] = features[key]
else:
# Features that were transformed also keep their untransformed values,
# under the "_raw" suffix.
transformed_features[key + "_raw"] = features[key]
return transformed_features |
<SYSTEM_TASK:>
Computes logits given body output and features.
<END_TASK>
<USER_TASK:>
Description:
def top(self, body_output, features):
"""Computes logits given body output and features.
Args:
body_output: dict of str to Tensor, comprising one key-value pair for each
target. Each value denotes the target's pre-logit activations.
Alternatively, it may be a single Tensor denoting the pre-logits for
that target.
features: dict of str to Tensor. Typically it is the preprocessed data
batch after Problem's preprocess_example().
Returns:
logits: dict of str to Tensor, denoting the logits for each target; or
a single Tensor denoting the logits for that target.
When targets are generated at training time:
logits == {
"self_generated_targets": <generated targets tensor>
"logits": <original logits Tensor or dict>
}
""" |
if isinstance(body_output, dict):
logits = {}
for k, v in six.iteritems(body_output):
# TODO(aidangomez): share variables here?
with tf.variable_scope(k) as top_vs:
self._add_variable_scope("top_%s" % k, top_vs)
logits[k] = self._top_single(v, k, features)
return logits
else:
return self._top_single(body_output, "targets", features) |
<SYSTEM_TASK:>
Return a training op minimizing loss.
<END_TASK>
<USER_TASK:>
Description:
def optimize(self, loss, num_async_replicas=1, use_tpu=False):
"""Return a training op minimizing loss.""" |
lr = learning_rate.learning_rate_schedule(self.hparams)
if num_async_replicas > 1:
log_info("Dividing learning rate by num_async_replicas: %d",
num_async_replicas)
lr /= math.sqrt(float(num_async_replicas))
train_op = optimize.optimize(loss, lr, self.hparams, use_tpu=use_tpu)
return train_op |
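For intuition, a tiny numeric sketch (illustrative values, not from the library) of the learning-rate scaling above: with 4 asynchronous replicas the rate is divided by sqrt(4) = 2, so 0.2 becomes 0.1:
import math
num_async_replicas = 4
scaled_lr = 0.2 / math.sqrt(float(num_async_replicas))   # 0.2 / 2.0 == 0.1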
<SYSTEM_TASK:>
Set hparams with the given mode.
<END_TASK>
<USER_TASK:>
Description:
def set_mode(self, mode):
"""Set hparams with the given mode.""" |
log_info("Setting T2TModel mode to '%s'", mode)
hparams = hparams_lib.copy_hparams(self._original_hparams)
hparams.add_hparam("mode", mode)
# When not in training mode, set all forms of dropout to zero.
if mode != tf.estimator.ModeKeys.TRAIN:
for key in hparams.values():
if key.endswith("dropout") or key == "label_smoothing":
log_info("Setting hparams.%s to 0.0", key)
setattr(hparams, key, 0.0)
self._hparams = hparams |
<SYSTEM_TASK:>
Autoregressive eval.
<END_TASK>
<USER_TASK:>
Description:
def eval_autoregressive(self, features=None, decode_length=50):
"""Autoregressive eval.
Quadratic time in decode_length.
Args:
features: a map of string to `Tensor`
decode_length: an integer. How many additional timesteps to decode.
Returns:
logits: `Tensor`
losses: a dictionary: {loss-name (string): floating point `Scalar`}.
Contains a single key "training".
""" |
results = self._slow_greedy_infer(features, decode_length=decode_length)
return results["logits"], results["losses"] |
<SYSTEM_TASK:>
An inference method.
<END_TASK>
<USER_TASK:>
Description:
def infer(self,
features=None,
decode_length=50,
beam_size=1,
top_beams=1,
alpha=0.0,
use_tpu=False):
"""A inference method.
Quadratic time in decode_length.
Args:
features: a map of string to `Tensor`
decode_length: an integer. How many additional timesteps to decode.
beam_size: number of beams.
top_beams: an integer. How many of the beams to return.
alpha: Float that controls the length penalty. The larger the alpha, the
stronger the preference for longer translations.
use_tpu: bool, whether to build the inference graph for TPU.
Returns:
A dict of decoding results {
"outputs": integer `Tensor` of decoded ids of shape
[batch_size, <= decode_length] if beam_size == 1 or
[batch_size, top_beams, <= decode_length]
"scores": decoding log probs from the beam search,
None if using greedy decoding (beam_size=1)
}
if slow greedy decoding is used then the dict will also contain {
"logits": `Tensor` of shape [batch_size, time, 1, 1, vocab_size].
"losses": a dictionary: {loss-name (string): floating point `Scalar`
}
""" |
set_custom_getter_compose(self._custom_getter)
with self._eager_var_store.as_default():
# TODO(rsepassi): Make decoding work with real-valued model outputs
# (i.e. if the target modality is RealModality).
self.prepare_features_for_infer(features)
if not self.has_input and beam_size > 1:
log_warn("Beam searching for a model with no inputs.")
if not self.has_input and self.hparams.sampling_method != "random":
log_warn("Non-random sampling for a model with no inputs.")
self._fill_problem_hparams_features(features)
if self._problem_hparams:
target_modality = self._problem_hparams.modality["targets"]
if target_modality == modalities.ModalityType.CLASS_LABEL:
beam_size = 1 # No use to run beam-search for a single class.
if beam_size == 1:
log_info("Greedy Decoding")
results = self._greedy_infer(features, decode_length, use_tpu)
else:
log_info("Beam Decoding with beam size %d" % beam_size)
results = self._beam_decode(features, decode_length, beam_size,
top_beams, alpha, use_tpu)
return results |
<SYSTEM_TASK:>
Beam search decoding.
<END_TASK>
<USER_TASK:>
Description:
def _beam_decode(self,
features,
decode_length,
beam_size,
top_beams,
alpha,
use_tpu=False):
"""Beam search decoding.
Models should ideally implement a more efficient version of this function.
Args:
features: a map of string to `Tensor`
decode_length: an integer. How many additional timesteps to decode.
beam_size: number of beams.
top_beams: an integer. How many of the beams to return.
alpha: Float that controls the length penalty. The larger the alpha, the
stronger the preference for longer translations.
use_tpu: A bool, whether to do beam decode on TPU.
Returns:
samples: an integer `Tensor`. Top samples from the beam search
""" |
return self._beam_decode_slow(features, decode_length, beam_size, top_beams,
alpha, use_tpu) |
<SYSTEM_TASK:>
A greedy inference method.
<END_TASK>
<USER_TASK:>
Description:
def _greedy_infer(self, features, decode_length, use_tpu=False):
"""A greedy inference method.
Models should ideally implement a more efficient version of this function.
Args:
features: a map of string to `Tensor`
decode_length: an integer. How many additional timesteps to decode.
use_tpu: A bool, whether to build the inference graph for TPU.
Returns:
A dict of decoding results {
"outputs": integer `Tensor` of decoded ids of shape
[batch_size, <= decode_length] if beam_size == 1 or
[batch_size, top_beams, <= decode_length]
"scores": None
"logits": `Tensor` of shape [batch_size, time, 1, 1, vocab_size].
"losses": a dictionary: {loss-name (string): floating point `Scalar`}
}
""" |
if use_tpu:
return self._slow_greedy_infer_tpu(features, decode_length)
return self._slow_greedy_infer(features, decode_length) |
<SYSTEM_TASK:>
Run the model and extract samples.
<END_TASK>
<USER_TASK:>
Description:
def sample(self, features):
"""Run the model and extract samples.
Args:
features: a map of string to `Tensor`.
Returns:
samples: an integer `Tensor`.
logits: a list of `Tensor`s, one per datashard.
losses: a dictionary: {loss-name (string): floating point `Scalar`}.
""" |
logits, losses = self(features) # pylint: disable=not-callable
if self._target_modality_is_real:
return logits, logits, losses # Raw numbers returned from real modality.
if self.hparams.sampling_method == "argmax":
samples = tf.argmax(logits, axis=-1)
else:
assert self.hparams.sampling_method == "random"
def multinomial_squeeze(logits, temperature=1.0):
logits_shape = common_layers.shape_list(logits)
reshaped_logits = (
tf.reshape(logits, [-1, logits_shape[-1]]) / temperature)
choices = tf.multinomial(reshaped_logits, 1)
choices = tf.reshape(choices, logits_shape[:-1])
return choices
samples = multinomial_squeeze(logits, self.hparams.sampling_temp)
return samples, logits, losses |
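A hedged NumPy sketch (not the T2T code path) of the flatten-sample-reshape trick used by multinomial_squeeze: categorical sampling expects 2-D logits, so higher-rank logits are flattened to [-1, vocab], sampled once per row, and reshaped back to the leading dimensions:
import numpy as np
def multinomial_squeeze_np(logits, temperature=1.0):
  shape = logits.shape                              # e.g. (batch, length, vocab)
  flat = logits.reshape(-1, shape[-1]) / temperature
  probs = np.exp(flat - flat.max(axis=-1, keepdims=True))
  probs /= probs.sum(axis=-1, keepdims=True)
  choices = np.array([np.random.choice(shape[-1], p=p) for p in probs])
  return choices.reshape(shape[:-1])                # ids, vocab axis removed
samples = multinomial_squeeze_np(np.random.randn(2, 5, 7))
assert samples.shape == (2, 5)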
<SYSTEM_TASK:>
Adds `tf.summary`s to all terms in the losses dictionary.
<END_TASK>
<USER_TASK:>
Description:
def _summarize_losses(self, losses_dict):
"""Adds `tf.summary`s to all terms in the losses dictionary.""" |
if common_layers.should_generate_summaries():
with tf.name_scope("losses"):
for loss_name, loss_val in sorted(losses_dict.items()):
tf.summary.scalar(loss_name, loss_val) |
<SYSTEM_TASK:>
Scheduled sampling.
<END_TASK>
<USER_TASK:>
Description:
def maybe_scheduled_sampling(self, features, logits, losses):
"""Scheduled sampling.
Performs forward inference again with "targets" feature replaced with values
sampled from the model.
This is the identity unless self.hparams.scheduled_sampling_prob > 0
(the default is 0).
**WARNING**: This is not a faithful implementation of scheduled sampling.
This implementation samples tokens for timestep t conditioned on gold tokens
1...t-1. A proper implementation must condition on a mix of gold and
sampled tokens. Doing so is not efficient for models such as the Transformer.
Args:
features: {str: Tensor}. Features sharded along batch dimension.
logits: Tensor. Logits for each shard of data.
losses: 0-D Tensor or (num: 0-D Tensor, denom: 0-D Tensor). Loss Tensor(s) for this shard.
Returns:
new_logits: Tensor.
new_losses: {str: loss} where loss is one of (i) a 0-D Tensor or
(ii) a (num: 0-D Tensor, denom: 0-D Tensor) pair to be used in a
weighted average.
""" |
hparams = self.hparams
problem_hparams = self._problem_hparams
# Only do scheduled sampling if requested.
if hparams.scheduled_sampling_prob == 0.0:
return (logits, losses)
# Only do scheduled sampling on language tasks.
modality = problem_hparams.modality["targets"]
if modality != modalities.ModalityType.SYMBOL:
assert hparams.scheduled_sampling_prob == 0, (
"Scheduled sampling only applies to ModalityType.SYMBOL. Set "
"hparams.scheduled_sampling_prob == 0.0.")
return (logits, losses)
# Only do scheduled sampling when training.
is_training = (hparams.mode == tf.estimator.ModeKeys.TRAIN)
if not is_training:
tf.logging.info("Running in %s mode. Not using scheduled sampling.",
hparams.mode)
return (logits, losses)
# Pad vocabulary if vocab size must be evenly divisible by vocab_divisor.
vocab_size = problem_hparams.vocab_size["targets"]
assert vocab_size is not None
assert hparams.vocab_divisor == 1
def sample(x):
"""Multinomial sampling from a n-dimensional tensor."""
samples = tf.multinomial(tf.reshape(x, [-1, vocab_size]), 1)
reshaped_samples = tf.reshape(samples, common_layers.shape_list(x)[:-1])
return tf.to_int32(reshaped_samples)
def mix_gold_sampled(gold_targets, sampled_targets, mixin_prob):
"""Interleave sampled and gold tokens randomly."""
return tf.where(
tf.less(
tf.random_uniform(common_layers.shape_list(sampled_targets)),
mixin_prob),
sampled_targets,
gold_targets)
def sampled_results(features, logits, mixin_prob):
"""Generate scheduled sampling results."""
sampled_targets = sample(logits)
new_targets = mix_gold_sampled(features["targets"],
sampled_targets,
mixin_prob)
new_targets = tf.stop_gradient(new_targets) # Treat new_targets as given.
new_features = copy.copy(features)
new_features["targets"] = new_targets
with tf.variable_scope(tf.get_variable_scope(), reuse=True):
# Compute bottom() for new_targets.
#
# TODO(duckworthd): Only apply bottom to 'new_targets'.
new_transformed_features = self.bottom(new_features)
# Compute body.
with tf.variable_scope("body"):
new_body_outputs, new_losses = self._normalize_body_output(
self.body(new_transformed_features))
assert "training" not in new_losses
# Compute top.
new_logits = self.top(new_body_outputs, new_features)
# Compute loss. Use original features (== labels).
if (hparams.mode != tf.estimator.ModeKeys.PREDICT and
hparams.mode != "attack"):
new_losses["training"] = self.loss(new_logits, features)
else:
new_losses["training"] = 0.0
return new_logits, new_losses
tf.logging.info("Using scheduled sampling.")
assert hparams.scheduled_sampling_prob == 1.0, (
"hparams.scheduled_sampling_prob must be 0 or 1.")
# Gradually increase over a warmup period. Lower numbers mean more gold
# tokens.
mixin_prob = (
hparams.scheduled_sampling_gold_mixin_prob *
common_layers.inverse_exp_decay(
hparams.scheduled_sampling_warmup_steps,
min_value=0.001)
)
# Apply scheduled sampling over N passes. The logits from the (n-1)-th pass
# will be mixed with gold tokens for conditioning in the n-th pass.
scheduled_sampling_num_passes = getattr(
hparams, "scheduled_sampling_num_passes", 1)
assert scheduled_sampling_num_passes > 0, (
"hparams.scheduled_sampling_num_passes must be > 0 if "
"hparams.scheduled_sampling_prob > 0.0")
new_logits = logits
new_losses = losses
for _ in range(scheduled_sampling_num_passes):
new_logits, new_losses = sampled_results(features, new_logits, mixin_prob)
return new_logits, new_losses |
<SYSTEM_TASK:>
Duplicate elements of bc by length_factor.
<END_TASK>
<USER_TASK:>
Description:
def expand_batch_coordinates(bc, length_factor):
"""Duplicate elements of bc by length_factor.
Args:
bc (tf.Tensor): int32 tensor of shape [1, length, 1]
length_factor (int):
Returns:
tf.Tensor: of shape [1, length*length_factor, 1] where every element has
been duplicated length_factor times.
""" |
assert bc.get_shape().as_list() == [1, None, 1]
# bc has shape [1, length, 1]
bc *= tf.constant([[1] * length_factor])
# bc has shape [1, length, length_factor]
bc = tf.reshape(bc, [1, -1, 1])
# bc has shape [1, length*length_factor, 1]
return bc |
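A hedged NumPy sketch (hypothetical values, not part of the library) of the broadcast-then-reshape trick above: each batch coordinate is repeated length_factor times in place:
import numpy as np
bc = np.array([[[0], [0], [1]]])                          # shape (1, 3, 1)
length_factor = 2
expanded = (bc * np.ones((1, length_factor), np.int32)).reshape(1, -1, 1)
assert expanded[0, :, 0].tolist() == [0, 0, 0, 0, 1, 1]   # shape (1, 6, 1)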
<SYSTEM_TASK:>
Remove padding by concatenating all dimensions into one.
<END_TASK>
<USER_TASK:>
Description:
def remove_pad(x, pad_remover, mode):
"""Remove padding by concatenating all dimension into one.
Args:
x (tf.Tensor): input of shape [batch_size, length, depth]
pad_remover (obj): a PadRemover object
mode (ModeKeys): infer, train or eval. If inference, the padding remover is
not applied
Returns:
tf.Tensor of shape [1,length_nonpad,depth] where
length_nonpad <= batch_size*length
""" |
# Concatenate all tokens (without padding)
x = expert_utils.flatten_all_but_last(x)
# Remove padding for training and eval
if mode != ModeKeys.PREDICT:
# This is a hack to allow inference when the <go> token
# is detected as padding and removed. This works for now because there is
# no padding at inference.
x = pad_remover.remove(x)
x = tf.expand_dims(x, axis=0) # Now batch_size=1
return x |
<SYSTEM_TASK:>
Experiment with the exp_factor params.
<END_TASK>
<USER_TASK:>
Description:
def attention_lm_ae_extended():
"""Experiment with the exp_factor params.""" |
hparams = attention_lm_moe_base_long_seq()
hparams.attention_layers = "eeee"
hparams.attention_local = True
# hparams.factored_logits = 1  # Necessary when the number of experts grows bigger
hparams.attention_moe_k = 2
hparams.attention_exp_factor = 4
# hparams.attention_exp_inputdim = 128
hparams.layer_preprocess_sequence = "n"
hparams.layer_postprocess_sequence = "da"
return hparams |
<SYSTEM_TASK:>
Large model for distributed training.
<END_TASK>
<USER_TASK:>
Description:
def attention_lm_moe_large():
"""Large model for distributed training.
Over 1B parameters, so requires multi-GPU training due to memory
requirements.
On lm1b_32k, after 45K steps on 8 GPUs (synchronous):
eval_log_ppl_per_token = 3.18
eval_ppl_per_word = exp(1.107893 * eval_log_ppl_per_token) = 33.9
Returns:
an hparams object.
""" |
hparams = attention_lm_moe_base()
hparams.num_hidden_layers = 5
hparams.moe_layers = "3"
hparams.hidden_size = 1024
hparams.num_heads = 16
hparams.filter_size = 4096
hparams.moe_hidden_sizes = "4096"
hparams.moe_num_experts = 128
hparams.layer_prepostprocess_dropout = 0.2
return hparams |
<SYSTEM_TASK:>
Unnecessarily large model with 24B params - because we can.
<END_TASK>
<USER_TASK:>
Description:
def attention_lm_moe_24b_diet():
"""Unnecessarily large model with 24B params - because we can.""" |
hparams = attention_lm_moe_large_diet()
hparams.moe_hidden_sizes = "12288"
hparams.moe_num_experts = 1024
hparams.batch_size = 4096
return hparams |
<SYSTEM_TASK:>
Transform input from data space to model space.
<END_TASK>
<USER_TASK:>
Description:
def audio_bottom(x, model_hparams, vocab_size):
"""Transform input from data space to model space.
Args:
x: A Tensor with shape [batch, ...]
model_hparams: HParams, model hyperparameters.
vocab_size: int, vocabulary size.
Returns:
body_input: A Tensor with shape [batch, ?, ?,
model_hparams.hidden_size].
""" |
del vocab_size # unused arg
inputs = x
with tf.variable_scope("audio_modality"):
# TODO(aidangomez): Will need to sort out a better audio pipeline
def xnet_resblock(x, filters, res_relu, name):
"""Xception block."""
with tf.variable_scope(name):
# Typically audio samples are >100k samples in length and have a width
# of 2 or 4. Mono audio has a single channel while stereo has 2.
y = common_layers.separable_conv_block(
x,
filters, [((1, 1), (3, 3)), ((1, 1), (3, 3))],
first_relu=True,
padding="SAME",
force2d=True,
name="sep_conv_block")
y = common_layers.pool(y, (3, 3), "MAX", "SAME", strides=(2, 2))
return y + common_layers.conv_block(
x,
filters, [((1, 1), (1, 1))],
padding="SAME",
strides=(2, 2),
first_relu=res_relu,
force2d=True,
name="res_conv0")
x = tf.to_float(inputs) / 255.
x.set_shape([None, None, None, 1])
for i in range(model_hparams.audio_compression):
x = xnet_resblock(x, 2**(i + 1), True, "compress_block_%d" % i)
return xnet_resblock(x,
model_hparams.hidden_size,
False,
"compress_block_final") |
<SYSTEM_TASK:>
Compresses channel-wise input pixels into whole pixel representations.
<END_TASK>
<USER_TASK:>
Description:
def _image_channel_compress_bottom(inputs, model_hparams, name="bottom"):
"""Compresses channel-wise input pixels into whole pixel representions.
Perform conversion of RGB pixel values to a real number in the range -1 to
1. This combines pixel channels to form a representation of shape
[img_len, img_len].
Args:
inputs: Tensor representing RGB pixel intensities as integers, of shape
[batch, img_len, img_len, channels].
model_hparams: HParams, model hyperparameters.
name: string, scope.
Returns:
body_input: Tensor of shape
[batch, img_len, img_len, model_hparams.hidden_size].
""" |
num_channels = 3
with tf.variable_scope(name):
inputs = tf.to_float(inputs)
hp = model_hparams
if hp.mode != tf.estimator.ModeKeys.PREDICT:
tf.summary.image(
"inputs",
common_layers.tpu_safe_image_summary(inputs),
max_outputs=2)
inputs = common_layers.convert_rgb_to_symmetric_real(inputs)
# Reshape inputs to apply convolutions across [img_len, img_len*channels].
inputs_shape = common_layers.shape_list(inputs)
inputs = tf.reshape(
inputs, [-1, inputs_shape[1], inputs_shape[2] * inputs_shape[3], 1])
# Compress RGB intensities for each pixel using a convolution.
outputs = tf.layers.conv2d(
inputs,
model_hparams.hidden_size,
kernel_size=(1, num_channels),
padding="VALID",
strides=(1, num_channels),
activation=tf.nn.relu,
name="conv_input")
return outputs |
<SYSTEM_TASK:>
Bottom transformation for image targets.
<END_TASK>
<USER_TASK:>
Description:
def image_channel_embeddings_bottom(x, model_hparams, vocab_size):
"""Bottom transformation for image targets.""" |
del vocab_size # unused arg
inputs = tf.to_int32(x)
io_depth = model_hparams.num_channels
tshape = common_layers.shape_list(inputs)
hidden_size = model_hparams.hidden_size
target_embeddings = cia.get_channel_embeddings(
io_depth, inputs, hidden_size, "input_bottom")
return tf.reshape(target_embeddings,
[tshape[0], tshape[1], tshape[2] * io_depth, hidden_size]) |
<SYSTEM_TASK:>
Use batchnorm instead of CMVN and shorten the stft with strided convs.
<END_TASK>
<USER_TASK:>
Description:
def speech_recognition_bottom(x, model_hparams, vocab_size):
"""Use batchnorm instead of CMVN and shorten the stft with strided convs.
Args:
x: float32 tensor with shape [batch_size, len, 1, freqs * channels]
model_hparams: HParams, model hyperparameters.
vocab_size: int, vocabulary size.
Returns:
float32 tensor with shape [batch_size, shorter_len, 1, hidden_size]
""" |
del vocab_size # unused arg
inputs = x
p = model_hparams
num_mel_bins = p.audio_num_mel_bins
num_channels = 3 if p.audio_add_delta_deltas else 1
with tf.variable_scope("speech_recognition_modality"):
if p.audio_preproc_in_bottom:
# Compute filterbanks
with tf.variable_scope("fbanks"):
waveforms = tf.squeeze(inputs, [2, 3])
mel_fbanks = common_audio.compute_mel_filterbank_features(
waveforms,
sample_rate=p.audio_sample_rate,
dither=p.audio_dither,
preemphasis=p.audio_preemphasis,
frame_length=p.audio_frame_length,
frame_step=p.audio_frame_step,
lower_edge_hertz=p.audio_lower_edge_hertz,
upper_edge_hertz=p.audio_upper_edge_hertz,
num_mel_bins=p.audio_num_mel_bins,
apply_mask=True)
if p.audio_add_delta_deltas:
mel_fbanks = common_audio.add_delta_deltas(mel_fbanks)
x = tf.reshape(mel_fbanks,
common_layers.shape_list(mel_fbanks)[:2] +
[num_mel_bins, num_channels])
nonpadding_mask = 1. - common_attention.embedding_to_padding(x)
num_of_nonpadding_elements = tf.reduce_sum(
nonpadding_mask) * num_mel_bins * num_channels
# This replaces CMVN estimation on data
var_epsilon = 1e-09
mean = tf.reduce_sum(
x, axis=[1], keepdims=True) / num_of_nonpadding_elements
variance = (num_of_nonpadding_elements * mean**2. -
2. * mean * tf.reduce_sum(x, axis=[1], keepdims=True) +
tf.reduce_sum(x**2, axis=[1], keepdims=True)
) / num_of_nonpadding_elements
x = (x - mean) * tf.rsqrt(variance + var_epsilon) * tf.expand_dims(
nonpadding_mask, -1)
else:
x = inputs
# The convention is that the models are flattened along the spatial
# dimensions; thus the speech preprocessor treats frequencies and
# channels as image colors (last axis).
x.set_shape([None, None, num_mel_bins, num_channels])
# TODO(chorowski): how to specify bottom's hparams and avoid hardcoding?
x = tf.pad(x, [[0, 0], [0, 8], [0, 0], [0, 0]])
for _ in range(2):
x = tf.layers.conv2d(
x, 128, (3, 3), (2, 2), use_bias=False)
x = common_layers.layer_norm(x)
x = tf.nn.relu(x)
xshape = common_layers.shape_list(x)
# apply a conv that will remove all frequencies and at the same time
# project the output into desired hidden_size
x = tf.pad(x, [[0, 0], [0, 2], [0, 0], [0, 0]])
x = tf.layers.conv2d(x, p.hidden_size, (3, xshape[2]), use_bias=False)
assert common_layers.shape_list(x)[2] == 1
x = common_layers.layer_norm(x)
x = tf.nn.relu(x)
return x |
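The per-utterance normalization above relies on the streaming identity var = (n*mean^2 - 2*mean*sum(x) + sum(x^2)) / n = mean(x^2) - mean(x)^2. A small NumPy check of this identity (hypothetical data, not part of the T2T code):
import numpy as np
x = np.random.randn(1000).astype(np.float32)
n = float(x.size)
mean = x.sum() / n
var_streaming = (n * mean**2 - 2. * mean * x.sum() + (x**2).sum()) / n
assert np.allclose(var_streaming, x.var(), atol=1e-4)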
<SYSTEM_TASK:>
Create or get concatenated embedding or softmax variable.
<END_TASK>
<USER_TASK:>
Description:
def get_weights(model_hparams, vocab_size, hidden_dim=None):
"""Create or get concatenated embedding or softmax variable.
Args:
model_hparams: HParams, model hyperparameters.
vocab_size: int, vocabulary size.
hidden_dim: dim of the variable. Defaults to model_hparams' hidden_size
Returns:
a single Tensor of shape [vocab_size, hidden_dim]: the concatenation of the num_shards weight shards.
""" |
if hidden_dim is None:
hidden_dim = model_hparams.hidden_size
num_shards = model_hparams.symbol_modality_num_shards
shards = []
for i in range(num_shards):
shard_size = (vocab_size // num_shards) + (
1 if i < vocab_size % num_shards else 0)
var_name = "weights_%d" % i
shards.append(
tf.get_variable(
var_name, [shard_size, hidden_dim],
initializer=tf.random_normal_initializer(0.0, hidden_dim**-0.5)))
if num_shards == 1:
ret = shards[0]
else:
ret = tf.concat(shards, 0)
# Convert ret to tensor.
if not tf.executing_eagerly():
ret = common_layers.convert_gradient_to_tensor(ret)
return ret |
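A quick arithmetic check (plain Python, illustrative numbers) of the shard sizing above: the first vocab_size % num_shards shards get one extra row, so the shard sizes always sum to vocab_size:
vocab_size, num_shards = 10, 3
shard_sizes = [(vocab_size // num_shards) + (1 if i < vocab_size % num_shards else 0)
               for i in range(num_shards)]
assert shard_sizes == [4, 3, 3]
assert sum(shard_sizes) == vocab_size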
<SYSTEM_TASK:>
Bottom transformation for symbols.
<END_TASK>
<USER_TASK:>
Description:
def _symbol_bottom_simple(x, model_hparams, vocab_size, name, reuse):
"""Bottom transformation for symbols.""" |
with tf.variable_scope(name, reuse=reuse):
# Ensure the inputs are 3-D
if len(x.get_shape()) == 4:
x = tf.squeeze(x, axis=3)
while len(x.get_shape()) < 3:
x = tf.expand_dims(x, axis=-1)
var = get_weights(model_hparams, vocab_size)
x = common_layers.dropout_no_scaling(
x, 1.0 - model_hparams.symbol_dropout)
ret = common_layers.gather(var, x)
if model_hparams.multiply_embedding_mode == "sqrt_depth":
ret *= model_hparams.hidden_size**0.5
ret *= tf.expand_dims(
common_layers.cast_like(tf.not_equal(x, 0), ret), -1)
return ret |
<SYSTEM_TASK:>
Bottom transformation for embedding video bitwise.
<END_TASK>
<USER_TASK:>
Description:
def video_bitwise_bottom(x, model_hparams, vocab_size):
"""Bottom transformation for embedding video bitwise.""" |
pixel_embedding_size = 64
inputs = x
with tf.variable_scope("video_modality_bitwise", reuse=tf.AUTO_REUSE):
common_layers.summarize_video(inputs, "bottom")
# Embed bitwise.
assert vocab_size == 256
embedded = discretization.int_to_bit_embed(inputs, 8,
pixel_embedding_size)
# Project.
return tf.layers.dense(
embedded,
model_hparams.hidden_size,
name="merge_pixel_embedded_frames") |
<SYSTEM_TASK:>
Bottom transformation for video.
<END_TASK>
<USER_TASK:>
Description:
def video_pixel_noise_bottom(x, model_hparams, vocab_size):
"""Bottom transformation for video.""" |
input_noise = getattr(model_hparams, "video_modality_input_noise", 0.25)
inputs = x
if model_hparams.mode == tf.estimator.ModeKeys.TRAIN:
background = tfp.stats.percentile(inputs, 50., axis=[0, 1, 2, 3])
input_shape = common_layers.shape_list(inputs)
input_size = tf.reduce_prod(input_shape[:-1])
input_mask = tf.multinomial(
tf.log([[input_noise, 1.-input_noise]]), input_size)
input_mask = tf.reshape(tf.cast(input_mask, tf.int32),
input_shape[:-1]+[1])
inputs = inputs * input_mask + background * (1 - input_mask)
return video_bottom(inputs, model_hparams, vocab_size) |
<SYSTEM_TASK:>
Convert prediction and target from rgb to real.
<END_TASK>
<USER_TASK:>
Description:
def convert_rgb_to_real(prediction, targets):
"""Convert prediction and target from rgb to real.""" |
prediction = tf.squeeze(prediction, axis=-1)
prediction = common_layers.convert_rgb_to_real(prediction)
targets = common_layers.convert_rgb_to_real(targets)
return prediction, targets |
<SYSTEM_TASK:>
Average loss over the labels.
<END_TASK>
<USER_TASK:>
Description:
def multi_label_loss(top_out, targets, model_hparams, vocab_size, weights_fn):
"""Average loss over the labels.""" |
del vocab_size # unused arg
logits = top_out
num_labels = tf.shape(targets)[1]
logits = tf.tile(logits, [1, num_labels, 1, 1, 1])
xent, weights = common_layers.padded_cross_entropy(
logits,
targets,
model_hparams.label_smoothing,
weights_fn=weights_fn,
reduce_sum=False,
)
xent = tf.squeeze(xent, [2, 3])
weights = tf.squeeze(weights, [2, 3])
# average loss over all labels
loss = tf.reduce_sum(xent, axis=1)
weights = tf.reduce_sum(weights, axis=1)
loss /= (weights + 1e-8)
weights = tf.to_float(tf.greater(weights, 0.))
return tf.reduce_sum(loss*weights), tf.reduce_sum(weights) |
<SYSTEM_TASK:>
Apply softmax cross-entropy between outputs and targets.
<END_TASK>
<USER_TASK:>
Description:
def one_hot_class_label_loss(top_out,
targets,
model_hparams,
vocab_size,
weights_fn):
"""Apply softmax cross-entropy between outputs and targets.
Args:
top_out: logits Tensor with shape [batch, ?, ?, num_classes]
targets: one-hot encoding Tensor with shape [batch, ?, ?, num_classes]
model_hparams: HParams, model hyperparameters.
vocab_size: int, vocabulary size.
weights_fn: function mapping targets to per-example weights; their sum is
used as the loss denominator.
Returns:
loss_scale (cross-entropy), loss_denom
""" |
del model_hparams, vocab_size # unused arg
loss_scale = tf.losses.softmax_cross_entropy(
onehot_labels=targets, logits=top_out)
weights = weights_fn(targets)
loss_denom = tf.reduce_sum(weights)
return loss_scale, loss_denom |
<SYSTEM_TASK:>
Transform inputs from model space to target space.
<END_TASK>
<USER_TASK:>
Description:
def class_label_top(body_output, targets, model_hparams, vocab_size):
"""Transform inputs from model space to target space.
Averages over inner dims, then applies a linear layer to produce logits.
Args:
body_output: A Tensor with shape [batch, ?, ?, body_output_size].
targets: Unused.
model_hparams: HParams, model hyperparameters.
vocab_size: int, vocabulary size.
Returns:
a Tensor with shape [batch_size, 1, 1, 1, vocab_size]
""" |
del targets # unused arg
with tf.variable_scope("class_label_modality_%d_%d" % (
vocab_size, model_hparams.hidden_size)):
x = body_output
x = tf.reduce_mean(x, axis=[1, 2], keepdims=True)
res = tf.layers.dense(x, vocab_size)
return tf.expand_dims(res, 3) |
<SYSTEM_TASK:>
Transforms body output to return logits.
<END_TASK>
<USER_TASK:>
Description:
def image_channel_compress_top(body_output, targets, model_hparams, vocab_size):
"""Transforms body output to return logits.
Args:
body_output: Tensor of shape [batch, img_len, img_len, depth].
targets: Unused.
model_hparams: HParams, model hyperparameters.
vocab_size: int, vocabulary size.
Returns:
Tensor of shape [batch, img_len, img_len, channels, vocab_size].
""" |
del targets # unused arg
with tf.variable_scope("image_channel_compress_modality"):
hidden_size = model_hparams.hidden_size
img_len = model_hparams.img_len
channels = 3 # RGB
batch = common_layers.shape_list(body_output)[0]
x = tf.layers.conv2d(
body_output,
hidden_size * channels,
kernel_size=(1, 1),
strides=(1, 1),
padding="VALID",
activation=tf.nn.relu,
name="decompress_conv")
x = tf.reshape(x, [batch, img_len, img_len * channels, hidden_size])
x = common_layers.layer_preprocess(x, model_hparams)
x = tf.layers.dense(x,
vocab_size,
use_bias=True,
activation=None,
name="output_conv")
x = tf.reshape(
x, [batch, img_len, img_len, channels, vocab_size])
return x |
<SYSTEM_TASK:>
Generate logits.
<END_TASK>
<USER_TASK:>
Description:
def symbol_top(body_output, targets, model_hparams, vocab_size):
"""Generate logits.
Args:
body_output: A Tensor with shape
[batch, p0, p1, model_hparams.hidden_size].
targets: Unused.
model_hparams: HParams, model hyperparameters.
vocab_size: int, vocabulary size.
Returns:
logits: A Tensor with shape [batch, p0, p1, ?, vocab_size].
""" |
del targets # unused arg
if model_hparams.shared_embedding_and_softmax_weights:
scope_name = "shared"
reuse = tf.AUTO_REUSE
else:
scope_name = "softmax"
reuse = False
with tf.variable_scope(scope_name, reuse=reuse):
body_output_shape = common_layers.shape_list(body_output)
var = get_weights(model_hparams, vocab_size, body_output_shape[-1])
if (model_hparams.factored_logits and
model_hparams.mode == tf.estimator.ModeKeys.TRAIN):
# insert channels dimension
body_output = tf.expand_dims(body_output, 3)
return common_layers.FactoredTensor(body_output, var)
else:
body_output = tf.reshape(body_output, [-1, body_output_shape[-1]])
logits = tf.matmul(body_output, var, transpose_b=True)
return tf.reshape(logits,
body_output_shape[:-1] + [1, vocab_size]) |
<SYSTEM_TASK:>
Generates all possible pair combinations for the input list of sentences.
<END_TASK>
<USER_TASK:>
Description:
def create_combination(list_of_sentences):
"""Generates all possible pair combinations for the input list of sentences.
For example:
input = ["paraphrase1", "paraphrase2", "paraphrase3"]
output = [("paraphrase1", "paraphrase2"),
("paraphrase1", "paraphrase3"),
("paraphrase2", "paraphrase3")]
Args:
list_of_sentences: the list of input sentences.
Returns:
the list of all possible sentence pairs.
""" |
num_sentences = len(list_of_sentences) - 1
combinations = []
for i, _ in enumerate(list_of_sentences):
if i == num_sentences:
break
num_pairs = num_sentences - i
populated = num_pairs * [list_of_sentences[i]]
zipped = list(zip(populated, list_of_sentences[i + 1:]))
combinations += zipped
return combinations |
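Usage sketch, mirroring the example in the docstring:
pairs = create_combination(["paraphrase1", "paraphrase2", "paraphrase3"])
assert pairs == [("paraphrase1", "paraphrase2"),
                 ("paraphrase1", "paraphrase3"),
                 ("paraphrase2", "paraphrase3")]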
<SYSTEM_TASK:>
Hparams for 8-layer big 2d model for CIFAR-10.
<END_TASK>
<USER_TASK:>
Description:
def imagetransformer2d_base_8l_8_32_big():
"""hparams fo 8 layer big 2d model for cifar 10.""" |
hparams = image_transformer2d_base()
hparams.num_heads = 16
hparams.hidden_size = 1024
hparams.filter_size = 2048
hparams.num_decoder_layers = 8
hparams.batch_size = 1
hparams.layer_prepostprocess_dropout = 0.3
hparams.query_shape = (8, 16)
hparams.memory_flange = (0, 32)
hparams.unconditional = int(False)
return hparams |
<SYSTEM_TASK:>
Base params for local1d attention.
<END_TASK>
<USER_TASK:>
Description:
def img2img_transformer_base():
"""Base params for local1d attention.""" |
hparams = image_transformer2d_base()
# learning related flags
hparams.layer_preprocess_sequence = "n"
hparams.layer_postprocess_sequence = "da"
# This version seems to benefit from a higher learning rate.
hparams.learning_rate = 0.2
hparams.layer_prepostprocess_dropout = 0.1
hparams.learning_rate_warmup_steps = 12000
hparams.filter_size = 2048
hparams.num_encoder_layers = 4
hparams.num_decoder_layers = 8
hparams.block_length = 256
hparams.block_width = 256
hparams.dec_attention_type = cia.AttentionType.LOCAL_1D
hparams.block_raster_scan = False
return hparams |
<SYSTEM_TASK:>
Hparams for training img2img_transformer on tpu.
<END_TASK>
<USER_TASK:>
Description:
def img2img_transformer_base_tpu():
"""Hparams for training img2img_transformer on tpu.""" |
hparams = img2img_transformer_base()
update_hparams_for_tpu(hparams)
hparams.batch_size = 2
hparams.num_heads = 4 # heads are expensive on tpu
hparams.num_decoder_layers = 8
hparams.num_encoder_layers = 4
hparams.shared_embedding_and_softmax_weights = False
return hparams |
<SYSTEM_TASK:>
Residual feed-forward layer with normalization at start.
<END_TASK>
<USER_TASK:>
Description:
def ResidualFeedForward(feature_depth,
feedforward_depth,
dropout,
mode):
"""Residual feed-forward layer with normalization at start.""" |
return layers.Residual(
layers.LayerNorm(),
layers.Dense(feedforward_depth),
layers.Relu(),
layers.Dropout(rate=dropout, mode=mode),
layers.Dense(feature_depth),
layers.Dropout(rate=dropout, mode=mode)
) |
<SYSTEM_TASK:>
Transformer encoder layer.
<END_TASK>
<USER_TASK:>
Description:
def EncoderLayer(feature_depth,
feedforward_depth,
num_heads,
dropout,
mode):
"""Transformer encoder layer.
The input to the encoder is a pair (embedded source, mask) where
the mask is created from the original source to prevent attending
to the padding part of the input.
Args:
feature_depth: int: depth of embedding
feedforward_depth: int: depth of feed-forward layer
num_heads: int: number of attention heads
dropout: float: dropout rate (how much to drop out)
mode: str: 'train' or 'eval'
Returns:
the layer, returning a pair (activations, mask).
""" |
# The encoder block expects (activation, mask) as input and returns
# the new activations only; we add the mask back to the output next.
encoder_block = layers.Serial(
layers.Residual( # Attention block here.
layers.Parallel(layers.LayerNorm(), layers.Identity()),
layers.MultiHeadedAttention(feature_depth, num_heads=num_heads,
dropout=dropout, mode=mode),
layers.Dropout(rate=dropout, mode=mode),
shortcut=layers.FirstBranch()
),
ResidualFeedForward(feature_depth, feedforward_depth, dropout, mode=mode)
)
# Now we add the mask back.
return layers.Serial(
layers.Reorder(output=((0, 1), 1)), # (x, mask) --> ((x, mask), mask)
layers.Parallel(encoder_block, layers.Identity())
) |
<SYSTEM_TASK:>
Transformer encoder.
<END_TASK>
<USER_TASK:>
Description:
def TransformerEncoder(vocab_size,
num_classes=10,
feature_depth=512,
feedforward_depth=2048,
num_layers=6,
num_heads=8,
dropout=0.1,
max_len=2048,
mode='train'):
"""Transformer encoder.
Args:
vocab_size: int: vocab size
num_classes: how many classes on output
feature_depth: int: depth of embedding
feedforward_depth: int: depth of feed-forward layer
num_layers: int: number of encoder/decoder layers
num_heads: int: number of attention heads
dropout: float: dropout rate (how much to drop out)
max_len: int: maximum symbol length for positional encoding
mode: str: 'train' or 'eval'
Returns:
the Transformer encoder layer.
""" |
input_embedding = layers.Serial(
layers.Embedding(feature_depth, vocab_size),
layers.Dropout(rate=dropout, mode=mode),
layers.PositionalEncoding(max_len=max_len)
)
return layers.Serial(
layers.Branch(), # Branch input to create embedding and mask.
layers.Parallel(input_embedding, layers.PaddingMask()),
layers.Serial(*[EncoderLayer(feature_depth, feedforward_depth, num_heads,
dropout, mode)
for _ in range(num_layers)]),
layers.FirstBranch(), # Drop the mask.
layers.LayerNorm(),
layers.Mean(axis=1), # Average on length.
layers.Dense(num_classes),
layers.LogSoftmax()
) |
<SYSTEM_TASK:>
Transformer decoder layer.
<END_TASK>
<USER_TASK:>
Description:
def DecoderLayer(feature_depth,
feedforward_depth,
num_heads,
dropout,
mode):
"""Transformer decoder layer.
Args:
feature_depth: int: depth of embedding
feedforward_depth: int: depth of feed-forward layer
num_heads: int: number of attention heads
dropout: float: dropout rate (how much to drop out)
mode: str: 'train' or 'eval'
Returns:
the layer.
""" |
return layers.Serial(
layers.Residual( # Self-attention block.
layers.LayerNorm(),
layers.Branch(),
layers.Parallel(layers.Identity(), # activation for (q, k, v)
layers.CausalMask(axis=-2)), # attention mask
layers.MultiHeadedAttention(feature_depth, num_heads=num_heads,
dropout=dropout, mode=mode),
layers.Dropout(rate=dropout, mode=mode)
),
ResidualFeedForward(feature_depth, feedforward_depth, dropout, mode=mode)
) |
<SYSTEM_TASK:>
Transformer decoder layer operating on chunks.
<END_TASK>
<USER_TASK:>
Description:
def ChunkedDecoderLayer(feature_depth,
feedforward_depth,
num_heads,
dropout,
chunk_selector,
mode):
"""Transformer decoder layer operating on chunks.
Args:
feature_depth: int: depth of embedding
feedforward_depth: int: depth of feed-forward layer
num_heads: int: number of attention heads
dropout: float: dropout rate (how much to drop out)
chunk_selector: a function from chunk number to list of chunks to attend.
mode: str: 'train' or 'eval'
Returns:
the layer.
""" |
return layers.Serial(
layers.Residual( # Self-attention block.
layers.Map(layers.LayerNorm()),
layers.ChunkedCausalMultiHeadedAttention(
feature_depth, num_heads=num_heads, dropout=dropout,
chunk_selector=chunk_selector, mode=mode),
layers.Map(layers.Dropout(rate=dropout, mode=mode)),
),
layers.Map(ResidualFeedForward(
feature_depth, feedforward_depth, dropout, mode=mode))
) |
<SYSTEM_TASK:>
Transformer language model operating on chunks.
<END_TASK>
<USER_TASK:>
Description:
def ChunkedTransformerLM(vocab_size,
feature_depth=512,
feedforward_depth=2048,
num_layers=6,
num_heads=8,
dropout=0.1,
chunk_selector=None,
max_len=2048,
mode='train'):
"""Transformer language model operating on chunks.
The input to this model is a sequence presented as a list or tuple of chunks:
(chunk1, chunk2, chunk3, ..., chunkN).
Each chunk should have the same shape (batch, chunk-length) and together they
represent a long sequence that is the concatenation chunk1, chunk2, ..., chunkN.
Chunked Transformer emulates the operation of a Transformer on this long
sequence except for the chunked attention layer, which may attend to only
a subset of the chunks to reduce memory use.
Args:
vocab_size: int: vocab size
feature_depth: int: depth of embedding
feedforward_depth: int: depth of feed-forward layer
num_layers: int: number of encoder/decoder layers
num_heads: int: number of attention heads
dropout: float: dropout rate (how much to drop out)
chunk_selector: a function from chunk number to list of chunks to attend
(if None, attends to the previous chunks which is equivalent to setting
chunk_selector(x) = [] if x < 1 else [x-1] (TransformerXL); we attend
to the current chunk with a causal mask too, selected chunks unmasked).
max_len: int: maximum symbol length for positional encoding
mode: str: 'train' or 'eval'
Returns:
the layer.
""" |
stack = [ChunkedDecoderLayer(feature_depth, feedforward_depth, num_heads,
dropout, chunk_selector, mode)
for _ in range(num_layers)]
# Below each Map(L) applies the layer L to each chunk independently.
return layers.Serial(
layers.ShiftRight(),
layers.Map(layers.Embedding(feature_depth, vocab_size)),
layers.Map(layers.Dropout(rate=dropout, mode=mode)),
layers.PositionalEncoding(max_len=max_len),
layers.Serial(*stack),
layers.Map(layers.LayerNorm()),
layers.Map(layers.Dense(vocab_size)),
layers.Map(layers.LogSoftmax()),
) |
<SYSTEM_TASK:>
Config for language-model experiments.
<END_TASK>
<USER_TASK:>
Description:
def mtf_transformer_paper_lm(size):
"""Config for language-model experiments.
Train these on languagemodel_lm1b32k_packed for 136000 steps (10 epochs).
The size parameter is an integer that controls the number of heads and the
size of the feedforward hidden layers. Increasing size by 1 doubles each of
these.
Results:
size params/10^9 log-ppl(per-token)
-1 0.14 3.209
0 0.22 3.119
1 0.37 3.037
2 0.67 2.969
3 1.28 2.912
4 2.48 2.874
5 4.90 2.871
(to get word-level log-ppl, multiply by 1.1078)
Args:
size: an integer
Returns:
a hparams object
""" |
n = 2 ** size
hparams = mtf_transformer_base_lm()
hparams.batch_size = 256
hparams.d_model = 1024
hparams.d_ff = int(8192 * n)
hparams.d_kv = 256
hparams.num_heads = int(8 * n)
hparams.shared_embedding_and_softmax_weights = False
# one epoch for languagemodel_lm1b32k_packed = 13600 steps
hparams.learning_rate_decay_steps = 13600
return hparams |
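For intuition, a small plain-Python sketch of how the size parameter scales the model (derived from n = 2**size in the code above; d_model and d_kv stay fixed):
for size in (-1, 0, 1, 2):
  n = 2 ** size
  print("size=%d d_ff=%d num_heads=%d" % (size, int(8192 * n), int(8 * n)))
# size=-1 d_ff=4096  num_heads=4
# size=0  d_ff=8192  num_heads=8
# size=1  d_ff=16384 num_heads=16
# size=2  d_ff=32768 num_heads=32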
<SYSTEM_TASK:>
Config for translation experiments.
<END_TASK>
<USER_TASK:>
Description:
def mtf_transformer_paper_tr(size):
"""Config for translation experiments.
Train these on translate_enfr_wmt32k_packed for 154000 steps (3 epochs).
The size parameter is an integer that controls the number of heads and the
size of the feedforward hidden layers. Increasing size by 1 doubles each of
these.
Args:
size: an integer
Returns:
a hparams object
""" |
n = 2 ** size
hparams = mtf_transformer_base()
hparams.label_smoothing = 0.1
hparams.batch_size = 128
hparams.d_model = 1024
hparams.d_ff = int(4096 * n)
hparams.num_heads = int(8 * n)
hparams.shared_embedding_and_softmax_weights = False
# one epoch for translate_enfr_wmt32k_packed = 51400 steps
hparams.learning_rate_decay_steps = 51400
return hparams |
<SYSTEM_TASK:>
Small language model to run on 1 TPU.
<END_TASK>
<USER_TASK:>
Description:
def mtf_transformer_lm_baseline():
"""Small language model to run on 1 TPU.
Run this on 2x2 on languagemodel_lm1b32k_packed for 272000 steps (10 epochs)
Results:
params/10^9 log-ppl(per-token)
0.14 3.202
Returns:
a hparams
""" |
hparams = mtf_transformer_paper_lm(-1)
hparams.batch_size = 128
hparams.learning_rate_decay_steps = 27200 # one epoch on lm1b
hparams.mesh_shape = "batch:8"
return hparams |
<SYSTEM_TASK:>
graph attention.
<END_TASK>
<USER_TASK:>
Description:
def graph_attention(q,
k,
v,
bias,
dropout_rate=0.0,
image_shapes=None,
name=None,
make_image_summary=True,
save_weights_to=None,
dropout_broadcast_dims=None,
adjacency_matrix=None,
num_edge_types=5):
"""graph attention.
Args:
q: a Tensor with shape [batch, heads, length_q, depth_k]
k: a Tensor with shape [batch, heads, length_kv, depth_k]
v: a Tensor with shape [batch, heads, length_kv, depth_v]
bias: bias Tensor (see attention_bias())
dropout_rate: a floating point number
image_shapes: optional tuple of integer scalars.
see comments for attention_image_summary()
name: an optional string
make_image_summary: True if you want an image summary.
save_weights_to: an optional dictionary to capture attention weights
for visualization; the weights tensor will be appended there under
a string key created from the variable scope (including name).
dropout_broadcast_dims: an optional list of integers less than 4
specifying in which dimensions to broadcast the dropout decisions;
this saves memory.
adjacency_matrix: optional matrix of [batch, length, length] ids indicating
edge type
num_edge_types: an int indicating number of edge types
Returns:
A Tensor of shape [batch, length, depth(q)]
""" |
with tf.variable_scope(
name, default_name="dot_product_attention", values=[q, k, v]) as scope:
# [batch, num_heads, query_length, memory_length]
logits = tf.matmul(q, k, transpose_b=True)
if adjacency_matrix is not None:
key_head_depth = common_layers.shape_list(q)[-1]
adjacency_vectors = make_edge_vectors(
adjacency_matrix,
num_edge_types,
key_head_depth,
name=name)
# transposing q to be [batch, length_q, heads, depth_k]
# to allow for matmul with [batch, length_q, length_q, depth_k]
q_t = tf.transpose(q, [0, 2, 1, 3])
adj_logits = tf.matmul(q_t, adjacency_vectors, transpose_b=True)
logits += tf.transpose(adj_logits, [0, 2, 1, 3])
      # logits has shape [batch, num_heads, query_length, memory_length].
if bias is not None:
logits += bias
weights = tf.nn.softmax(logits, name="attention_weights")
if save_weights_to is not None:
save_weights_to[scope.name] = weights
# dropping out the attention links for each of the heads
weights = common_layers.dropout_with_broadcast_dims(
weights, 1.0 - dropout_rate, broadcast_dims=dropout_broadcast_dims)
if common_layers.should_generate_summaries() and make_image_summary:
common_attention.attention_image_summary(weights, image_shapes)
return tf.matmul(weights, v) |
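A hypothetical usage sketch (TF 1.x), assuming the tensor2tensor helpers
referenced inside graph_attention (common_layers, common_attention,
make_edge_vectors) are importable in this module's scope. With
adjacency_matrix=None the call reduces to plain dot-product attention; the
shapes below are illustrative.

import tensorflow as tf

batch, heads, length, depth = 2, 4, 6, 8
q = tf.random_normal([batch, heads, length, depth])
k = tf.random_normal([batch, heads, length, depth])
v = tf.random_normal([batch, heads, length, depth])
# No graph structure and no bias: behaves like standard dot-product attention.
out = graph_attention(q, k, v, bias=None, make_image_summary=False)
with tf.Session() as sess:
  print(sess.run(out).shape)  # (2, 4, 6, 8)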
<SYSTEM_TASK:>
Helper function that computes transformation for keys and values.
<END_TASK>
<USER_TASK:>
Description:
def _compute_edge_transforms(node_states,
depth,
num_transforms,
name="transform"):
"""Helper function that computes transformation for keys and values.
Let B be the number of batches.
Let N be the number of nodes in the graph.
Let D be the size of the node hidden states.
Let K be the size of the attention keys/queries (total_key_depth).
Let V be the size of the attention values (total_value_depth).
Let T be the total number of transforms (num_transforms).
Computes the transforms for keys or values for attention.
* For each node N_j and edge type t, a key K_jt of size K is computed. When an
edge of type t goes from node N_j to any other node, K_jt is the key that is
  used in the attention process.
* For each node N_j and edge type t, a value V_jt of size V is computed. When
an edge of type t goes from node N_j to node N_i, Attention(Q_i, K_jt)
produces a weight w_ijt. The message sent along this edge is w_ijt * V_jt.
Args:
    node_states: A Tensor of shape [B, N, D].
    depth: An integer (K or V).
    num_transforms: An integer (T).
    name: A name for the function.
Returns:
    x: The attention keys or values for each node and edge type
      (shape [B, N*T, K] or [B, N*T, V]).
""" |
node_shapes = common_layers.shape_list(node_states)
x = common_layers.dense(
node_states,
depth * num_transforms,
use_bias=False,
name=name)
batch = node_shapes[0] # B.
length = node_shapes[1] # N.
  # Make the transform dimension explicit by splitting each vector of size
  # depth * T into a [T, depth] matrix.
  #
x = tf.reshape(x, [batch, length, num_transforms, depth])
# Flatten out the fourth dimension.
x = tf.reshape(x, [batch, length * num_transforms, depth])
return x |
<SYSTEM_TASK:>
Computes query, key and value for edge matrices.
<END_TASK>
<USER_TASK:>
Description:
def compute_mpnn_qkv(node_states,
total_key_depth,
total_value_depth,
num_transforms):
"""Computes query, key and value for edge matrices.
Let B be the number of batches.
Let N be the number of nodes in the graph.
Let D be the size of the node hidden states.
Let K be the size of the attention keys/queries (total_key_depth).
Let V be the size of the attention values (total_value_depth).
Let T be the total number of transforms (num_transforms).
Computes the queries, keys, and values for attention.
* For each node N_i in the graph, a query Q_i of size K is computed. This
query is used to determine the relative weights to give to each of the
node's incoming edges.
* For each node N_j and edge type t, a key K_jt of size K is computed. When an
edge of type t goes from node N_j to any other node, K_jt is the key that is
  used in the attention process.
* For each node N_j and edge type t, a value V_jt of size V is computed. When
an edge of type t goes from node N_j to node N_i, Attention(Q_i, K_jt)
produces a weight w_ijt. The message sent along this edge is w_ijt * V_jt.
Args:
node_states: A Tensor with shape [B, N, D].
total_key_depth: an integer (K).
total_value_depth: an integer (V).
    num_transforms: an integer specifying number of transforms (T). This is
typically the number of edge types.
Returns:
q: The attention queries for each destination node (shape [B, N, K]).
k: The attention keys for each node and edge type (shape [B, N*T, K]).
v: The attention values for each node and edge type (shape [B, N*T, V]).
""" |
# node_states is initially a tensor with shape [B, N, D]. The call to dense
# creates a D x K kernel that serves as a fully-connected layer.
#
# For each possible batch b and node n in the first two dimensions of
# node_states, the corresponding size-D vector (the third dimension of
# node_states) is the hidden state for node n in batch b. Each of these size-D
# vectors is multiplied by the kernel to produce an attention query of size K.
# The result is a tensor of size [B, N, K] containing the attention queries
# for each node in each batch.
q = common_layers.dense(
node_states, total_key_depth, use_bias=False, name="q_mpnn")
# Creates the attention keys in a manner similar to the process of creating
# the attention queries. One key is created for each type of outgoing edge the
  # corresponding node might have, meaning k will have shape [B, N*T, K].
k = _compute_edge_transforms(node_states,
total_key_depth,
num_transforms,
name="k_mpnn")
v = _compute_edge_transforms(node_states,
total_value_depth,
num_transforms,
name="v_mpnn")
return q, k, v |
<SYSTEM_TASK:>
Identical to sparse_ggnn except that each input has a batch dimension.
<END_TASK>
<USER_TASK:>
Description:
def sparse_message_pass_batched(node_states,
adjacency_matrices,
num_edge_types,
hidden_size,
use_bias=True,
average_aggregation=False,
name="sparse_ggnn_batched"):
"""Identical to sparse_ggnn except that each input has a batch dimension.
B = The batch size.
N = The number of nodes in each batch.
H = The size of the hidden states.
T = The number of edge types.
Args:
node_states: Initial states of each node in the graph. Shape: [B, N, H]
adjacency_matrices: Adjacency matrices of directed edges for each edge
type and batch. Shape: [B, N, N, T] (sparse).
num_edge_types: The number of edge types. T.
hidden_size: The size of the hidden layer. H.
use_bias: Whether to use bias in the hidden layer.
average_aggregation: How to aggregate the incoming node messages. If
average_aggregation is true, the messages are averaged. If it is false,
they are summed.
name: (optional) The scope within which tf variables should be created.
Returns:
The result of one round of message-passing of shape [B, N, H].
""" |
b, n = tf.shape(node_states)[0], tf.shape(node_states)[1]
# Flatten the batch dimension of the node states.
node_states = tf.reshape(node_states, [b*n, hidden_size])
# Flatten the batch dimension of the adjacency matrices.
indices = adjacency_matrices.indices
new_index2 = indices[:, 3] # The edge type dimension.
# Offset N x N adjacency matrix by the batch number in which it appears.
new_index0 = indices[:, 1] + indices[:, 0] * tf.cast(n, tf.int64)
new_index1 = indices[:, 2] + indices[:, 0] * tf.cast(n, tf.int64)
# Combine these indices as triples.
new_indices = tf.stack([new_index0, new_index1, new_index2], axis=1)
# Build the new sparse matrix.
new_shape = [tf.cast(b*n, tf.int64), tf.cast(b*n, tf.int64), num_edge_types]
adjacency_matrices = tf.SparseTensor(indices=new_indices,
values=adjacency_matrices.values,
dense_shape=new_shape)
# Run a message-passing step and return the result with the batch dimension.
node_states = sparse_message_pass(
node_states,
adjacency_matrices,
num_edge_types,
hidden_size,
use_bias=use_bias,
average_aggregation=average_aggregation,
name=name)
return tf.reshape(node_states, [b, n, hidden_size]) |
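A small NumPy sketch of the index arithmetic above: an edge (b, i, j, t) in
the batched [B, N, N, T] adjacency becomes (b*N + i, b*N + j, t) in the
flattened [B*N, B*N, T] block-diagonal adjacency. The example indices are
illustrative.

import numpy as np

n = 4  # nodes per graph
indices = np.array([[0, 1, 2, 0],   # batch 0: edge 2 -> 1 of type 0
                    [1, 0, 3, 1]])  # batch 1: edge 3 -> 0 of type 1
new_index0 = indices[:, 1] + indices[:, 0] * n
new_index1 = indices[:, 2] + indices[:, 0] * n
new_indices = np.stack([new_index0, new_index1, indices[:, 3]], axis=1)
print(new_indices)  # [[1 2 0] [4 7 1]]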
<SYSTEM_TASK:>
One message-passing step for a GNN with a sparse adjacency matrix.
<END_TASK>
<USER_TASK:>
Description:
def sparse_message_pass(node_states,
adjacency_matrices,
num_edge_types,
hidden_size,
use_bias=True,
average_aggregation=False,
name="sparse_ggnn"):
"""One message-passing step for a GNN with a sparse adjacency matrix.
Implements equation 2 (the message passing step) in
[Li et al. 2015](https://arxiv.org/abs/1511.05493).
N = The number of nodes in each batch.
H = The size of the hidden states.
T = The number of edge types.
Args:
node_states: Initial states of each node in the graph. Shape is [N, H].
adjacency_matrices: Adjacency matrix of directed edges for each edge
type. Shape is [N, N, T] (sparse tensor).
num_edge_types: The number of edge types. T.
hidden_size: The size of the hidden state. H.
use_bias: Whether to use bias in the hidden layer.
average_aggregation: How to aggregate the incoming node messages. If
average_aggregation is true, the messages are averaged. If it is false,
they are summed.
name: (optional) The scope within which tf variables should be created.
Returns:
The result of one step of Gated Graph Neural Network (GGNN) message passing.
Shape: [N, H]
""" |
n = tf.shape(node_states)[0]
t = num_edge_types
incoming_edges_per_type = tf.sparse_reduce_sum(adjacency_matrices, axis=1)
# Convert the adjacency matrix into shape [T, N, N] - one [N, N] adjacency
# matrix for each edge type. Since sparse tensor multiplication only supports
# two-dimensional tensors, we actually convert the adjacency matrix into a
# [T * N, N] tensor.
adjacency_matrices = tf.sparse_transpose(adjacency_matrices, [2, 0, 1])
adjacency_matrices = tf.sparse_reshape(adjacency_matrices, [t * n, n])
# Multiply the adjacency matrix by the node states, producing a [T * N, H]
# tensor. For each (edge type, node) pair, this tensor stores the sum of
# the hidden states of the node's neighbors over incoming edges of that type.
messages = tf.sparse_tensor_dense_matmul(adjacency_matrices, node_states)
# Rearrange this tensor to have shape [N, T * H]. The incoming states of each
  # node's neighbors are summed by edge type and then concatenated together into
# a single T * H vector.
messages = tf.reshape(messages, [t, n, hidden_size])
messages = tf.transpose(messages, [1, 0, 2])
messages = tf.reshape(messages, [n, t * hidden_size])
# Run each of those T * H vectors through a linear layer that produces
# a vector of size H. This process is equivalent to running each H-sized
# vector through a separate linear layer for each edge type and then adding
# the results together.
#
# Note that, earlier on, we added together all of the states of neighbors
# that were connected by edges of the same edge type. Since addition and
# multiplying by a linear layer are commutative, this process was equivalent
# to running each incoming edge through a linear layer separately and then
# adding everything at the end.
with tf.variable_scope(name, default_name="sparse_ggnn"):
final_node_states = common_layers.dense(
messages, hidden_size, use_bias=False)
    # Multiply the bias for each edge type by the number of incoming edges
    # of that type.
if use_bias:
bias = tf.get_variable("bias", initializer=tf.zeros([t, hidden_size]))
final_node_states += tf.matmul(incoming_edges_per_type, bias)
if average_aggregation:
incoming_edges = tf.reduce_sum(incoming_edges_per_type, -1, keepdims=True)
incoming_edges = tf.tile(incoming_edges, [1, hidden_size])
final_node_states /= incoming_edges + 1e-7
return tf.reshape(final_node_states, [n, hidden_size]) |
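A dense NumPy sketch of the message computation above, for intuition only:
transposing the [N, N, T] adjacency to [T, N, N] and multiplying by the node
states gives, for each edge type, the sum of neighbor states over incoming
edges of that type. Shapes and values are illustrative.

import numpy as np

n, h, t = 3, 2, 2
adjacency = np.zeros([n, n, t])
adjacency[0, 1, 0] = 1.  # edge 1 -> 0 of type 0
adjacency[2, 0, 1] = 1.  # edge 0 -> 2 of type 1
states = np.arange(n * h, dtype=np.float32).reshape(n, h)
messages = np.einsum("tij,jh->tih", adjacency.transpose(2, 0, 1), states)
print(messages.shape)  # (2, 3, 2), i.e. [T, N, H]
print(messages[0, 0])  # state of node 1, received by node 0 over a type-0 edge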
<SYSTEM_TASK:>
Dot product attention with edge vectors.
<END_TASK>
<USER_TASK:>
Description:
def dot_product_mpnn_attention(q,
k,
v,
adjacency_matrix,
num_edge_types,
num_transforms=None,
use_weighted_sum=False,
name=None):
"""Dot product attention with edge vectors.
Let B be the number of batches.
Let N be the number of nodes in the graph.
Let K be the size of the attention keys/queries.
Let V be the size of the attention values.
Let T be the total number of transforms (num_transforms).
Args:
q: The query Tensor of shape [B, N, K].
k: The key Tensor of shape [B, T, N, K].
v: The value Tensor of shape [B, T, N, V].
adjacency_matrix: A Tensor of shape [B, N, N, T]. An entry at
indices b, i, j, k is the indicator of the edge
from node j to node i in batch b. A standard adjacency matrix will only
      have one edge type while a multigraph will have multiple edge types.
num_edge_types: An integer specifying number of edge types.
num_transforms: An integer indicating number of transforms (T). If None,
then num_transforms will be equal to num_edge_types.
use_weighted_sum: If False, will only use a single transform per edge type.
Otherwise, use a learned weighted sum of transforms per edge type.
name: A string.
Returns:
A Tensor of shape [B, N, V] storing the result of computing attention
weights using the queries and keys and combining the values according to
those weights.
Raises:
ValueError: if num_transforms doesn't equal num_edge_types and not using
weighted sum.
""" |
with tf.variable_scope(
name,
default_name="dot_product_mpnn_attention",
values=[q, k, v, adjacency_matrix, num_edge_types]):
# If not explicitly set, use num_transforms set to num_edge_types.
num_transforms = (
num_edge_types if num_transforms is None else num_transforms)
if not use_weighted_sum and num_transforms != num_edge_types:
raise ValueError("num_transforms must equal num_edge_types unless "
"use_weighted_sum is True")
# Computes the raw dot-product attention values between each query and
# the corresponding keys it needs to consider.
#
# This operation takes the dot product of (the query for
# each node) and (the key for each node for each possible edge type),
# creating an N x N matrix for each edge type. The entry at index (i, j)
# is the dot-product for the edge from node i to node j of the appropriate
# type. These dot products will eventually become attention weights
# specifying how much node i weights an edge of that type coming from node
# j.
all_edge_logits = tf.matmul(
tf.tile(tf.expand_dims(q, axis=1), [1, num_edge_types, 1, 1]),
k,
transpose_b=True)
# The adjacency matrix assumes there is only one directed edge (i <- j) for
# each pair of nodes. If such an edge exists, it contains the integer
# type of that edge at position (i, j) of the adjacency matrix.
#
# Construct edge_vectors of shape [B, N, N, T].
if use_weighted_sum:
# Use dense representation for edge vectors.
edge_vectors = make_edge_vectors(
adjacency_matrix,
num_edge_types,
num_transforms)
else:
# Generate one-hot vectors based on edge types.
# If there is an edge from node j to node i of type t, then index t of the
# last dimension is 1 for entry (i, j) of the second and third dimensions.
edge_vectors = tf.one_hot(adjacency_matrix, num_transforms)
# Rearranging the dimensions to match the shape of all_edge_logits.
edge_vectors = tf.transpose(edge_vectors, [0, 3, 1, 2])
# Element-wise multiplies all_edge_logits and edge_vectors.
#
# In other words: all_edge_logits contains N x N matrices of query-key
# products. This element-wise multiplication zeroes out entries that do not
# correspond to actual edges in the graph of the appropriate edge type.
# all_edge_logits retains shape [B, T, N, N].
all_edge_logits *= edge_vectors
# Since there can only be one edge from node A to node B, we can collapse
# the T different adjacency matrices containing key-query pairs into one
# adjacency matrix. logits is [B, N, N].
# TODO(dbieber): Use a reshape instead of reduce sum to attend over all
# edges instead of over all neighboring nodes to handle the multigraph case.
logits = tf.reduce_sum(all_edge_logits, axis=1)
    # For pairs of nodes with no edge between them, add a large negative bias
    # to the logits so that those positions receive close to zero weight after
    # the softmax.
bias = 0
bias = tf.to_float(tf.equal(
tf.reduce_sum(adjacency_matrix, axis=-1), 0)) * -1e9
logits += bias
# Turn the raw key-query products into a probability distribution (or,
# in terms of attention, weights). The softmax is computed across the
# last dimension of logits.
compatibility = tf.nn.softmax(logits) # Shape [B, N, N].
# Computes a summary showing the attention matrix as an image. Does not do
# any work toward actually performing attention.
common_attention.attention_image_summary(
tf.expand_dims(compatibility, axis=1), None)
# Repeats the attention matrix T times for each batch, producing
# a tensor with shape [B, T, N, N] where the [N, N] component is T
# repeats of the values found in compatibility.
edge_compatibility = tf.tile(
tf.expand_dims(compatibility, axis=1), [1, num_edge_types, 1, 1])
# Zeroes out the entries in edge_compatibility that do not correspond to
# actual edges.
edge_compatibility *= edge_vectors # Shape [B, T, N, N].
output = compute_values(edge_compatibility, v)
return output |
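A tiny NumPy sketch of the masking step above: positions with no incoming edge
of any type get a large negative bias, so they receive close to zero weight
after the softmax. Values are purely illustrative.

import numpy as np

adjacency = np.zeros([2, 2, 1])  # [N, N, T]
adjacency[0, 1, 0] = 1.  # edge 1 -> 0
adjacency[1, 0, 0] = 1.  # edge 0 -> 1
logits = np.ones([2, 2])
bias = (adjacency.sum(-1) == 0) * -1e9
weights = np.exp(logits + bias)
weights /= weights.sum(-1, keepdims=True)
print(np.round(weights, 3))  # [[0. 1.] [1. 0.]]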
<SYSTEM_TASK:>
ggnn version of the MPNN from Gilmer et al.
<END_TASK>
<USER_TASK:>
Description:
def ggnn_fast_dense(node_states,
adjacency_matrix,
num_edge_types,
total_value_depth,
name=None):
"""ggnn version of the MPNN from Gilmer et al.
Let B be the number of batches.
Let D be the size of the node hidden states.
Let K be the size of the attention keys/queries.
Let V be the size of the output of the ggnn.
Let T be the number of transforms / edge types.
Args:
    node_states: A Tensor of shape [B, N, D] of node hidden states, where N is
      the number of nodes.
adjacency_matrix: A Tensor of shape [B, N, N, T]. An entry at
indices b, i, j, k is the indicator of the edge from node j to node i in
batch b. A standard adjacency matrix will only have values of one, while a
      multigraph may have larger integer values.
num_edge_types: An integer specifying number of edge types.
total_value_depth: An integer (V)
name: A string.
Returns:
A Tensor of shape [B, N, V] storing the result of computing attention
weights using the queries and keys and combining the values according to
those weights.
Raises:
ValueError: if num_transforms doesn't equal num_edge_types and not using
weighted sum.
""" |
  # The adjacency matrix is assumed to contain at most one edge of each type
  # between the same pair of nodes. adjacency_matrix will be converted to
  # shape [B, T, N, N].
with tf.variable_scope(
name,
default_name="ggnn_fast_dense",
values=[node_states, adjacency_matrix, num_edge_types]):
nodes_shape = common_layers.shape_list(node_states)
v = _compute_edge_transforms(node_states,
total_value_depth,
num_edge_types,
name="v_mpnn")
v = tf.reshape(v, [nodes_shape[0], nodes_shape[1], num_edge_types,
total_value_depth
]) # Shape [B, N, T, V].
v = tf.transpose(v, [0, 2, 1, 3]) # Shape [B, T, N, V].
# Rearranging the dimensions to match the shape of all_edge_logits.
edge_vectors = tf.transpose(adjacency_matrix, [0, 3, 1, 2])
output = compute_values(edge_vectors, v)
return output |
<SYSTEM_TASK:>
Compute values. If edge compatibilities is just adjacency, we get ggnn.
<END_TASK>
<USER_TASK:>
Description:
def compute_values(edge_compatibility, v):
"""Compute values. If edge compatibilities is just adjacency, we get ggnn.
Args:
    edge_compatibility: A tensor of shape [batch, num_transforms, length, length]
v: A tensor of shape [batch, num_transforms, length, depth]
Returns:
output: A [batch, length, depth] tensor
""" |
# Computes the incoming value vectors for each node by weighting them
# according to the attention weights. These values are still segregated by
# edge type.
# Shape = [B, T, N, V].
all_edge_values = tf.matmul(tf.to_float(edge_compatibility), v)
# Combines the weighted value vectors together across edge types into a
# single N x V matrix for each batch.
output = tf.reduce_sum(all_edge_values, axis=1) # Shape [B, N, V].
return output |
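A NumPy sketch of the value combination above: one matmul per edge type
followed by a sum over edge types. Shapes are illustrative.

import numpy as np

b, t, n, v_depth = 1, 2, 3, 4
edge_compatibility = np.random.rand(b, t, n, n)
v = np.random.rand(b, t, n, v_depth)
output = np.matmul(edge_compatibility, v).sum(axis=1)
print(output.shape)  # (1, 3, 4), i.e. [B, N, V]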
<SYSTEM_TASK:>
Precompute the a_in and a_out tensors.
<END_TASK>
<USER_TASK:>
Description:
def precompute_edge_matrices(adjacency, hparams):
"""Precompute the a_in and a_out tensors.
  (we don't want to add to the graph every time _fprop is called)
Args:
adjacency: placeholder of real valued vectors of shape [B, L, L, E]
hparams: HParams object
Returns:
edge_matrices: [batch, L * D, L * D] the dense matrix for message passing
    viewed as a block matrix with (L, L) blocks of size (D, D). Each block is a
    function of the edge vector of the adjacency matrix at that position.
""" |
batch_size, num_nodes, _, edge_dim = common_layers.shape_list(adjacency)
# build the edge_network for incoming edges
with tf.variable_scope("edge_network"):
x = tf.reshape(
adjacency, [batch_size * num_nodes * num_nodes, edge_dim],
name="adj_reshape_in")
for ip_layer in range(hparams.edge_network_layers):
name = "edge_network_layer_%d"%ip_layer
x = tf.layers.dense(common_layers.layer_preprocess(x, hparams),
hparams.edge_network_hidden_size,
activation=tf.nn.relu,
name=name)
x = tf.layers.dense(common_layers.layer_preprocess(x, hparams),
hparams.hidden_size**2,
activation=None,
name="edge_network_output")
# x = [batch * l * l, d *d]
edge_matrices_flat = tf.reshape(x, [batch_size, num_nodes,
num_nodes, hparams.hidden_size,
hparams.hidden_size])
# reshape to [batch, l * d, l *d]
edge_matrices = tf.reshape(
tf.transpose(edge_matrices_flat, [0, 1, 3, 2, 4]), [
-1, num_nodes * hparams.hidden_size,
num_nodes * hparams.hidden_size
],
name="edge_matrices")
return edge_matrices |
<SYSTEM_TASK:>
generate_files but with a single writer writing to shard task_id.
<END_TASK>
<USER_TASK:>
Description:
def generate_files_distributed(generator,
output_name,
output_dir,
num_shards=1,
max_cases=None,
task_id=0):
"""generate_files but with a single writer writing to shard task_id.""" |
assert task_id < num_shards
output_filename = sharded_name(output_name, task_id, num_shards)
output_file = os.path.join(output_dir, output_filename)
tf.logging.info("Writing to file %s", output_file)
writer = tf.python_io.TFRecordWriter(output_file)
counter = 0
for case in generator:
if counter % 100000 == 0:
tf.logging.info("Generating case %d for %s." % (counter, output_name))
counter += 1
if max_cases and counter > max_cases:
break
example = to_example(case)
writer.write(example.SerializeToString())
writer.close()
return output_file |
<SYSTEM_TASK:>
Generate cases from a generator and save as TFRecord files.
<END_TASK>
<USER_TASK:>
Description:
def generate_files(generator, output_filenames,
max_cases=None, cycle_every_n=1):
"""Generate cases from a generator and save as TFRecord files.
Generated cases are transformed to tf.Example protos and saved as TFRecords
in sharded files named output_dir/output_name-00..N-of-00..M=num_shards.
Args:
generator: a generator yielding (string -> int/float/str list) dictionaries.
output_filenames: List of output file paths.
max_cases: maximum number of cases to get from the generator;
if None (default), we use the generator until StopIteration is raised.
cycle_every_n: how many cases from the generator to take before
switching to the next shard; by default set to 1, switch every case.
""" |
if outputs_exist(output_filenames):
tf.logging.info("Skipping generator because outputs files exists at {}"
.format(output_filenames))
return
tmp_filenames = [fname + ".incomplete" for fname in output_filenames]
num_shards = len(output_filenames)
  # Check if this is training or eval data; ref: train_data_filenames().
if num_shards > 0:
if "-train" in output_filenames[0]:
tag = "train"
elif "-dev" in output_filenames[0]:
tag = "eval"
else:
tag = "other"
writers = [tf.python_io.TFRecordWriter(fname) for fname in tmp_filenames]
counter, shard = 0, 0
for case in generator:
if case is None:
continue
if counter % 100000 == 0:
tf.logging.info("Generating case %d." % counter)
counter += 1
if max_cases and counter > max_cases:
break
example = to_example(case)
writers[shard].write(example.SerializeToString())
if counter % cycle_every_n == 0:
shard = (shard + 1) % num_shards
for writer in writers:
writer.close()
for tmp_name, final_name in zip(tmp_filenames, output_filenames):
tf.gfile.Rename(tmp_name, final_name)
if num_shards > 0:
if tag == "train":
mlperf_log.transformer_print(
key=mlperf_log.PREPROC_NUM_TRAIN_EXAMPLES, value=counter)
elif tag == "eval":
mlperf_log.transformer_print(
key=mlperf_log.PREPROC_NUM_EVAL_EXAMPLES, value=counter)
tf.logging.info("Generated %s Examples", counter) |
<SYSTEM_TASK:>
Report hook for download progress.
<END_TASK>
<USER_TASK:>
Description:
def download_report_hook(count, block_size, total_size):
"""Report hook for download progress.
Args:
count: current block number
block_size: block size
total_size: total size
""" |
percent = int(count * block_size * 100 / total_size)
print("\r%d%%" % percent + " completed", end="\r") |
<SYSTEM_TASK:>
Download filename from uri unless it's already in directory.
<END_TASK>
<USER_TASK:>
Description:
def maybe_download(directory, filename, uri):
"""Download filename from uri unless it's already in directory.
  Copies a remote file to local if that local file does not already exist. If
  the local file already exists, this function does not check that it is a copy
  of the remote file.
Remote filenames can be filepaths, any URI readable by tensorflow.gfile, or a
URL.
Args:
directory: path to the directory that will be used.
filename: name of the file to download to (do nothing if it already exists).
uri: URI to copy (or download) from.
Returns:
The path to the downloaded file.
""" |
tf.gfile.MakeDirs(directory)
filepath = os.path.join(directory, filename)
if tf.gfile.Exists(filepath):
tf.logging.info("Not downloading, file already found: %s" % filepath)
return filepath
tf.logging.info("Downloading %s to %s" % (uri, filepath))
try:
tf.gfile.Copy(uri, filepath)
except tf.errors.UnimplementedError:
if uri.startswith("http"):
inprogress_filepath = filepath + ".incomplete"
inprogress_filepath, _ = urllib.urlretrieve(
uri, inprogress_filepath, reporthook=download_report_hook)
# Print newline to clear the carriage return from the download progress
print()
tf.gfile.Rename(inprogress_filepath, filepath)
else:
      raise ValueError("Unrecognized URI: " + uri)
statinfo = os.stat(filepath)
tf.logging.info("Successfully downloaded %s, %s bytes." %
(filename, statinfo.st_size))
return filepath |
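A hypothetical usage sketch; the directory and URI below are placeholders, not
real resources, so substitute a real location before running. If the file
already exists locally it is reused without re-downloading.

path = maybe_download(
    directory="/tmp/t2t_example_data",
    filename="example.txt",
    uri="http://example.com/example.txt")  # placeholder URI
print(path)  # /tmp/t2t_example_data/example.txt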
<SYSTEM_TASK:>
Download filename from Google drive unless it's already in directory.
<END_TASK>
<USER_TASK:>
Description:
def maybe_download_from_drive(directory, filename, url):
"""Download filename from Google drive unless it's already in directory.
Args:
directory: path to the directory that will be used.
filename: name of the file to download to (do nothing if it already exists).
url: URL to download from.
Returns:
The path to the downloaded file.
""" |
if not tf.gfile.Exists(directory):
tf.logging.info("Creating directory %s" % directory)
tf.gfile.MakeDirs(directory)
filepath = os.path.join(directory, filename)
confirm_token = None
if tf.gfile.Exists(filepath):
tf.logging.info("Not downloading, file already found: %s" % filepath)
return filepath
  # Since the file is big, Drive will scan it for viruses and redirect to a
  # warning page. We find the confirm token on this page and append it to the
# URL to start the download process.
confirm_token = None
session = requests.Session()
response = session.get(url, stream=True)
for k, v in response.cookies.items():
if k.startswith("download_warning"):
confirm_token = v
if confirm_token:
url = url + "&confirm=" + confirm_token
tf.logging.info("Downloading %s to %s" % (url, filepath))
response = session.get(url, stream=True)
# Now begin the download.
chunk_size = 16 * 1024
with open(filepath, "wb") as f:
for chunk in response.iter_content(chunk_size):
if chunk:
f.write(chunk)
# Print newline to clear the carriage return from the download progress
print()
statinfo = os.stat(filepath)
tf.logging.info("Successfully downloaded %s, %s bytes." % (filename,
statinfo.st_size))
return filepath |
<SYSTEM_TASK:>
Unzips from gz_path into new_path.
<END_TASK>
<USER_TASK:>
Description:
def gunzip_file(gz_path, new_path):
"""Unzips from gz_path into new_path.
Args:
gz_path: path to the zipped file.
new_path: path to where the file will be unzipped.
""" |
if tf.gfile.Exists(new_path):
tf.logging.info("File %s already exists, skipping unpacking" % new_path)
return
tf.logging.info("Unpacking %s to %s" % (gz_path, new_path))
# We may be unpacking into a newly created directory, add write mode.
  mode = stat.S_IRWXU | stat.S_IXGRP | stat.S_IRGRP | stat.S_IROTH
os.chmod(os.path.dirname(new_path), mode)
with gzip.open(gz_path, "rb") as gz_file:
with tf.gfile.GFile(new_path, mode="wb") as new_file:
for line in gz_file:
new_file.write(line) |
<SYSTEM_TASK:>
Inner implementation for vocab generators.
<END_TASK>
<USER_TASK:>
Description:
def get_or_generate_vocab_inner(data_dir, vocab_filename, vocab_size,
generator, max_subtoken_length=None,
reserved_tokens=None):
"""Inner implementation for vocab generators.
Args:
data_dir: The base directory where data and vocab files are stored. If None,
then do not save the vocab even if it doesn't exist.
vocab_filename: relative filename where vocab file is stored
vocab_size: target size of the vocabulary constructed by SubwordTextEncoder
generator: a generator that produces tokens from the vocabulary
max_subtoken_length: an optional integer. Set this to a finite value to
avoid quadratic costs during vocab building.
reserved_tokens: List of reserved tokens. `text_encoder.RESERVED_TOKENS`
should be a prefix of `reserved_tokens`. If `None`, defaults to
`RESERVED_TOKENS`.
Returns:
A SubwordTextEncoder vocabulary object.
""" |
if data_dir and vocab_filename:
vocab_filepath = os.path.join(data_dir, vocab_filename)
if tf.gfile.Exists(vocab_filepath):
tf.logging.info("Found vocab file: %s", vocab_filepath)
return text_encoder.SubwordTextEncoder(vocab_filepath)
else:
vocab_filepath = None
tf.logging.info("Generating vocab file: %s", vocab_filepath)
vocab = text_encoder.SubwordTextEncoder.build_from_generator(
generator, vocab_size, max_subtoken_length=max_subtoken_length,
reserved_tokens=reserved_tokens)
if vocab_filepath:
tf.gfile.MakeDirs(data_dir)
vocab.store_to_file(vocab_filepath)
return vocab |
<SYSTEM_TASK:>
Generate a vocabulary from the datasets in sources.
<END_TASK>
<USER_TASK:>
Description:
def get_or_generate_vocab(data_dir, tmp_dir, vocab_filename, vocab_size,
sources, file_byte_budget=1e6,
max_subtoken_length=None):
"""Generate a vocabulary from the datasets in sources.""" |
vocab_generator = generate_lines_for_vocab(tmp_dir, sources, file_byte_budget)
return get_or_generate_vocab_inner(data_dir, vocab_filename, vocab_size,
vocab_generator, max_subtoken_length) |
<SYSTEM_TASK:>
r"""Generate a vocabulary from a tabbed source file.
<END_TASK>
<USER_TASK:>
Description:
def get_or_generate_tabbed_vocab(data_dir, tmp_dir, source_filename,
index, vocab_filename, vocab_size):
r"""Generate a vocabulary from a tabbed source file.
The source is a file of source, target pairs, where each line contains
a source string and a target string, separated by a tab ('\t') character.
The index parameter specifies 0 for the source or 1 for the target.
Args:
data_dir: path to the data directory.
tmp_dir: path to the temporary directory.
source_filename: the name of the tab-separated source file.
    index: 0 to build the vocab from the source column, 1 for the target column.
vocab_filename: the name of the vocabulary file.
vocab_size: vocabulary size.
Returns:
The vocabulary.
""" |
def generate():
filepath = os.path.join(tmp_dir, source_filename)
tf.logging.info("Generating vocab from %s", filepath)
with tf.gfile.GFile(filepath, mode="r") as source_file:
for line in source_file:
line = line.strip()
if line and "\t" in line:
parts = line.split("\t", 1)
part = parts[index].strip()
yield part
return get_or_generate_vocab_inner(data_dir, vocab_filename, vocab_size,
generate()) |
<SYSTEM_TASK:>
Generate a vocabulary from txt files with example-per-line.
<END_TASK>
<USER_TASK:>
Description:
def get_or_generate_txt_vocab(data_dir, vocab_filename, vocab_size,
filepatterns):
"""Generate a vocabulary from txt files with example-per-line.""" |
if isinstance(filepatterns, str):
filepatterns = [filepatterns]
def generate():
tf.logging.info("Generating vocab from %s", filepatterns)
for filepattern in filepatterns:
for filename in tf.gfile.Glob(filepattern):
with tf.gfile.GFile(filename, mode="r") as source_file:
for line in source_file:
yield line.strip()
return get_or_generate_vocab_inner(data_dir, vocab_filename, vocab_size,
generate()) |
<SYSTEM_TASK:>
Shuffle a single file of records.
<END_TASK>
<USER_TASK:>
Description:
def _shuffle_single(fname, extra_fn=None):
"""Shuffle a single file of records.
Args:
fname: a string
extra_fn: an optional function from list of TFRecords to list of TFRecords
to be called after shuffling.
""" |
records = read_records(fname)
random.shuffle(records)
if extra_fn is not None:
records = extra_fn(records)
out_fname = fname.replace(UNSHUFFLED_SUFFIX, "")
write_records(records, out_fname)
tf.gfile.Remove(fname) |
<SYSTEM_TASK:>
Shuffles the dataset.
<END_TASK>
<USER_TASK:>
Description:
def shuffle_dataset(filenames, extra_fn=None):
"""Shuffles the dataset.
Args:
filenames: a list of strings
extra_fn: an optional function from list of records to list of records
to be called after shuffling a file.
""" |
if outputs_exist(filenames):
tf.logging.info("Skipping shuffle because output files exist")
return
tf.logging.info("Shuffling data...")
for filename in filenames:
_shuffle_single(filename, extra_fn=extra_fn)
tf.logging.info("Data shuffled.") |
<SYSTEM_TASK:>
Pack examples into longer examples.
<END_TASK>
<USER_TASK:>
Description:
def pack_examples(examples,
has_inputs,
packed_length=256,
spacing=2,
queue_size=10,
chop_long_sequences=False):
"""Pack examples into longer examples.
If has_inputs=False, we are packing single-sequence examples with
targets only and no inputs.
In this case, we concatenate the targets from several examples to form
each new example. We insert a number of zeros for spacing between the
original sequences. This is to help the sequences stay separate
under convolutions. If chop_long_sequences is set, then any input sequence
longer than packed_length gets chopped up into multiple examples. Otherwise,
long sequences are emitted as singletons.
If has_inputs=True, then we are packing sequence-to-sequence
examples. We combine several examples by concatenating the inputs
(as above) and concatenating the targets (as above). Chopping of
long sequences is not supported.
The packed examples are represented as dictionaries containing:
"inputs", "targets": the packed sequences described above
"inputs_segmentation", "targets_segmentation":
Sequences aligned with "inputs", "targets" specifying to which original
sequence each position belongs. Numbering starts from 1, and 0 is used
for spacing. This information is useful for preventing attention across
segments.
e.g. [1 1 1 1 1 1 0 0 2 2 2 0 0 3 3 3 3 3 0 0 4 4 4]
"inputs_position", "targets_position":
Sequences aligned with "inputs", "targets" specifying position within
the original sequence. This is useful for positional encodings.
e.g. [0 1 2 3 4 5 0 0 0 1 2 0 0 0 1 2 3 4 0 0 0 1 2]
Args:
examples: a generator returning feature dictionaries.
has_inputs: a boolean
packed_length: an integer
spacing: an integer
queue_size: an integer
chop_long_sequences: a boolean
Yields:
feature dictionaries.
""" |
packer = SequencePairPacker if has_inputs else SequencePacker
combined = []
for example in examples:
x = ((example["inputs"], example["targets"])
if has_inputs else example["targets"])
if chop_long_sequences and len(x) > packed_length:
assert not has_inputs
num_fragments = len(x) // packed_length
for i in range(num_fragments):
yield packer(
x[packed_length * i:packed_length * (i + 1)], spacing).to_dict()
x = x[packed_length * num_fragments:]
added = False
for c in combined:
if c.can_fit(x, packed_length):
c.add(x)
added = True
break
if not added:
if len(combined) == queue_size:
yield combined[0].to_dict()
combined = combined[1:]
combined.append(packer(x, spacing))
for c in combined:
yield c.to_dict() |
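A minimal sketch of packing target-only examples, assuming the SequencePacker
and SequencePairPacker classes from this module are available. Token values are
toy integers; spacing zeros separate the original sequences in the packed
output.

toy_examples = ({"targets": [7, 7, 7]}, {"targets": [8, 8]}, {"targets": [9]})
for packed in pack_examples(iter(toy_examples), has_inputs=False,
                            packed_length=16, spacing=2):
  print(packed["targets"])
  print(packed["targets_segmentation"])
  print(packed["targets_position"])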
<SYSTEM_TASK:>
Iterate over the records on disk for the Problem.
<END_TASK>
<USER_TASK:>
Description:
def tfrecord_iterator_for_problem(problem, data_dir,
dataset_split=tf.estimator.ModeKeys.TRAIN):
"""Iterate over the records on disk for the Problem.""" |
filenames = tf.gfile.Glob(problem.filepattern(data_dir, mode=dataset_split))
example_spec = problem.example_reading_spec()[0]
return tfrecord_iterator(filenames, example_spec=example_spec) |
<SYSTEM_TASK:>
Yields records from TFRecord files.
<END_TASK>
<USER_TASK:>
Description:
def tfrecord_iterator(filenames, gzipped=False, example_spec=None):
"""Yields records from TFRecord files.
Args:
filenames: list<str>, list of TFRecord filenames to read from.
gzipped: bool, whether the TFRecord files are gzip-encoded.
example_spec: dict<str feature name, tf.VarLenFeature/tf.FixedLenFeature>,
if provided, will parse each record as a tensorflow.Example proto.
Yields:
Records (or parsed Examples, if example_spec is provided) from files.
""" |
with tf.Graph().as_default():
dataset = tf.data.Dataset.from_tensor_slices(filenames)
def _load_records(filename):
return tf.data.TFRecordDataset(
filename,
compression_type=tf.constant("GZIP") if gzipped else None,
buffer_size=16 * 1000 * 1000)
dataset = dataset.flat_map(_load_records)
def _parse_example(ex_ser):
return tf.parse_single_example(ex_ser, example_spec)
if example_spec:
dataset = dataset.map(_parse_example, num_parallel_calls=32)
dataset = dataset.prefetch(100)
record_it = dataset.make_one_shot_iterator().get_next()
with tf.Session() as sess:
while True:
try:
ex = sess.run(record_it)
yield ex
except tf.errors.OutOfRangeError:
break |
<SYSTEM_TASK:>
Create a fill-in-the-blanks training example from text.
<END_TASK>
<USER_TASK:>
Description:
def random_deinterleave(text, separator_symbol="X"):
"""Create a fill-in-the-blanks training example from text.
Split on spaces, then cut into segments at random points. Alternate segments
are assigned to the two output strings. separator_symbol separates segments
within each of the outputs.
example:
text="The quick brown fox jumps over the lazy dog."
returns: ("X quick brown X the lazy X", "The X fox jumps over X dog.")
The two outputs can also be reversed to yield an instance of the same problem.
Args:
text: a string
separator_symbol: a string
Returns:
a pair of strings
""" |
words = text.strip().split(" ")
n = len(words)
if n <= 1:
return text, ""
cut = [False] * n
cut[0] = True
num_cuts = int(math.exp(random.uniform(0, math.log(n))))
for _ in range(num_cuts):
    cut[random.randint(1, n - 1)] = True
out = [[], []]
part = random.randint(0, 1)
for i in range(n):
if cut[i]:
out[part].append(separator_symbol)
part = 1 - part
out[part].append(words[i])
return " ".join(out[0]), " ".join(out[1]) |
<SYSTEM_TASK:>
Helper to determine the shape of reorder output.
<END_TASK>
<USER_TASK:>
Description:
def _reorder_shape(input_shape, output=None): # pylint: disable=invalid-name
"""Helper to determine the shape of reorder output.""" |
if output is None:
return input_shape
return base.nested_map(output, lambda i: input_shape[i]) |
<SYSTEM_TASK:>
Reorder a tuple into another tuple.
<END_TASK>
<USER_TASK:>
Description:
def Reorder(x, params, output=None, **kwargs):
"""Reorder a tuple into another tuple.
For example, we can re-order (x, y) into (y, x) or even (y, (x, y), y).
The output argument specifies how to re-order, using integers that refer
to indices in the input tuple. For example, if
input = (x, y, z)
then
Reorder(input, output=(1, 0, 2)) = (y, x, z)
Reorder(input, output=(0, 0)) = (x, x)
Reorder(input, output=(0, (1, 1))) = (x, (y, y))
Reorder(input, output=((2, 0), (1, 1))) = ((z, x), (y, y))
By default (if no output is given) Reorder does nothing (Identity).
Args:
x: the input tuple to re-order.
params: layer parameters (unused).
output: the specification of the output tuple: a nested tuple of ints.
**kwargs: other arguments (unused).
Returns:
The re-ordered tuple with the same shape as output.
""" |
del params, kwargs
if output is None:
return x
return base.nested_map(output, lambda i: x[i]) |
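A pure-Python sketch of the re-ordering semantics, using a local stand-in for
base.nested_map; it only illustrates how the output spec indexes into the input
tuple.

def _nested_map(spec, fn):
  # Recursively apply fn to the integer leaves of a nested tuple spec.
  if isinstance(spec, (tuple, list)):
    return tuple(_nested_map(s, fn) for s in spec)
  return fn(spec)

x = ("x", "y", "z")
print(_nested_map((1, 0, 2), lambda i: x[i]))         # ('y', 'x', 'z')
print(_nested_map(((2, 0), (1, 1)), lambda i: x[i]))  # (('z', 'x'), ('y', 'y'))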
<SYSTEM_TASK:>
Helper to determine the shape of Concatenate output.
<END_TASK>
<USER_TASK:>
Description:
def _concatenate_shape(input_shape, axis=-1): # pylint: disable=invalid-name
"""Helper to determine the shape of Concatenate output.""" |
ax = axis % len(input_shape[0])
concat_size = sum(shape[ax] for shape in input_shape)
out_shape = input_shape[0][:ax] + (concat_size,) + input_shape[0][ax+1:]
return out_shape |
<SYSTEM_TASK:>
Constructs a residual version of layers, summing input to layers output.
<END_TASK>
<USER_TASK:>
Description:
def Residual(*layers, **kwargs):
"""Constructs a residual version of layers, summing input to layers output.""" |
shortcut = kwargs.get('shortcut', Identity()) # pylint: disable=no-value-for-parameter
if len(layers) > 1:
return Serial(
Branch(), # pylint: disable=no-value-for-parameter
Parallel(Serial(*layers), shortcut),
SumBranches() # pylint: disable=no-value-for-parameter
)
elif len(layers) == 1:
return Serial(
Branch(), # pylint: disable=no-value-for-parameter
Parallel(layers[0], shortcut),
SumBranches() # pylint: disable=no-value-for-parameter
)
else:
raise ValueError('Empty residual combinator.') |
<SYSTEM_TASK:>
Adds default hparams for all of the variants of the Universal Transformer.
<END_TASK>
<USER_TASK:>
Description:
def update_hparams_for_universal_transformer(hparams):
"""Adds default hparams for all of the variants of the Universal Transformer.
Args:
hparams: default hparams (usually one of the standard hparams from
transformer model (like "transformer_base")
Returns:
hparams with default values for Universal Transformers hyper-parameters
""" |
hparams.daisy_chain_variables = False # Breaks multi-gpu in while loops.
# If not None, mixes vanilla transformer with Universal Transformer.
# Options: None, "before_ut", and "after_ut".
hparams.add_hparam("mix_with_transformer", None)
  # Number of vanilla transformer layers to be mixed in with the Universal
  # Transformer.
hparams.add_hparam("num_mixedin_layers", 2)
# Number of transformer layers within the recurrent block (default is 1).
hparams.add_hparam("num_inrecurrence_layers", 1)
# Type of recurrency:
# basic, highway, skip, dwa, act, rnn, gru, lstm.
hparams.add_hparam("recurrence_type", "basic")
# Number of steps (which is equivalent to num layer in transformer).
hparams.add_hparam("num_rec_steps", hparams.num_hidden_layers)
  # Add the positional embedding at each step (horizontal timing).
hparams.add_hparam("add_position_timing_signal", True)
if hparams.add_position_timing_signal:
hparams.pos = None
# Logic of position shifting when using timing signal:
# None, "random", "step"
hparams.add_hparam("position_start_index", None)
  # Add a step embedding at each step (vertical timing).
hparams.add_hparam("add_step_timing_signal", True)
# Either "learned" or "sinusoid"
hparams.add_hparam("step_timing_signal_type", "learned")
# Add or concat the timing signal (applied both on position and step timing).
# Options: "add" and "concat".
hparams.add_hparam("add_or_concat_timing_signal", "add")
# Add SRU at the beginning of each Universal Transformer step.
# This can be considered as a position timing signal
hparams.add_hparam("add_sru", False)
  # Type of ffn layer used within each Universal Transformer step.
  # Options: "fc" and "sepconv".
hparams.add_hparam("transformer_ffn_type", "fc")
# Transform bias (in models with highway or skip connection).
hparams.add_hparam("transform_bias_init", -1.0)
hparams.add_hparam("couple_carry_transform_gates", True)
# Depth-wise attention (grid-transformer!) hparams:
# Adds depth embedding, if true.
hparams.add_hparam("depth_embedding", True)
# Learns attention weights for elements (instead of positions), if true.
hparams.add_hparam("dwa_elements", True)
# Type of ffn_layer used for gate in skip, highway, etc.
# "dense" or "dense_dropconnect".
# With dense_relu_dense, the bias/kernel initializations will not be applied.
hparams.add_hparam("gate_ffn_layer", "dense")
# LSTM forget bias for lstm style recurrence.
hparams.add_hparam("lstm_forget_bias", 1.0)
# Uses the memory at the last step as the final output, if true.
hparams.add_hparam("use_memory_as_final_state", False)
  # If True, also add an ffn unit to the transition function when using gru/lstm.
hparams.add_hparam("add_ffn_unit_to_the_transition_function", False)
# Type of act: basic/accumulated/global (instead of position-wise!)/random.
hparams.add_hparam("act_type", "basic")
# Max number of steps (forces halting at this step).
hparams.add_hparam("act_max_steps", 2 * hparams.num_hidden_layers)
hparams.add_hparam("act_halting_bias_init", 1.0)
hparams.add_hparam("act_epsilon", 0.01)
hparams.add_hparam("act_loss_weight", 0.01)
return hparams |
<SYSTEM_TASK:>
Base parameters for Universal Transformer.
<END_TASK>
<USER_TASK:>
Description:
def universal_transformer_base():
"""Base parameters for Universal Transformer.""" |
hparams = transformer.transformer_base()
# To have a similar capacity to the transformer_base with 6 layers,
# we need to increase the size of the UT's layer
# since, in fact, UT has a single layer repeating multiple times.
hparams.hidden_size = 1024
hparams.filter_size = 4096
hparams.num_heads = 16
hparams.layer_prepostprocess_dropout = 0.3
hparams = update_hparams_for_universal_transformer(hparams)
return hparams |
<SYSTEM_TASK:>
Multi-layer config for adaptive Transformer on TPU.
<END_TASK>
<USER_TASK:>
Description:
def adaptive_universal_transformer_multilayer_tpu():
"""Multi-layer config for adaptive Transformer on TPU.""" |
hparams = adaptive_universal_transformer_base_tpu()
hparams.num_inrecurrence_layers = 2
hparams.mix_with_transformer = "before_ut,after_ut"
hparams.num_mixedin_layers = 1
hparams.transformer_ffn_type = "sepconv"
# TODO(lukaszkaiser): the options below don't work on TPU yet, make them work.
# hparams.add_step_timing_signal = True
# hparams.add_sru = True
# hparams.self_attention_type = "dot_product_relative_v2"
# hparams.max_relative_position = 256
return hparams |
<SYSTEM_TASK:>
Multi-layer config for adaptive Transformer with hard attention.
<END_TASK>
<USER_TASK:>
Description:
def adaptive_universal_transformer_multilayer_hard():
"""Multi-layer config for adaptive Transformer with hard attention.""" |
hparams = adaptive_universal_transformer_multilayer_tpu()
hparams.batch_size = 256
hparams.hard_attention_k = 8
hparams.add_step_timing_signal = True
# hparams.add_sru = True # This is very slow on GPUs, does it help?
hparams.self_attention_type = "dot_product_relative_v2"
hparams.max_relative_position = 256
return hparams |
<SYSTEM_TASK:>
Build convolutional GRU with diagonal gating as in ImprovedNGPU.
<END_TASK>
<USER_TASK:>
Description:
def ConvDiagonalGRU(units, kernel_size=(3, 3)):
"""Build convolutional GRU with diagonal gating as in ImprovedNGPU.""" |
def BuildConv():
return layers.Conv(filters=units, kernel_size=kernel_size, padding='SAME')
return layers.GeneralGRUCell(
candidate_transform=BuildConv,
memory_transform=DiagonalGate,
gate_nonlinearity=layers.HardSigmoid,
candidate_nonlinearity=layers.HardTanh) |
<SYSTEM_TASK:>
Escape away underscores and OOV characters and append '_'.
<END_TASK>
<USER_TASK:>
Description:
def _escape_token(token, alphabet):
"""Escape away underscores and OOV characters and append '_'.
This allows the token to be expressed as the concatenation of a list
of subtokens from the vocabulary. The underscore acts as a sentinel
which allows us to invertibly concatenate multiple such lists.
Args:
token: A unicode string to be escaped.
alphabet: A set of all characters in the vocabulary's alphabet.
Returns:
escaped_token: An escaped unicode string.
Raises:
ValueError: If the provided token is not unicode.
""" |
if not isinstance(token, six.text_type):
raise ValueError("Expected string type for token, got %s" % type(token))
token = token.replace(u"\\", u"\\\\").replace(u"_", u"\\u")
ret = [c if c in alphabet and c != u"\n" else r"\%d;" % ord(c) for c in token]
return u"".join(ret) + "_" |
<SYSTEM_TASK:>
Transform a human-readable string into a sequence of int ids.
<END_TASK>
<USER_TASK:>
Description:
def encode(self, s):
"""Transform a human-readable string into a sequence of int ids.
The ids should be in the range [num_reserved_ids, vocab_size). Ids [0,
num_reserved_ids) are reserved.
EOS is not appended.
Args:
s: human-readable string to be converted.
Returns:
ids: list of integers
""" |
return [int(w) + self._num_reserved_ids for w in s.split()] |
<SYSTEM_TASK:>
Transform a sequence of int ids into a human-readable string.
<END_TASK>
<USER_TASK:>
Description:
def decode(self, ids, strip_extraneous=False):
"""Transform a sequence of int ids into a human-readable string.
EOS is not expected in ids.
Args:
ids: list of integers to be converted.
strip_extraneous: bool, whether to strip off extraneous tokens
(EOS and PAD).
Returns:
s: human-readable string.
""" |
if strip_extraneous:
ids = strip_ids(ids, list(range(self._num_reserved_ids or 0)))
return " ".join(self.decode_list(ids)) |
<SYSTEM_TASK:>
Transform a sequence of int ids into a their string versions.
<END_TASK>
<USER_TASK:>
Description:
def decode_list(self, ids):
"""Transform a sequence of int ids into a their string versions.
This method supports transforming individual input/output ids to their
string versions so that sequence to/from text conversions can be visualized
in a human readable format.
Args:
ids: list of integers to be converted.
Returns:
strs: list of human-readable string.
""" |
decoded_ids = []
for id_ in ids:
if 0 <= id_ < self._num_reserved_ids:
decoded_ids.append(RESERVED_TOKENS[int(id_)])
else:
decoded_ids.append(id_ - self._num_reserved_ids)
return [str(d) for d in decoded_ids] |
<SYSTEM_TASK:>
Converts a space-separated string of tokens to a list of ids.
<END_TASK>
<USER_TASK:>
Description:
def encode(self, s):
"""Converts a space-separated string of tokens to a list of ids.""" |
sentence = s
tokens = sentence.strip().split()
if self._replace_oov is not None:
tokens = [t if t in self._token_to_id else self._replace_oov
for t in tokens]
ret = [self._token_to_id[tok] for tok in tokens]
return ret[::-1] if self._reverse else ret |
<SYSTEM_TASK:>
Initialize tokens from a list of tokens.
<END_TASK>
<USER_TASK:>
Description:
def _init_vocab_from_list(self, vocab_list):
"""Initialize tokens from a list of tokens.
It is ok if reserved tokens appear in the vocab list. They will be
removed. The set of tokens in vocab_list should be unique.
Args:
vocab_list: A list of tokens.
""" |
def token_gen():
for token in vocab_list:
if token not in RESERVED_TOKENS:
yield token
self._init_vocab(token_gen()) |
<SYSTEM_TASK:>
Initialize vocabulary with tokens from token_generator.
<END_TASK>
<USER_TASK:>
Description:
def _init_vocab(self, token_generator, add_reserved_tokens=True):
"""Initialize vocabulary with tokens from token_generator.""" |
self._id_to_token = {}
non_reserved_start_index = 0
if add_reserved_tokens:
self._id_to_token.update(enumerate(RESERVED_TOKENS))
non_reserved_start_index = len(RESERVED_TOKENS)
self._id_to_token.update(
enumerate(token_generator, start=non_reserved_start_index))
# _token_to_id is the reverse of _id_to_token
self._token_to_id = dict((v, k)
for k, v in six.iteritems(self._id_to_token)) |
<SYSTEM_TASK:>
Write vocab file to disk.
<END_TASK>
<USER_TASK:>
Description:
def store_to_file(self, filename):
"""Write vocab file to disk.
Vocab files have one token per line. The file ends in a newline. Reserved
tokens are written to the vocab file as well.
Args:
filename: Full path of the file to store the vocab to.
""" |
with tf.gfile.Open(filename, "w") as f:
for i in range(len(self._id_to_token)):
f.write(self._id_to_token[i] + "\n") |
<SYSTEM_TASK:>
Converts a sequence of subtoken ids to a native string.
<END_TASK>
<USER_TASK:>
Description:
def decode(self, ids, strip_extraneous=False):
"""Converts a sequence of subtoken ids to a native string.
Args:
ids: a list of integers in the range [0, vocab_size)
strip_extraneous: bool, whether to strip off extraneous tokens
(EOS and PAD).
Returns:
a native string
""" |
if strip_extraneous:
ids = strip_ids(ids, list(range(self._num_reserved_ids or 0)))
return unicode_to_native(
tokenizer.decode(self._subtoken_ids_to_tokens(ids))) |
<SYSTEM_TASK:>
Converts a list of tokens to a list of subtoken ids.
<END_TASK>
<USER_TASK:>
Description:
def _tokens_to_subtoken_ids(self, tokens):
"""Converts a list of tokens to a list of subtoken ids.
Args:
tokens: a list of strings.
Returns:
a list of integers in the range [0, vocab_size)
""" |
ret = []
for token in tokens:
ret.extend(self._token_to_subtoken_ids(token))
return ret |
<SYSTEM_TASK:>
Converts token to a list of subtoken ids.
<END_TASK>
<USER_TASK:>
Description:
def _token_to_subtoken_ids(self, token):
"""Converts token to a list of subtoken ids.
Args:
token: a string.
Returns:
a list of integers in the range [0, vocab_size)
""" |
cache_location = hash(token) % self._cache_size
cache_key, cache_value = self._cache[cache_location]
if cache_key == token:
return cache_value
ret = self._escaped_token_to_subtoken_ids(
_escape_token(token, self._alphabet))
self._cache[cache_location] = (token, ret)
return ret |
<SYSTEM_TASK:>
Converts a list of subtoken ids to a list of tokens.
<END_TASK>
<USER_TASK:>
Description:
def _subtoken_ids_to_tokens(self, subtokens):
"""Converts a list of subtoken ids to a list of tokens.
Args:
subtokens: a list of integers in the range [0, vocab_size)
Returns:
a list of strings.
""" |
concatenated = "".join(
[self._subtoken_id_to_subtoken_string(s) for s in subtokens])
split = concatenated.split("_")
ret = []
for t in split:
if t:
unescaped = _unescape_token(t + "_")
if unescaped:
ret.append(unescaped)
return ret |
<SYSTEM_TASK:>
Converts an escaped token string to a list of subtoken IDs.
<END_TASK>
<USER_TASK:>
Description:
def _escaped_token_to_subtoken_ids(self, escaped_token):
"""Converts an escaped token string to a list of subtoken IDs.
Args:
escaped_token: An escaped token as a unicode string.
Returns:
A list of subtoken IDs as integers.
""" |
return [
self._subtoken_string_to_id[subtoken]
for subtoken in self._escaped_token_to_subtoken_strings(escaped_token)
] |