Columns: INSTRUCTION (string, 1 to 46.3k characters), RESPONSE (string, 75 to 80.2k characters)
Run the model and extract samples. Args: features: a map of string to `Tensor`. Returns: samples: an integer `Tensor`. logits: a list of `Tensor`s, one per datashard. losses: a dictionary: {loss-name (string): floating point `Scalar`}.
def sample(self, features): """Run the model and extract samples. Args: features: a map of string to `Tensor`. Returns: samples: an integer `Tensor`. logits: a list of `Tensor`s, one per datashard. losses: a dictionary: {loss-name (string): floating point `Scalar`}. """ logits, losses = self(features) # pylint: disable=not-callable if self._target_modality_is_real: return logits, logits, losses # Raw numbers returned from real modality. if self.hparams.sampling_method == "argmax": samples = tf.argmax(logits, axis=-1) else: assert self.hparams.sampling_method == "random" def multinomial_squeeze(logits, temperature=1.0): logits_shape = common_layers.shape_list(logits) reshaped_logits = ( tf.reshape(logits, [-1, logits_shape[-1]]) / temperature) choices = tf.multinomial(reshaped_logits, 1) choices = tf.reshape(choices, logits_shape[:-1]) return choices samples = multinomial_squeeze(logits, self.hparams.sampling_temp) return samples, logits, losses
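A minimal NumPy sketch of what multinomial_squeeze above does: temperature-scaled sampling over the last (vocabulary) axis. The helper name sample_with_temperature and the toy shapes are illustrative, not part of the library.

import numpy as np

def sample_with_temperature(logits, temperature=1.0, rng=None):
    # logits: array of shape [..., vocab_size]; returns ids of shape [...].
    rng = rng or np.random.default_rng(0)
    flat = logits.reshape(-1, logits.shape[-1]) / temperature
    probs = np.exp(flat - flat.max(axis=-1, keepdims=True))   # stable softmax
    probs /= probs.sum(axis=-1, keepdims=True)
    choices = np.array([rng.choice(len(p), p=p) for p in probs])
    return choices.reshape(logits.shape[:-1])

logits = np.random.randn(2, 5, 100)                 # [batch, length, vocab]
print(sample_with_temperature(logits, 0.7).shape)   # (2, 5)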
Model fn for Estimator. Args: hparams: HParams, model hyperparameters features: dict<str name, Tensor feature> labels: Tensor mode: tf.estimator.ModeKeys config: RunConfig, possibly with data_parallelism attribute params: dict, may include batch_size, use_tpu decode_hparams: HParams, used when mode == PREDICT. use_tpu: A bool, whether to build the inference graph for TPU. Returns: TPUEstimatorSpec if use_tpu else EstimatorSpec
def estimator_model_fn(cls, hparams, features, labels, mode, config=None, params=None, decode_hparams=None, use_tpu=False): """Model fn for Estimator. Args: hparams: HParams, model hyperparameters features: dict<str name, Tensor feature> labels: Tensor mode: tf.estimator.ModeKeys config: RunConfig, possibly with data_parallelism attribute params: dict, may include batch_size, use_tpu decode_hparams: HParams, used when mode == PREDICT. use_tpu: A bool, whether to build the inference graph for TPU. Returns: TPUEstimatorSpec if use tpu else EstimatorSpec """ if mode == tf.estimator.ModeKeys.TRAIN: create_dummy_vars() hparams = hparams_lib.copy_hparams(hparams) # Instantiate model data_parallelism = None if not use_tpu and config: data_parallelism = config.data_parallelism reuse = tf.get_variable_scope().reuse model = cls( hparams, mode, data_parallelism=data_parallelism, decode_hparams=decode_hparams, _reuse=reuse) # PREDICT mode if mode == tf.estimator.ModeKeys.PREDICT: if use_tpu: inputs = features.get("inputs") if inputs is None: inputs = features["targets"] shape = inputs.get_shape().as_list() if shape[0] is None: shape[0] = decode_hparams.batch_size or hparams.batch_size if shape[1] is None: shape[1] = hparams.max_input_seq_length or hparams.max_length inputs.set_shape(shape) return model.estimator_spec_predict(features, use_tpu=use_tpu) # TRAIN and EVAL modes if hparams.eval_run_autoregressive and mode == tf.estimator.ModeKeys.EVAL: logits, losses_dict = model.eval_autoregressive(features) else: logits, losses_dict = model(features) # pylint: disable=not-callable # Support model-generated labels by overriding features["targets"] with # logits["self_generated_targets"]. if isinstance(logits, dict) and "self_generated_targets" in logits: # Overwrite 'features["targets"]' and 'labels' # by logits["self_generated_targets"]. tf.logging.info("Replacing targets with model-provided targets.") features["targets"] = labels = logits.pop("self_generated_targets") assert list(logits.keys()) == ["logits"], ( # See "Returns" in the "top" method docstring for the expected # "logits" format when targets are generated at training time. "Expect only key 'logits' when there is 'self_generated_targets'. " "Found {}".format(logits.keys()) ) # Recover the original logits tensor from the logits dict. logits = logits["logits"] # Can be a tf.Tensor or a dict. # Set known shapes if common_layers.is_xla_compiled(): if isinstance(logits, dict): for k, v in sorted(six.iteritems(logits)): if "scalar/" in k: continue shape = v.get_shape().as_list() if shape[0] is None: shape[0] = params["batch_size"] if shape[1] is None: shape[1] = hparams.max_length v.set_shape(shape) else: shape = logits.get_shape().as_list() if shape[0] is None: shape[0] = params["batch_size"] if shape[1] is None: shape[1] = hparams.max_length logits.set_shape(shape) assert "training" in losses_dict # Attack mode if mode == "attack": return logits # Summarize losses model._summarize_losses(losses_dict) # pylint: disable=protected-access # Accumulate losses loss = sum(losses_dict[key] for key in sorted(losses_dict.keys())) # EVAL mode if mode == tf.estimator.ModeKeys.EVAL: return model.estimator_spec_eval(features, logits, labels, loss, losses_dict) # TRAIN mode assert mode == tf.estimator.ModeKeys.TRAIN num_async_replicas = 1 if config and not use_tpu: num_async_replicas = config.t2t_device_info["num_async_replicas"] return model.estimator_spec_train( loss, num_async_replicas=num_async_replicas, use_tpu=use_tpu)
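For orientation, a hypothetical sketch of how a class-level model_fn with this non-standard signature might be wired into a plain tf.estimator.Estimator, assuming estimator_model_fn is exposed as a classmethod (its cls argument suggests so). T2T ships its own wiring for this; make_model_fn, MyT2TModel and my_hparams below are placeholders.

import tensorflow as tf

def make_model_fn(model_cls, hparams, decode_hparams=None, use_tpu=False):
  # Adapt (hparams, features, labels, mode, ...) to the
  # (features, labels, mode, params, config) signature Estimator expects.
  def wrapped_model_fn(features, labels, mode, params=None, config=None):
    return model_cls.estimator_model_fn(
        hparams, features, labels, mode,
        config=config, params=params,
        decode_hparams=decode_hparams, use_tpu=use_tpu)
  return wrapped_model_fn

# estimator = tf.estimator.Estimator(
#     model_fn=make_model_fn(MyT2TModel, my_hparams), model_dir="/tmp/model")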
Constructs `tf.estimator.EstimatorSpec` for TRAIN (training) mode.
def estimator_spec_train(self, loss, num_async_replicas=1, use_tpu=False): """Constructs `tf.estimator.EstimatorSpec` for TRAIN (training) mode.""" train_op = self.optimize(loss, num_async_replicas=num_async_replicas, use_tpu=use_tpu) if use_tpu: if self._hparams.warm_start_from: def scaffold_fn(): self.initialize_from_ckpt(self._hparams.warm_start_from) return tf.train.Scaffold() else: scaffold_fn = None # Note: important to call this before remove_summaries() if self.hparams.tpu_enable_host_call: host_call = self.create_train_host_call() else: host_call = None remove_summaries() return tf.contrib.tpu.TPUEstimatorSpec( tf.estimator.ModeKeys.TRAIN, loss=loss, train_op=train_op, host_call=host_call, scaffold_fn=scaffold_fn) else: if self._hparams.warm_start_from: self.initialize_from_ckpt(self._hparams.warm_start_from) # When loading weights from a pre-trained model, you want to be able to # load separate weights into the encoder and decoder. if self._hparams.warm_start_from_second: self.initialize_from_ckpt(self._hparams.warm_start_from_second) return tf.estimator.EstimatorSpec( tf.estimator.ModeKeys.TRAIN, loss=loss, train_op=train_op)
Constructs `tf.estimator.EstimatorSpec` for EVAL (evaluation) mode.
def estimator_spec_eval(self, features, logits, labels, loss, losses_dict): """Constructs `tf.estimator.EstimatorSpec` for EVAL (evaluation) mode.""" del losses_dict hparams = self.hparams if not hasattr(hparams, "problem"): raise NotImplementedError(_no_problem_err("estimator_spec_eval")) problem = hparams.problem if common_layers.is_xla_compiled(): # Note: important to call this before remove_summaries() if self.hparams.tpu_enable_host_call: host_call = self.create_eval_host_call() else: host_call = None remove_summaries() eval_metrics_fn = create_tpu_eval_metrics_fn(problem, hparams) batch_size = [feature.shape.as_list()[0] for _, feature in features.items() if feature.shape.ndims][0] # Add batch dimension to all features since tpu requires the batch # dimension on all tensors. for name, feature in features.items(): if not feature.shape.as_list(): # All features must have a batch dimension feature = tf.tile(tf.expand_dims(feature, 0), [batch_size]) features[name] = feature eval_metrics_fn_args = dict( logits=logits, # possibly a dict labels=labels, features=features, # dict ) eval_metrics_fn_flat_args = _flatten_dict(eval_metrics_fn_args) return tf.contrib.tpu.TPUEstimatorSpec( tf.estimator.ModeKeys.EVAL, eval_metrics=(eval_metrics_fn, eval_metrics_fn_flat_args), host_call=host_call, loss=loss) else: task_list = [problem] if hasattr(problem, "task_list"): task_list = problem.task_list eval_metrics_fns = metrics.create_evaluation_metrics(task_list, hparams) eval_metrics = {} for metric_name, metric_fn in six.iteritems(eval_metrics_fns): if isinstance(logits, dict): # the key is located in the center of metric_name: "metrics-%s/%s/%s" k = metric_name.split("/")[1] if k in logits: eval_metrics[metric_name] = metric_fn(logits[k], features, features[k]) else: # We do not make it an error because we sometimes run models that # predict only parts of the targets defined by the Problem class. # For example, an autoencoder or pure-video model can run on a gym # problem even if another model is also predicting other things, # like actions or rewards. tf.logging.warning("No key %s in logits for evaluation." % k) else: eval_metrics[metric_name] = metric_fn(logits, features, features["targets"]) if isinstance(logits, dict): predictions = logits else: predictions = {"predictions": logits} evaluation_hooks = [] # Create a SummarySaverHook eval_dir = os.path.join( self.hparams.model_dir, self.hparams.get("eval_dir_name", "eval")) eval_summary_hook = tf.train.SummarySaverHook( save_steps=1, output_dir=eval_dir, summary_op=tf.summary.merge_all()) evaluation_hooks.append(eval_summary_hook) evaluation_hooks += problem.eval_hooks(features, logits, hparams) return tf.estimator.EstimatorSpec( tf.estimator.ModeKeys.EVAL, predictions=predictions, eval_metric_ops=eval_metrics, evaluation_hooks=evaluation_hooks, loss=loss)
Constructs `tf.estimator.EstimatorSpec` for PREDICT (inference) mode.
def estimator_spec_predict(self, features, use_tpu=False): """Constructs `tf.estimator.EstimatorSpec` for PREDICT (inference) mode.""" decode_hparams = self._decode_hparams top_beams = decode_hparams.beam_size if decode_hparams.return_beams else 1 infer_out = self.infer( features, beam_size=decode_hparams.beam_size, top_beams=top_beams, alpha=decode_hparams.alpha, decode_length=decode_hparams.extra_length, use_tpu=use_tpu) if isinstance(infer_out, dict): outputs = infer_out["outputs"] scores = infer_out["scores"] else: outputs = infer_out scores = None inputs = features.get("inputs") if inputs is None: inputs = features["targets"] predictions = { "outputs": outputs, "scores": scores, "inputs": inputs, "targets": features.get("infer_targets"), } # Pass through remaining features for name, feature in features.items(): if name not in list(predictions.keys()) + ["infer_targets"]: if name == "decode_loop_step": continue if not feature.shape.as_list(): # All features must have a batch dimension batch_size = common_layers.shape_list(outputs)[0] feature = tf.tile(tf.expand_dims(feature, 0), [batch_size]) predictions[name] = feature _del_dict_non_tensors(predictions) export_out = {"outputs": predictions["outputs"]} if "scores" in predictions: export_out["scores"] = predictions["scores"] # Necessary to rejoin examples in the correct order with the Cloud ML Engine # batch prediction API. if "batch_prediction_key" in predictions: export_out["batch_prediction_key"] = predictions["batch_prediction_key"] export_outputs = { tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY: tf.estimator.export.PredictOutput(export_out) } if use_tpu: # Note: important to call this before remove_summaries() if self.hparams.tpu_enable_host_call: host_call = self.create_eval_host_call() else: host_call = None remove_summaries() return tf.contrib.tpu.TPUEstimatorSpec( tf.estimator.ModeKeys.PREDICT, predictions=predictions, host_call=host_call, export_outputs=export_outputs) else: return tf.estimator.EstimatorSpec( tf.estimator.ModeKeys.PREDICT, predictions=predictions, export_outputs=export_outputs)
Adds `tf.summary`s to all terms in the losses dictionary.
def _summarize_losses(self, losses_dict): """Adds `tf.summary`s to all terms in the losses dictionary.""" if common_layers.should_generate_summaries(): with tf.name_scope("losses"): for loss_name, loss_val in sorted(losses_dict.items()): tf.summary.scalar(loss_name, loss_val)
Scheduled sampling. Performs forward inference again with "targets" feature replaced with values sampled from the model. This is the identity unless self.hparams.scheduled_sampling_prob > 0 (default). **WARNING**: This is not a faithful implementation of scheduled sampling. This implementation samples tokens for timestep t conditioned on gold tokens 1...t-1. A proper implementation must condition on a mix of gold and sampled tokens. Doing so is not efficient for models such as the Transformer. Args: features: {str: Tensor}. Features sharded along batch dimension. logits: Tensor. Logits for each shard of data. losses: 0-D Tensor or (num: 0-D Tensor, denom: 0-D Tensor). Loss Tensor. Returns: new_logits: Tensor. new_losses: {str: loss} where loss is one of (i) a 0-D Tensor or (ii) a (num: 0-D Tensor, denom: 0-D Tensor) pair to be used in a weighted average.
def maybe_scheduled_sampling(self, features, logits, losses): """Scheduled sampling. Performs forward inference again with "targets" feature replaced with values sampled from the model. This is the identity unless self.hparams.scheduled_sampling_prob > 0 (default). **WARNING**: This is not a faithful implementation of scheduled sampling. This implementation samples tokens for timestep t condtioned on gold tokens 1...t-1. A proper implementation must condition on a mix of gold and sampled tokens. Doing so is not efficient for models such like Transformer. Args: features: {str: Tensor}. Features sharded along batch dimension. logits: Tensor. Logits for each shard of data. losses: 0-D Tensor or (num: 0-D Tensor, denom: 0-D Tensor). Loss Tensor Returns: new_logits: Tensor. new_losses: {str: loss} where loss is one of (i) a 0-D Tensor or (ii) a (num: 0-D Tensor, denom: 0-D Tensor) pair to be used in a weighted average. """ hparams = self.hparams problem_hparams = self._problem_hparams # Only do scheduled sampling if requested. if hparams.scheduled_sampling_prob == 0.0: return (logits, losses) # Only do scheduled sampling on language tasks. modality = problem_hparams.modality["targets"] if modality != modalities.ModalityType.SYMBOL: assert hparams.scheduled_sampling_prob == 0, ( "Scheduled sampling only applies to ModalityType.SYMBOL. Set " "hparams.scheduled_sampling_prob == 0.0.") return (logits, losses) # Only do scheduled sampling when training. is_training = (hparams.mode == tf.estimator.ModeKeys.TRAIN) if not is_training: tf.logging.info("Running in %s mode. Not using scheduled sampling.", hparams.mode) return (logits, losses) # Pad vocabulary if vocab size must be evenly divisible by vocab_divisor. vocab_size = problem_hparams.vocab_size["targets"] assert vocab_size is not None assert hparams.vocab_divisor == 1 def sample(x): """Multinomial sampling from a n-dimensional tensor.""" samples = tf.multinomial(tf.reshape(x, [-1, vocab_size]), 1) reshaped_samples = tf.reshape(samples, common_layers.shape_list(x)[:-1]) return tf.to_int32(reshaped_samples) def mix_gold_sampled(gold_targets, sampled_targets, mixin_prob): """Interleave sampled and gold tokens randomly.""" return tf.where( tf.less( tf.random_uniform(common_layers.shape_list(sampled_targets)), mixin_prob), sampled_targets, gold_targets) def sampled_results(features, logits, mixin_prob): """Generate scheduled sampling results.""" sampled_targets = sample(logits) new_targets = mix_gold_sampled(features["targets"], sampled_targets, mixin_prob) new_targets = tf.stop_gradient(new_targets) # Treat new_targets as given. new_features = copy.copy(features) new_features["targets"] = new_targets with tf.variable_scope(tf.get_variable_scope(), reuse=True): # Compute bottom() for new_targets. # # TODO(duckworthd): Only apply bottom to 'new_targets'. new_transformed_features = self.bottom(new_features) # Compute body. with tf.variable_scope("body"): new_body_outputs, new_losses = self._normalize_body_output( self.body(new_transformed_features)) assert "training" not in new_losses # Compute top. new_logits = self.top(new_body_outputs, new_features) # Compute loss. Use original features (== labels). 
if (hparams.mode != tf.estimator.ModeKeys.PREDICT and hparams.mode != "attack"): new_losses["training"] = self.loss(new_logits, features) else: new_losses["training"] = 0.0 return new_logits, new_losses tf.logging.info("Using scheduled sampling.") assert hparams.scheduled_sampling_prob == 1.0, ( "hparams.scheduled_sampling_prob must be 0 or 1.") # Gradually increase over a warmup period. Lower numbers mean more gold # tokens. mixin_prob = ( hparams.scheduled_sampling_gold_mixin_prob * common_layers.inverse_exp_decay( hparams.scheduled_sampling_warmup_steps, min_value=0.001) ) # Apply scheduled sampling over N passes. The logits from the (n-1)-th pass # will be mixed with gold tokens for conditioning in the n-th pass. scheduled_sampling_num_passes = getattr( hparams, "scheduled_sampling_num_passes", 1) assert scheduled_sampling_num_passes > 0, ( "hparams.scheduled_sampling_num_passes must be > 0 if " "hparams.scheduled_sampling_prob > 0.0") new_logits = logits new_losses = losses for _ in range(scheduled_sampling_num_passes): new_logits, new_losses = sampled_results(features, new_logits, mixin_prob) return new_logits, new_losses
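A rough NumPy sketch of the token mixing performed above: each target position is replaced by a sampled token with probability mixin_prob, and mixin_prob is warmed up toward scheduled_sampling_gold_mixin_prob over the warmup steps. The warmup curve here only approximates common_layers.inverse_exp_decay; all names below are illustrative.

import numpy as np

def warmed_up_mixin_prob(step, warmup_steps, gold_mixin_prob, min_value=0.001):
    # Rises from roughly min_value at step 0 to gold_mixin_prob at warmup_steps.
    frac = max(warmup_steps - step, 0) / float(warmup_steps)
    return gold_mixin_prob * (min_value ** frac)

def mix_gold_sampled(gold, sampled, mixin_prob, rng):
    use_sampled = rng.random(gold.shape) < mixin_prob
    return np.where(use_sampled, sampled, gold)

rng = np.random.default_rng(0)
gold = rng.integers(0, 100, size=(2, 6))
sampled = rng.integers(0, 100, size=(2, 6))
mixed = mix_gold_sampled(gold, sampled, warmed_up_mixin_prob(5000, 10000, 0.5), rng)
print(mixed.shape)   # (2, 6)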
Prepare one shard of the model for the decoder. Args: targets: a Tensor. hparams: run hyperparameters Returns: decoder_input: a Tensor, bottom of decoder stack decoder_self_attention_bias: a Tensor, containing large negative values to implement masked attention and possibly biases for diagonal alignments pad_remover (expert_utils.PadRemover): a utility object to remove padding
def attention_lm_moe_prepare_decoder(targets, hparams): """Prepare one shard of the model for the decoder. Args: targets: a Tensor. hparams: run hyperparameters Returns: decoder_input: a Tensor, bottom of decoder stack decoder_self_attention_bias: a Tensor, containing large negative values to implement masked attention and possibly biases for diagonal alignments pad_remover (expert_utils.PadRemover): a utility object to remove padding """ targets_pad_mask = common_attention.embedding_to_padding(targets) with tf.name_scope("pad_remover"): # Because of the shift_right, the <eos> token will be considered as # padding. In practice it doesn't really matter: due to the triangular # mask, this token should never be attended to. pad_remover = expert_utils.PadRemover(targets_pad_mask) if hparams.prepend_mode == "prepend_inputs_full_attention": decoder_self_attention_bias = ( common_attention.attention_bias_prepend_inputs_full_attention( targets_pad_mask)) else: decoder_self_attention_bias = ( common_attention.attention_bias_lower_triangle(tf.shape(targets)[1])) decoder_input = common_layers.shift_right_3d(targets) if hparams.pos == "timing": decoder_input = common_attention.add_timing_signal_1d(decoder_input) return (decoder_input, decoder_self_attention_bias, pad_remover)
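A small NumPy sketch of the two decoder-side preparations above, shifting the targets right by one position and building a lower-triangular self-attention bias with large negative values above the diagonal (illustrative only, not the T2T implementations).

import numpy as np

def shift_right(targets):
    # targets: [batch, length, depth]; prepend a zero step, drop the last one.
    padded = np.pad(targets, [(0, 0), (1, 0), (0, 0)])
    return padded[:, :-1, :]

def lower_triangle_bias(length, neg=-1e9):
    # Large negative bias above the diagonal masks attention to future tokens.
    return np.triu(np.ones((length, length)), k=1) * neg

x = np.arange(2 * 4 * 3, dtype=np.float32).reshape(2, 4, 3)
print(shift_right(x)[0, 0])   # all zeros: position 0 sees only the start token
print(lower_triangle_bias(4))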
Return a flat int32 tensor of shape [1, batch_size*length, 1].
def get_batch_coordinate(x, axis=0): """Return a flat int32 tensor of shape [1, batch_size*length, 1].""" # Compute the batch coordinate before flattening all batches batch_coordinate = tf.expand_dims( common_attention.coordinate_tensor(tf.shape(x)[:-1], axis=axis), axis=-1) return batch_coordinate
Duplicate elements of bc by length_factor. Args: bc (tf.Tensor): int32 tensor of shape [1, length, 1] length_factor (int): Returns: tf.Tensor: of shape [1, length*length_factor, 1] where every element has been duplicated length_factor times.
def expand_batch_coordinates(bc, length_factor): """Duplicate elements of bc by length_factor. Args: bc (tf.Tensor): int32 tensor of shape [1, length, 1] length_factor (int): Returns: tf.Tensor: of shape [1, length*length_factor, 1] where every element has been duplicated length_factor times. """ assert bc.get_shape().as_list() == [1, None, 1] # bc has shape [1, length, 1] bc *= tf.constant([[1] * length_factor]) # bc has shape [1, length, length_factor] bc = tf.reshape(bc, [1, -1, 1]) # bc has shape [1, length*length_factor, 1] return bc
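An illustrative NumPy version of the duplication trick above: multiplying the [1, length, 1] coordinates by a [1, length_factor] row of ones broadcasts to [1, length, length_factor], and reshaping then repeats each coordinate length_factor times in place.

import numpy as np

bc = np.array([[[0], [0], [1], [1]]])                         # [1, length=4, 1]
length_factor = 3
expanded = bc * np.ones((1, length_factor), dtype=bc.dtype)   # [1, 4, 3]
expanded = expanded.reshape(1, -1, 1)                         # [1, 12, 1]
print(expanded[0, :, 0])   # [0 0 0 0 0 0 1 1 1 1 1 1]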
Remove padding by concatenating all dimensions into one. Args: x (tf.Tensor): input of shape [batch_size, length, depth] pad_remover (obj): a PadRemover object mode (ModeKeys): infer, train or eval. In inference mode, the padding remover is not applied Returns: tf.Tensor of shape [1, length_nonpad, depth] where length_nonpad <= batch_size*length
def remove_pad(x, pad_remover, mode): """Remove padding by concatenating all dimensions into one. Args: x (tf.Tensor): input of shape [batch_size, length, depth] pad_remover (obj): a PadRemover object mode (ModeKeys): infer, train or eval. In inference mode, the padding remover is not applied Returns: tf.Tensor of shape [1, length_nonpad, depth] where length_nonpad <= batch_size*length """ # Concatenate all tokens (without padding) x = expert_utils.flatten_all_but_last(x) # Remove padding for training and eval if mode != ModeKeys.PREDICT: # This is a hack to allow inference when the <go> token # is detected as padding and removed. This works for now because there is # no padding at inference. x = pad_remover.remove(x) x = tf.expand_dims(x, axis=0) # Now batch_size=1 return x
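A NumPy sketch of what this PadRemover-style step amounts to: flatten batch and length into one axis and keep only the non-padding rows (the real expert_utils.PadRemover also remembers the positions so the padding can be restored later); shapes and data here are illustrative.

import numpy as np

x = np.array([[[1., 1.], [2., 2.], [0., 0.]],
              [[3., 3.], [0., 0.], [0., 0.]]])   # [batch=2, length=3, depth=2]
flat = x.reshape(-1, x.shape[-1])                # [batch*length, depth]
nonpad = np.any(flat != 0.0, axis=-1)            # padding rows are all-zero
compact = flat[nonpad][None, ...]                # [1, length_nonpad, depth]
print(compact.shape)                             # (1, 3, 2)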
Set of hyperparameters suitable for 1 GPU. On lm1b_32k: ~229M params, 0.9 steps/sec on [GeForce GTX TITAN X]. Returns: an hparams object
def attention_lm_moe_base(): """Set of hyperparameters. suitable for 1 gpu. on lm1b_32k: ~229M params 0.9 steps/sec on [GeForce GTX TITAN X] Returns: a hparams object """ hparams = common_hparams.basic_params1() hparams.hidden_size = 1024 hparams.batch_size = 8192 hparams.max_length = 256 hparams.dropout = 0.0 hparams.clip_grad_norm = 0. # i.e. no gradient clipping hparams.optimizer_adam_epsilon = 1e-9 hparams.learning_rate_decay_scheme = "noam" hparams.learning_rate = 0.1 hparams.learning_rate_warmup_steps = 2000 hparams.initializer_gain = 1.0 hparams.num_hidden_layers = 4 hparams.initializer = "uniform_unit_scaling" hparams.weight_decay = 0.0 hparams.optimizer_adam_beta1 = 0.9 hparams.optimizer_adam_beta2 = 0.98 hparams.num_sampled_classes = 0 hparams.label_smoothing = 0.0 hparams.shared_embedding_and_softmax_weights = False hparams.add_hparam("filter_size", 2048) # Add new ones like this. hparams.moe_num_experts = 32 # attention-related flags hparams.add_hparam("num_heads", 8) hparams.add_hparam("attention_key_channels", 0) hparams.add_hparam("attention_value_channels", 0) # All hyperparameters ending in "dropout" are automatically set to 0.0 # when not in training mode. hparams.add_hparam("attention_dropout", 0.0) hparams.add_hparam("relu_dropout", 0.0) hparams.add_hparam("pos", "timing") # timing, none hparams.add_hparam("moe_layers", "2") # comma separated list of layer numbers # moe params. local attention moe. # If attention_layers is set, the num_hidden_layers parameter will be ignored # and each caracter of the string will correspond to one attention # layer type hparams.add_hparam("attention_layers", "") hparams.add_hparam("attention_type", AttentionType.MULTIHEAD) hparams.add_hparam("attention_local", False) hparams.add_hparam("attention_moe_k", 2) hparams.add_hparam("attention_num_head", 1) hparams.add_hparam("attention_num_experts", 16) hparams.add_hparam("attention_split_batch", False) hparams.add_hparam("attention_red_factor", 3) hparams.add_hparam("attention_block_length", 128) hparams.add_hparam("attention_reduction_type", "conv") # Non linearity for the attention reduction. Either "none", or "silu" ( # Sigmoid Linear-Unit described in https://arxiv.org/abs/1710.05941) hparams.add_hparam("attention_nonlinearity", "none") # If attention_exp_factor is set, each input to local_expert_attention (of # dimensionality hidden size) is projected into attention_exp_factor smaller # inputs, each of dimensionality attention_exp_inputdim. (otherwise # attention_exp_inputdim is ignored) hparams.add_hparam("attention_exp_factor", 0) hparams.add_hparam("attention_exp_inputdim", 128) # Key, query and value dimensions for the attention hparams.add_hparam("attention_kq_size", 128) hparams.add_hparam("attention_v_size", 256) # Loss coef for load balancing hparams.add_hparam("attention_load_balance", 2e-2) # Locality-sensitive hashing params hparams.add_hparam("lsh_num_hyperplanes", 4) hparams.add_hparam("lsh_use_map_fn", False) hparams.add_hparam("use_sepconv", False) hparams.add_hparam("diet_experts", False) hparams.add_hparam("memory_efficient_ffn", False) # if True, we learn a non-autoregressive model from "inputs" to "targets". # if False, we learn an autoregressive model to generate "targets" hparams.add_hparam("use_inputs", False) return hparams
Hyperparameters specific to long sequence generation.
def attention_lm_moe_base_long_seq(): """Hyperparameters specific to long sequence generation.""" hparams = attention_lm_moe_base() hparams.max_length = 0 # max_length == batch_size hparams.eval_drop_long_sequences = True hparams.min_length_bucket = 256 # Avoid cyclic problems for big batches hparams.use_sepconv = True return hparams
Base model with attention expert.
def attention_lm_moe_base_ae(): """Base model with attention expert.""" hparams = attention_lm_moe_base_long_seq() hparams.attention_type = AttentionType.LOCAL_EXPERTS hparams.learning_rate = 0.05 hparams.learning_rate_warmup_steps = 10000 # According to noam, ("n", "da") seems better for harder-to-learn models # hparams.layer_preprocess_sequence = "n" # hparams.layer_postprocess_sequence = "da" return hparams
Experiment with the exp_factor params.
def attention_lm_ae_extended(): """Experiment with the exp_factor params.""" hparams = attention_lm_moe_base_long_seq() hparams.attention_layers = "eeee" hparams.attention_local = True # hparams.factored_logits=1 # Necessary when the number of experts grows bigger hparams.attention_moe_k = 2 hparams.attention_exp_factor = 4 # hparams.attention_exp_inputdim = 128 hparams.layer_preprocess_sequence = "n" hparams.layer_postprocess_sequence = "da" return hparams
Base model with attention expert.
def attention_lm_moe_base_memeff(): """Base model with attention expert.""" hparams = attention_lm_moe_base_long_seq() hparams.use_sepconv = False hparams.diet_experts = True hparams.layer_preprocess_sequence = "n" hparams.layer_postprocess_sequence = "da" hparams.layer_prepostprocess_dropout = 0.0 hparams.memory_efficient_ffn = True hparams.attention_type = AttentionType.MEMORY_EFFICIENT hparams.num_heads = 8 hparams.factored_logits = True return hparams
Cheap model for single-GPU training. On lm1b_32k: ~312M params, 1.6 steps/sec on [GeForce GTX TITAN X]. After 50K steps on 8 GPUs (synchronous): eval_log_ppl_per_token = 3.31. Returns: an hparams object.
def attention_lm_moe_small(): """Cheap model for single-gpu training. on lm1b_32k: ~312M params 1.6 steps/sec on [GeForce GTX TITAN X] After 50K steps on 8 GPUs (synchronous): eval_log_ppl_per_token = 3.31 Returns: an hparams object. """ hparams = attention_lm_moe_base() hparams.num_hidden_layers = 4 hparams.hidden_size = 512 hparams.filter_size = 2048 hparams.moe_num_experts = 128 hparams.moe_layers = "2" return hparams
Cheap model for debugging. Returns: an hparams object.
def attention_lm_attention_moe_tiny(): """Cheap model for debugging. Returns: an hparams object. """ hparams = attention_lm_moe_small() hparams.moe_layers = "" hparams.attention_num_experts = 128 hparams.filter_size = 8192 hparams.attention_type = AttentionType.LOCAL_EXPERTS return hparams
Large model for distributed training. Over 1B parameters, so it requires multi-GPU training due to memory requirements. On lm1b_32k, after 45K steps on 8 GPUs (synchronous): eval_log_ppl_per_token = 3.18, eval_ppl_per_word = exp(1.107893 * eval_log_ppl_per_token) = 33.9. Returns: an hparams object.
def attention_lm_moe_large(): """Large model for distributed training. Over 1B parameters, so requires multi-gpu training due to memory requirements. on lm1b_32k: After 45K steps on 8 GPUs (synchronous): eval_log_ppl_per_token = 3.18 eval_ppl_per_word = exp(1.107893 * eval_log_ppl_per_token) = 33.9 Returns: an hparams object. """ hparams = attention_lm_moe_base() hparams.num_hidden_layers = 5 hparams.moe_layers = "3" hparams.hidden_size = 1024 hparams.num_heads = 16 hparams.filter_size = 4096 hparams.moe_hidden_sizes = "4096" hparams.moe_num_experts = 128 hparams.layer_prepostprocess_dropout = 0.2 return hparams
Memory-efficient version.
def attention_lm_moe_memory_efficient(): """Memory-efficient version.""" hparams = attention_lm_moe_large() hparams.diet_experts = True hparams.layer_preprocess_sequence = "n" hparams.layer_postprocess_sequence = "da" hparams.layer_prepostprocess_dropout = 0.0 hparams.memory_efficient_ffn = True hparams.attention_type = AttentionType.MEMORY_EFFICIENT hparams.num_heads = 8 hparams.factored_logits = True return hparams
Unnecessarily large model with 24B params - because we can.
def attention_lm_moe_24b_diet(): """Unnecessarily large model with 24B params - because we can.""" hparams = attention_lm_moe_large_diet() hparams.moe_hidden_sizes = "12288" hparams.moe_num_experts = 1024 hparams.batch_size = 4096 return hparams
Version to use for seq2seq.
def attention_lm_moe_translation(): """Version to use for seq2seq.""" hparams = attention_lm_moe_base() hparams.layer_preprocess_sequence = "n" hparams.layer_postprocess_sequence = "da" hparams.learning_rate = 0.4 hparams.prepend_mode = "prepend_inputs_masked_attention" hparams.max_length = 512 hparams.label_smoothing = 0.1 hparams.layer_prepostprocess_dropout = 0.2 hparams.num_hidden_layers = 6 hparams.moe_layers = "0,1,2,3,4,5" hparams.shared_embedding_and_softmax_weights = True return hparams
Version to use with languagemodel_wiki_scramble1k50.
def attention_lm_moe_unscramble_base(): """Version to use with languagemodel_wiki_scramble1k50.""" hparams = attention_lm_no_moe_small() hparams.use_inputs = True hparams.min_length_bucket = 1024 hparams.max_length = 1024 hparams.batch_size = 5000 hparams.layer_prepostprocess_dropout = 0.0 hparams.layer_preprocess_sequence = "n" hparams.layer_postprocess_sequence = "da" return hparams
Transform input from data space to model space. Args: x: A Tensor with shape [batch, ...] model_hparams: HParams, model hyperparameters. vocab_size: int, vocabulary size. Returns: body_input: A Tensor with shape [batch, ?, ?, model_hparams.hidden_size].
def audio_bottom(x, model_hparams, vocab_size): """Transform input from data space to model space. Args: x: A Tensor with shape [batch, ...] model_hparams: HParams, model hyperparmeters. vocab_size: int, vocabulary size. Returns: body_input: A Tensor with shape [batch, ?, ?, model_hparams.hidden_size]. """ del vocab_size # unused arg inputs = x with tf.variable_scope("audio_modality"): # TODO(aidangomez): Will need to sort out a better audio pipeline def xnet_resblock(x, filters, res_relu, name): """Xception block.""" with tf.variable_scope(name): # Typically audio samples are >100k samples in length and have a width # of 2 or 4. Mono audio has a single channel while stereo has 2. y = common_layers.separable_conv_block( x, filters, [((1, 1), (3, 3)), ((1, 1), (3, 3))], first_relu=True, padding="SAME", force2d=True, name="sep_conv_block") y = common_layers.pool(y, (3, 3), "MAX", "SAME", strides=(2, 2)) return y + common_layers.conv_block( x, filters, [((1, 1), (1, 1))], padding="SAME", strides=(2, 2), first_relu=res_relu, force2d=True, name="res_conv0") x = tf.to_float(inputs) / 255. x.set_shape([None, None, None, 1]) for i in range(model_hparams.audio_compression): x = xnet_resblock(x, 2**(i + 1), True, "compress_block_%d" % i) return xnet_resblock(x, model_hparams.hidden_size, False, "compress_block_final")
Bottom transformation for target images.
def image_targets_bottom(x, model_hparams, vocab_size): """Bottom transformation for target images.""" pixel_embedding_size = 64 inputs = x with tf.variable_scope("image_modality"): if not tf.executing_eagerly(): tf.summary.image( "targets_bottom", common_layers.tpu_safe_image_summary(inputs), max_outputs=1) inputs_shape = common_layers.shape_list(inputs) if len(inputs_shape) != 4: raise ValueError("Assuming images given as int tensors in the format " "[batch, height, width, channels] (256 values).") # We embed each of 256=vocab_size possible pixel values. embedding_var = tf.get_variable( "pixel_embedding", [vocab_size, pixel_embedding_size]) hot_inputs = tf.one_hot(tf.to_int32(inputs), vocab_size) hot_inputs = tf.reshape(hot_inputs, [-1, vocab_size]) embedded = tf.matmul(hot_inputs, embedding_var) # Let's now merge all channels that were embedded into a single vector. merged_size = pixel_embedding_size * inputs_shape[3] embedded = tf.reshape(embedded, inputs_shape[:3] + [merged_size]) merged = tf.layers.dense( embedded, model_hparams.hidden_size, name="merge_pixel_embedded_channels") return merged
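A NumPy sketch of the embedding-by-matmul trick above: one-hot encoding the pixel values and multiplying by the embedding table is equivalent to a gather, and the per-channel embeddings are then merged into one vector per pixel (toy sizes, illustrative only).

import numpy as np

vocab_size, emb_size = 256, 64
rng = np.random.default_rng(0)
table = rng.standard_normal((vocab_size, emb_size))
pixels = rng.integers(0, vocab_size, size=(2, 4, 4, 3))     # [b, h, w, channels]
one_hot = np.eye(vocab_size)[pixels.reshape(-1)]            # [b*h*w*c, vocab]
embedded = one_hot @ table                                  # same as table[pixels]
merged = embedded.reshape(2, 4, 4, 3 * emb_size)            # channels merged
print(np.allclose(merged, table[pixels].reshape(2, 4, 4, -1)))  # True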
Compresses channel-wise input pixels into whole pixel representations. Performs conversion of RGB pixel values to a real number in the range -1 to 1. This combines pixel channels to form a representation of shape [img_len, img_len]. Args: inputs: Tensor representing RGB pixel intensities as integers, of shape [batch, img_len, img_len, channels]. model_hparams: HParams, model hyperparameters. name: string, scope. Returns: body_input: Tensor of shape [batch, img_len, img_len, model_hparams.hidden_size].
def _image_channel_compress_bottom(inputs, model_hparams, name="bottom"): """Compresses channel-wise input pixels into whole pixel representions. Perform conversion of RGB pixel values to a real number in the range -1 to 1. This combines pixel channels to form a representation of shape [img_len, img_len]. Args: inputs: Tensor representing RGB pixel intensities as integers, of shape [batch, img_len, img_len, channels]. model_hparams: HParams, model hyperparmeters. name: string, scope. Returns: body_input: Tensor of shape [batch, img_len, img_len, model_hparams.hidden_size]. """ num_channels = 3 with tf.variable_scope(name): inputs = tf.to_float(inputs) hp = model_hparams if hp.mode != tf.estimator.ModeKeys.PREDICT: tf.summary.image( "inputs", common_layers.tpu_safe_image_summary(inputs), max_outputs=2) inputs = common_layers.convert_rgb_to_symmetric_real(inputs) # Reshape inputs to apply convolutions across [img_len, img_len*channels]. inputs_shape = common_layers.shape_list(inputs) inputs = tf.reshape( inputs, [-1, inputs_shape[1], inputs_shape[2] * inputs_shape[3], 1]) # Compress RGB intensities for each pixel using a convolution. outputs = tf.layers.conv2d( inputs, model_hparams.hidden_size, kernel_size=(1, num_channels), padding="VALID", strides=(1, num_channels), activation=tf.nn.relu, name="conv_input") return outputs
Bottom transformation for image targets.
def image_channel_embeddings_bottom(x, model_hparams, vocab_size): """Bottom transformation for image targets.""" del vocab_size # unused arg inputs = tf.to_int32(x) io_depth = model_hparams.num_channels tshape = common_layers.shape_list(inputs) hidden_size = model_hparams.hidden_size target_embeddings = cia.get_channel_embeddings( io_depth, inputs, hidden_size, "input_bottom") return tf.reshape(target_embeddings, [tshape[0], tshape[1], tshape[2] * io_depth, hidden_size])
Use batchnorm instead of CMVN and shorten the STFT with strided convs. Args: x: float32 tensor with shape [batch_size, len, 1, freqs * channels] model_hparams: HParams, model hyperparameters. vocab_size: int, vocabulary size. Returns: float32 tensor with shape [batch_size, shorter_len, 1, hidden_size]
def speech_recognition_bottom(x, model_hparams, vocab_size): """Use batchnorm instead of CMVN and shorten the stft with strided convs. Args: x: float32 tensor with shape [batch_size, len, 1, freqs * channels] model_hparams: HParams, model hyperparmeters. vocab_size: int, vocabulary size. Returns: float32 tensor with shape [batch_size, shorter_len, 1, hidden_size] """ del vocab_size # unused arg inputs = x p = model_hparams num_mel_bins = p.audio_num_mel_bins num_channels = 3 if p.audio_add_delta_deltas else 1 with tf.variable_scope("speech_recognition_modality"): if p.audio_preproc_in_bottom: # Compute filterbanks with tf.variable_scope("fbanks"): waveforms = tf.squeeze(inputs, [2, 3]) mel_fbanks = common_audio.compute_mel_filterbank_features( waveforms, sample_rate=p.audio_sample_rate, dither=p.audio_dither, preemphasis=p.audio_preemphasis, frame_length=p.audio_frame_length, frame_step=p.audio_frame_step, lower_edge_hertz=p.audio_lower_edge_hertz, upper_edge_hertz=p.audio_upper_edge_hertz, num_mel_bins=p.audio_num_mel_bins, apply_mask=True) if p.audio_add_delta_deltas: mel_fbanks = common_audio.add_delta_deltas(mel_fbanks) x = tf.reshape(mel_fbanks, common_layers.shape_list(mel_fbanks)[:2] + [num_mel_bins, num_channels]) nonpadding_mask = 1. - common_attention.embedding_to_padding(x) num_of_nonpadding_elements = tf.reduce_sum( nonpadding_mask) * num_mel_bins * num_channels # This replaces CMVN estimation on data var_epsilon = 1e-09 mean = tf.reduce_sum( x, axis=[1], keepdims=True) / num_of_nonpadding_elements variance = (num_of_nonpadding_elements * mean**2. - 2. * mean * tf.reduce_sum(x, axis=[1], keepdims=True) + tf.reduce_sum(x**2, axis=[1], keepdims=True) ) / num_of_nonpadding_elements x = (x - mean) * tf.rsqrt(variance + var_epsilon) * tf.expand_dims( nonpadding_mask, -1) else: x = inputs # The convention is that the models are flattened along the spatial, # dimensions, thus the speech preprocessor treats frequencies and # channels as image colors (last axis) x.set_shape([None, None, num_mel_bins, num_channels]) # TODO(chorowski): how to specify bottom's hparams and avoid hardcoding? x = tf.pad(x, [[0, 0], [0, 8], [0, 0], [0, 0]]) for _ in range(2): x = tf.layers.conv2d( x, 128, (3, 3), (2, 2), use_bias=False) x = common_layers.layer_norm(x) x = tf.nn.relu(x) xshape = common_layers.shape_list(x) # apply a conv that will remove all frequencies and at the same time # project the output into desired hidden_size x = tf.pad(x, [[0, 0], [0, 2], [0, 0], [0, 0]]) x = tf.layers.conv2d(x, p.hidden_size, (3, xshape[2]), use_bias=False) assert common_layers.shape_list(x)[2] == 1 x = common_layers.layer_norm(x) x = tf.nn.relu(x) return x
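A simplified NumPy sketch of the masked normalization that replaces CMVN above: mean and variance are computed only over non-padded frames along the time axis and the result is re-masked. The real code pools the counts slightly differently; names and shapes here are illustrative.

import numpy as np

def masked_normalize(x, nonpadding_mask, eps=1e-9):
    # x: [batch, time, bins, channels]; mask: [batch, time], 1.0 for real frames.
    m = nonpadding_mask[:, :, None, None]
    count = m.sum(axis=1, keepdims=True)
    mean = (x * m).sum(axis=1, keepdims=True) / count
    var = ((x - mean) ** 2 * m).sum(axis=1, keepdims=True) / count
    return (x - mean) / np.sqrt(var + eps) * m

feats = np.random.default_rng(0).standard_normal((2, 5, 80, 1))
mask = np.array([[1., 1., 1., 0., 0.], [1., 1., 1., 1., 1.]])
print(masked_normalize(feats, mask).shape)   # (2, 5, 80, 1)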
Create or get concatenated embedding or softmax variable. Args: model_hparams: HParams, model hyperparameters. vocab_size: int, vocabulary size. hidden_dim: dim of the variable. Defaults to model_hparams' hidden_size. Returns: a list of num_shards Tensors.
def get_weights(model_hparams, vocab_size, hidden_dim=None): """Create or get concatenated embedding or softmax variable. Args: model_hparams: HParams, model hyperparmeters. vocab_size: int, vocabulary size. hidden_dim: dim of the variable. Defaults to _model_hparams' hidden_size Returns: a list of num_shards Tensors. """ if hidden_dim is None: hidden_dim = model_hparams.hidden_size num_shards = model_hparams.symbol_modality_num_shards shards = [] for i in range(num_shards): shard_size = (vocab_size // num_shards) + ( 1 if i < vocab_size % num_shards else 0) var_name = "weights_%d" % i shards.append( tf.get_variable( var_name, [shard_size, hidden_dim], initializer=tf.random_normal_initializer(0.0, hidden_dim**-0.5))) if num_shards == 1: ret = shards[0] else: ret = tf.concat(shards, 0) # Convert ret to tensor. if not tf.executing_eagerly(): ret = common_layers.convert_gradient_to_tensor(ret) return ret
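A quick check of the shard-size arithmetic above: the vocabulary rows are split as evenly as possible across num_shards, with the first vocab_size % num_shards shards getting one extra row (plain Python, for illustration).

def shard_sizes(vocab_size, num_shards):
    return [vocab_size // num_shards + (1 if i < vocab_size % num_shards else 0)
            for i in range(num_shards)]

print(shard_sizes(10, 3))                      # [4, 3, 3]
print(sum(shard_sizes(32000, 16)) == 32000)    # True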
Bottom transformation for symbols.
def _symbol_bottom_simple(x, model_hparams, vocab_size, name, reuse): """Bottom transformation for symbols.""" with tf.variable_scope(name, reuse=reuse): # Ensure the inputs are 3-D if len(x.get_shape()) == 4: x = tf.squeeze(x, axis=3) while len(x.get_shape()) < 3: x = tf.expand_dims(x, axis=-1) var = get_weights(model_hparams, vocab_size) x = common_layers.dropout_no_scaling( x, 1.0 - model_hparams.symbol_dropout) ret = common_layers.gather(var, x) if model_hparams.multiply_embedding_mode == "sqrt_depth": ret *= model_hparams.hidden_size**0.5 ret *= tf.expand_dims( common_layers.cast_like(tf.not_equal(x, 0), ret), -1) return ret
Bottom transformation for target symbols.
def symbol_targets_bottom(x, model_hparams, vocab_size): """Bottom transformation for target symbols.""" if (model_hparams.shared_embedding_and_softmax_weights or model_hparams.get("shared_embedding")): try: return _symbol_bottom_simple( x, model_hparams, vocab_size, "shared", reuse=True) except ValueError: # perhaps there were no inputs, and this is a new variable. return _symbol_bottom_simple( x, model_hparams, vocab_size, "shared", reuse=None) else: return _symbol_bottom_simple( x, model_hparams, vocab_size, "target_emb", reuse=None)
Bottom transformation for embedding video bitwise.
def video_bitwise_bottom(x, model_hparams, vocab_size): """Bottom transformation for embedding video bitwise.""" pixel_embedding_size = 64 inputs = x with tf.variable_scope("video_modality_bitwise", reuse=tf.AUTO_REUSE): common_layers.summarize_video(inputs, "bottom") # Embed bitwise. assert vocab_size == 256 embedded = discretization.int_to_bit_embed(inputs, 8, pixel_embedding_size) # Project. return tf.layers.dense( embedded, model_hparams.hidden_size, name="merge_pixel_embedded_frames")
Bottom transformation for video.
def video_pixel_noise_bottom(x, model_hparams, vocab_size): """Bottom transformation for video.""" input_noise = getattr(model_hparams, "video_modality_input_noise", 0.25) inputs = x if model_hparams.mode == tf.estimator.ModeKeys.TRAIN: background = tfp.stats.percentile(inputs, 50., axis=[0, 1, 2, 3]) input_shape = common_layers.shape_list(inputs) input_size = tf.reduce_prod(input_shape[:-1]) input_mask = tf.multinomial( tf.log([[input_noise, 1.-input_noise]]), input_size) input_mask = tf.reshape(tf.cast(input_mask, tf.int32), input_shape[:-1]+[1]) inputs = inputs * input_mask + background * (1 - input_mask) return video_bottom(inputs, model_hparams, vocab_size)
Convert prediction and target from rgb to real.
def convert_rgb_to_real(prediction, targets): """Convert prediction and target from rgb to real.""" prediction = tf.squeeze(prediction, axis=-1) prediction = common_layers.convert_rgb_to_real(prediction) targets = common_layers.convert_rgb_to_real(targets) return prediction, targets
Compute the CTC loss.
def ctc_symbol_loss(top_out, targets, model_hparams, vocab_size, weight_fn): """Compute the CTC loss.""" del model_hparams, vocab_size # unused arg logits = top_out with tf.name_scope("ctc_loss", values=[logits, targets]): # For CTC we assume targets are 1d, [batch, length, 1, 1] here. targets_shape = targets.get_shape().as_list() assert len(targets_shape) == 4 assert targets_shape[2] == 1 assert targets_shape[3] == 1 targets = tf.squeeze(targets, axis=[2, 3]) logits = tf.squeeze(logits, axis=[2, 3]) targets_mask = 1 - tf.to_int32(tf.equal(targets, 0)) targets_lengths = tf.reduce_sum(targets_mask, axis=1) sparse_targets = tf.keras.backend.ctc_label_dense_to_sparse( targets, targets_lengths) xent = tf.nn.ctc_loss( sparse_targets, logits, targets_lengths, time_major=False, preprocess_collapse_repeated=False, ctc_merge_repeated=False) weights = weight_fn(targets) return tf.reduce_sum(xent), tf.reduce_sum(weights)
Compute loss numerator and denominator for one shard of output.
def generic_loss(top_out, targets, model_hparams, vocab_size, weights_fn): """Compute loss numerator and denominator for one shard of output.""" del vocab_size # unused arg logits = top_out logits = common_attention.maybe_upcast(logits, hparams=model_hparams) cutoff = getattr(model_hparams, "video_modality_loss_cutoff", 0.0) return common_layers.padded_cross_entropy( logits, targets, model_hparams.label_smoothing, cutoff=cutoff, weights_fn=weights_fn)
Average loss over the labels.
def multi_label_loss(top_out, targets, model_hparams, vocab_size, weights_fn): """Average loss over the labels.""" del vocab_size # unused arg logits = top_out num_labels = tf.shape(targets)[1] logits = tf.tile(logits, [1, num_labels, 1, 1, 1]) xent, weights = common_layers.padded_cross_entropy( logits, targets, model_hparams.label_smoothing, weights_fn=weights_fn, reduce_sum=False, ) xent = tf.squeeze(xent, [2, 3]) weights = tf.squeeze(weights, [2, 3]) # average loss over all labels loss = tf.reduce_sum(xent, axis=1) weights = tf.reduce_sum(weights, axis=1) loss /= (weights + 1e-8) weights = tf.to_float(tf.greater(weights, 0.)) return tf.reduce_sum(loss*weights), tf.reduce_sum(weights)
Apply softmax cross-entropy between outputs and targets. Args: top_out: logits Tensor with shape [batch, ?, ?, num_classes] targets: one-hot encoding Tensor with shape [batch, ?, ?, num_classes] model_hparams: HParams, model hyperparameters. vocab_size: int, vocabulary size. weights_fn: Returns: loss_scale (cross-entropy), loss_denom
def one_hot_class_label_loss(top_out, targets, model_hparams, vocab_size, weights_fn): """Apply softmax cross-entropy between outputs and targets. Args: top_out: logits Tensor with shape [batch, ?, ?, num_classes] targets: one-hot encoding Tensor with shape [batch, ?, ?, num_classes] model_hparams: HParams, model hyperparmeters. vocab_size: int, vocabulary size. weights_fn: Returns: loss_scale (cross-entropy), loss_denom """ del model_hparams, vocab_size # unused arg loss_scale = tf.losses.softmax_cross_entropy( onehot_labels=targets, logits=top_out) weights = weights_fn(targets) loss_denom = tf.reduce_sum(weights) return loss_scale, loss_denom
Poisson loss for real.
def real_log_poisson_loss(top_out, targets, model_hparams, vocab_size, weights_fn): """Poisson loss for real.""" del model_hparams, vocab_size # unused arg predictions = top_out if (len(common_layers.shape_list(top_out)) != len( common_layers.shape_list(targets))): predictions = tf.squeeze(top_out, axis=[-1]) with tf.name_scope("log_possion"): weights = weights_fn(targets) lp_loss = tf.nn.log_poisson_loss(targets, predictions) return tf.reduce_sum(lp_loss * weights), tf.reduce_sum(weights)
Loss for class label.
def sigmoid_class_label_loss(top_out, targets, model_hparams, vocab_size, weights_fn): """Loss for class label.""" # Expect inputs of size [batch-size, timesteps, 1, num-classes], where the # last dimension of num-classes represents logits for binary labels del model_hparams, vocab_size # unused arg loss_scale = tf.losses.sigmoid_cross_entropy( multi_class_labels=targets, logits=top_out) weights = weights_fn(targets) loss_denom = tf.reduce_sum(weights) return loss_scale, loss_denom
Compute loss numerator and denominator for one shard of output.
def video_loss(top_out, targets, model_hparams, vocab_size, weights_fn): """Compute loss numerator and denominator for one shard of output.""" del vocab_size # unused arg logits = top_out logits = tf.reshape(logits, [-1] + common_layers.shape_list(logits)[2:]) targets = tf.reshape(targets, [-1] + common_layers.shape_list(targets)[2:]) cutoff = getattr(model_hparams, "video_modality_loss_cutoff", 0.01) return common_layers.padded_cross_entropy( logits, targets, model_hparams.label_smoothing, cutoff=cutoff, weights_fn=weights_fn)
Compute loss numerator and denominator for one shard of output.
def video_l1_loss(top_out, targets, model_hparams, vocab_size, weights_fn): """Compute loss numerator and denominator for one shard of output.""" del vocab_size # unused arg logits = top_out logits = tf.reshape(logits, [-1] + common_layers.shape_list(logits)[2:-1]) targets = tf.reshape(targets, [-1] + common_layers.shape_list(targets)[2:]) weights = weights_fn(targets) # Shift targets by 0.5 so later just casting to int gives the prediction. # So for int targets, say 0 and 7, we actually train to predict 0.5 and 7.5. # Later (in metrics or infer) this is cast to int anyway. Also, we have no # loss beyond cutoff = 0.2 as these are already correct predictions. targets = tf.to_float(targets) + 0.5 loss = video_l1_internal_loss(logits, targets, model_hparams) return tf.reduce_sum(loss * weights), tf.reduce_sum(weights)
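A small numeric illustration of the 0.5 shift described in the comment above: if the model predicts within 0.5 of the shifted target, casting the prediction to int recovers the original integer pixel value (illustrative numbers only).

import numpy as np

targets = np.array([0, 7, 255], dtype=np.float32)
shifted = targets + 0.5                               # train towards 0.5, 7.5, 255.5
predictions = shifted + np.array([0.3, -0.4, 0.49])   # errors smaller than 0.5
print(predictions.astype(np.int32))                   # [  0   7 255], matches targets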
Compute loss numerator and denominator for one shard of output.
def video_l2_loss(top_out, targets, model_hparams, vocab_size, weights_fn): """Compute loss numerator and denominator for one shard of output.""" del vocab_size # unused arg logits = top_out logits = tf.reshape(logits, [-1] + common_layers.shape_list(logits)[2:-1]) targets = tf.reshape(targets, [-1] + common_layers.shape_list(targets)[2:]) weights = weights_fn(targets) # Shift targets by 0.5 so later just casting to int gives the prediction. # So for int targets, say 0 and 7, we actually train to predict 0.5 and 7.5. # Later (in metrics or infer) this is cast to int anyway. Also, we have no # loss beyond cutoff = 0.2 as these are already correct predictions. targets = tf.to_float(targets) + 0.5 loss = video_l2_internal_loss(logits, targets, model_hparams) return tf.reduce_sum(loss * weights), tf.reduce_sum(weights)
Transform inputs from model space to target space. Average over inner dims and apply a linear layer to get logits. Args: body_output: A Tensor with shape [batch, ?, ?, body_output_size]. targets: model_hparams: HParams, model hyperparameters. vocab_size: int, vocabulary size. Returns: a Tensor with shape [batch_size, 1, 1, 1, vocab_size]
def class_label_top(body_output, targets, model_hparams, vocab_size): """Transform inputs from model space to target space. Average over inner dims and a linear layer to logits. Args: body_output: A Tensor with shape [batch, ?, ?, body_output_size]. targets: model_hparams: HParams, model hyperparmeters. vocab_size: int, vocabulary size. Returns: a Tensors, each with shape [batch_size, 1, 1, 1, vocab_size] """ del targets # unused arg with tf.variable_scope("class_label_modality_%d_%d" % ( vocab_size, model_hparams.hidden_size)): x = body_output x = tf.reduce_mean(x, axis=[1, 2], keepdims=True) res = tf.layers.dense(x, vocab_size) return tf.expand_dims(res, 3)
Top transformation for images.
def image_top(body_output, targets, model_hparams, vocab_size): """Top transformation for images.""" del targets # unused arg # TODO(lukaszkaiser): is this a universal enough way to get channels? num_channels = model_hparams.problem.num_channels with tf.variable_scope("rgb_softmax"): body_output_shape = common_layers.shape_list(body_output) reshape_shape = body_output_shape[:3] reshape_shape.extend([num_channels, vocab_size]) res = tf.layers.dense(body_output, vocab_size * num_channels) res = tf.reshape(res, reshape_shape) if not tf.get_variable_scope().reuse: res_argmax = tf.argmax(res, axis=-1) tf.summary.image( "result", common_layers.tpu_safe_image_summary(res_argmax), max_outputs=1) return res
Transforms body output to return logits. Args: body_output: Tensor of shape [batch, img_len, img_len, depth]. targets: model_hparams: HParams, model hyperparameters. vocab_size: int, vocabulary size. Returns: Tensor of shape [batch, img_len, img_len, channels, vocab_size].
def image_channel_compress_top(body_output, targets, model_hparams, vocab_size): """Transforms body output to return logits. Args: body_output: Tensor of shape [batch, img_len, img_len, depth]. targets: model_hparams: HParams, model hyperparmeters. vocab_size: int, vocabulary size. Returns: Tensor of shape [batch, img_len, img_len, channels, vocab_size]. """ del targets # unused arg with tf.variable_scope("image_channel_compress_modality"): hidden_size = model_hparams.hidden_size img_len = model_hparams.img_len channels = 3 # RGB batch = common_layers.shape_list(body_output)[0] x = tf.layers.conv2d( body_output, hidden_size * channels, kernel_size=(1, 1), strides=(1, 1), padding="VALID", activation=tf.nn.relu, name="decompress_conv") x = tf.reshape(x, [batch, img_len, img_len * channels, hidden_size]) x = common_layers.layer_preprocess(x, model_hparams) x = tf.layers.dense(x, vocab_size, use_bias=True, activation=None, name="output_conv") x = tf.reshape( x, [batch, img_len, img_len, channels, vocab_size]) return x
Top transformation for images.
def image_channel_embeddings_top(body_output, targets, model_hparams, vocab_size): """Top transformation for images.""" del targets # unused arg with tf.variable_scope("image_channel_embeddings_bottom"): img_len = model_hparams.img_len channels = model_hparams.num_channels x = tf.layers.dense( body_output, 256, use_bias=True, activation=None, name="output_conv") x = tf.reshape(x, [-1, img_len, img_len, channels, vocab_size]) return x
Loss for class label.
def softmax_last_timestep_class_label_top(body_output, targets, model_hparams, vocab_size): """Loss for class label.""" del targets # unused arg with tf.variable_scope( "softmax_last_timestep_onehot_class_label_modality_%d_%d" % ( vocab_size, model_hparams.hidden_size)): x = body_output x = tf.expand_dims(x[:, -1], 1) # Pick the last timestep return tf.layers.dense(x, vocab_size)
Loss for class label.
def softmax_max_pooling_class_label_top(body_output, targets, model_hparams, vocab_size): """Loss for class label.""" del targets # unused arg with tf.variable_scope( "softmax_max_pooling_onehot_class_label_modality_%d_%d" % ( vocab_size, model_hparams.hidden_size)): x = body_output x = tf.reduce_max(x, axis=1, keepdims=True) return tf.layers.dense(x, vocab_size)
Generate logits. Args: body_output: A Tensor with shape [batch, p0, p1, model_hparams.hidden_size]. targets: Unused. model_hparams: HParams, model hyperparameters. vocab_size: int, vocabulary size. Returns: logits: A Tensor with shape [batch, p0, p1, ?, vocab_size].
def symbol_top(body_output, targets, model_hparams, vocab_size): """Generate logits. Args: body_output: A Tensor with shape [batch, p0, p1, model_hparams.hidden_size]. targets: Unused. model_hparams: HParams, model hyperparmeters. vocab_size: int, vocabulary size. Returns: logits: A Tensor with shape [batch, p0, p1, ?, vocab_size]. """ del targets # unused arg if model_hparams.shared_embedding_and_softmax_weights: scope_name = "shared" reuse = tf.AUTO_REUSE else: scope_name = "softmax" reuse = False with tf.variable_scope(scope_name, reuse=reuse): body_output_shape = common_layers.shape_list(body_output) var = get_weights(model_hparams, vocab_size, body_output_shape[-1]) if (model_hparams.factored_logits and model_hparams.mode == tf.estimator.ModeKeys.TRAIN): # insert channels dimension body_output = tf.expand_dims(body_output, 3) return common_layers.FactoredTensor(body_output, var) else: body_output = tf.reshape(body_output, [-1, body_output_shape[-1]]) logits = tf.matmul(body_output, var, transpose_b=True) return tf.reshape(logits, body_output_shape[:-1] + [1, vocab_size])
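A NumPy sketch of the weight tying above: when shared_embedding_and_softmax_weights is enabled, the same [vocab, hidden] matrix that embeds tokens on the way in produces logits on the way out via a transposed matmul (toy shapes, illustrative only).

import numpy as np

rng = np.random.default_rng(0)
vocab, hidden = 100, 16
shared = rng.standard_normal((vocab, hidden))         # embedding == softmax weights
body_output = rng.standard_normal((2, 7, hidden))     # [batch, length, hidden]
logits = body_output.reshape(-1, hidden) @ shared.T   # [batch*length, vocab]
logits = logits.reshape(2, 7, 1, vocab)               # add a channels dim of 1
print(logits.shape)                                   # (2, 7, 1, 100)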
Top transformation for video.
def video_top(body_output, targets, model_hparams, vocab_size): """Top transformation for video.""" del targets # unused arg num_channels = model_hparams.problem.num_channels shape = common_layers.shape_list(body_output) reshape_shape = shape[:-1] + [num_channels, vocab_size] res = tf.reshape(body_output, reshape_shape) # Calculate argmax so as to have a summary with the produced images. x = tf.argmax(tf.reshape(res, [-1, vocab_size]), axis=-1) x = tf.reshape(x, shape[:-1] + [num_channels]) common_video.gif_summary("results", x, max_outputs=1) return res
Top transformation for video.
def video_l1_top(body_output, targets, model_hparams, vocab_size): """Top transformation for video.""" del targets, vocab_size # unused arg num_channels = model_hparams.problem.num_channels num_frames = model_hparams.video_num_target_frames with tf.variable_scope("rgb"): body_output_shape = common_layers.shape_list(body_output) res = tf.layers.dense(body_output, num_channels * num_frames, name="cast") res = tf.reshape(res, body_output_shape[:3] + [num_channels, num_frames]) res = tf.transpose(res, [0, 4, 1, 2, 3]) # Move frames next to batch. if not tf.get_variable_scope().reuse: res_argmax = res[:, -1, :, :, :] tf.summary.image( "result", common_layers.tpu_safe_image_summary(res_argmax), max_outputs=1) return tf.expand_dims(res, axis=-1)
Gets default bottom transformation; if none available, return value.
def get_bottom(modality_type, value=None): """Gets default bottom transformation; if none available, return value.""" if modality_type == ModalityType.AUDIO: return audio_bottom elif modality_type == ModalityType.AUDIO_SPECTRAL: return audio_spectral_bottom elif modality_type in (ModalityType.CLASS_LABEL, ModalityType.MULTI_LABEL, ModalityType.ONE_HOT_CLASS_LABEL, ModalityType.SIGMOID_CLASS_LABEL, ModalityType.SIGMOID_MAX_POOLING_CLASS_LABEL, ModalityType.SOFTMAX_AVERAGE_POOLING_CLASS_LABEL, ModalityType.SOFTMAX_LAST_TIMESTEP_CLASS_LABEL, ModalityType.SOFTMAX_MAX_POOLING_CLASS_LABEL): return class_label_bottom elif modality_type in (ModalityType.CTC_SYMBOL, ModalityType.SYMBOL, ModalityType.SYMBOL_WEIGHTS_ALL): return symbol_bottom elif modality_type in (ModalityType.GENERIC_L2_LOSS, ModalityType.IDENTITY, ModalityType.IDENTITY_SYMBOL, ModalityType.IMAGE_CHANNEL_EMBEDDINGS_BOTTOM): return identity_bottom elif modality_type == ModalityType.IMAGE: return image_bottom elif modality_type in (ModalityType.IMAGE_CHANNEL_BOTTOM_IDENTITY, ModalityType.IMAGE_CHANNEL_COMPRESS): return image_channel_compress_bottom elif modality_type in (ModalityType.REAL, ModalityType.REAL_L2_LOSS, ModalityType.REAL_LOG_POISSON_LOSS): return real_bottom elif modality_type == ModalityType.SPEECH_RECOGNITION: return speech_recognition_bottom elif modality_type == ModalityType.SYMBOL_ONE_HOT: return symbol_one_hot_bottom elif modality_type in (ModalityType.VIDEO, ModalityType.VIDEO_L1, ModalityType.VIDEO_L2): return video_bottom elif modality_type == ModalityType.VIDEO_BITWISE: return video_bitwise_bottom elif modality_type == ModalityType.VIDEO_IDENTITY: return video_identity_bottom elif modality_type in (ModalityType.VIDEO_L1_RAW, ModalityType.VIDEO_L2_RAW): return video_raw_bottom elif modality_type == ModalityType.VIDEO_PIXEL_NOISE: return video_pixel_noise_bottom return value
Gets default loss transformation; if none available, return value.
def get_loss(modality_type, value=None): """Gets default loss transformation; if none available, return value.""" if modality_type in (ModalityType.AUDIO, ModalityType.AUDIO_SPECTRAL, ModalityType.CLASS_LABEL, ModalityType.IDENTITY, ModalityType.IDENTITY_SYMBOL, ModalityType.IMAGE, ModalityType.IMAGE_CHANNEL_BOTTOM_IDENTITY, ModalityType.IMAGE_CHANNEL_COMPRESS, ModalityType.IMAGE_CHANNEL_EMBEDDINGS_BOTTOM, ModalityType.REAL, ModalityType.SPEECH_RECOGNITION, ModalityType.SYMBOL, ModalityType.SYMBOL_WEIGHTS_ALL): return generic_loss elif modality_type == ModalityType.CTC_SYMBOL: return ctc_symbol_loss elif modality_type == ModalityType.GENERIC_L2_LOSS: return generic_l2_loss elif modality_type == ModalityType.MULTI_LABEL: return multi_label_loss elif modality_type in (ModalityType.ONE_HOT_CLASS_LABEL, ModalityType.SOFTMAX_AVERAGE_POOLING_CLASS_LABEL, ModalityType.SOFTMAX_LAST_TIMESTEP_CLASS_LABEL, ModalityType.SOFTMAX_MAX_POOLING_CLASS_LABEL): return one_hot_class_label_loss elif modality_type == ModalityType.REAL_L2_LOSS: return real_l2_loss elif modality_type == ModalityType.REAL_LOG_POISSON_LOSS: return real_log_poisson_loss elif modality_type == ModalityType.SIGMOID_CLASS_LABEL: return sigmoid_class_label_loss elif modality_type == ModalityType.SIGMOID_MAX_POOLING_CLASS_LABEL: return sigmoid_max_pooling_class_label_loss elif modality_type == ModalityType.SYMBOL_ONE_HOT: return symbol_one_hot_loss elif modality_type in (ModalityType.VIDEO, ModalityType.VIDEO_BITWISE, ModalityType.VIDEO_PIXEL_NOISE): return video_loss elif modality_type == ModalityType.VIDEO_IDENTITY: return video_identity_loss elif modality_type == ModalityType.VIDEO_L1: return video_l1_loss elif modality_type == ModalityType.VIDEO_L1_RAW: return video_l1_raw_loss elif modality_type == ModalityType.VIDEO_L2: return video_l2_loss elif modality_type == ModalityType.VIDEO_L2_RAW: return video_l2_raw_loss return value
Gets default name for transformations; if none available, return value.
def get_name(modality_type, value=None): """Gets default name for transformations; if none available, return value.""" # For legacy reasons, modalities vary in their naming scheme. Future plans are # to remove any need for get_name. We do not recommend using it. if modality_type == ModalityType.AUDIO: return lambda model_hparams, vocab_size: "audio_modality" elif modality_type == ModalityType.AUDIO_SPECTRAL: return lambda model_hparams, vocab_size: "audio_spectral_modality" elif modality_type == ModalityType.GENERIC_L2_LOSS: return lambda model_hparams, vocab_size: "generic_l2_loss_modality" elif modality_type == ModalityType.IDENTITY: return lambda model_hparams, vocab_size: "identity_modality" elif modality_type == ModalityType.IMAGE: return lambda model_hparams, vocab_size: "image_modality" elif modality_type == ModalityType.IMAGE_CHANNEL_BOTTOM_IDENTITY: return (lambda model_hparams, vocab_size: # pylint: disable=g-long-lambda "image_channel_bottom_identity_modality") elif modality_type == ModalityType.IMAGE_CHANNEL_COMPRESS: return lambda model_hparams, vocab_size: "image_channel_compress_modality" elif modality_type == ModalityType.IMAGE_CHANNEL_EMBEDDINGS_BOTTOM: return lambda model_hparams, vocab_size: "image_channel_embeddings_bottom" elif modality_type == ModalityType.REAL: return lambda model_hparams, vocab_size: "real_modality" elif modality_type == ModalityType.REAL_L2_LOSS: return lambda model_hparams, vocab_size: "real_l2_loss_modality" elif modality_type == ModalityType.REAL_LOG_POISSON_LOSS: return lambda model_hparams, vocab_size: "real_log_poisson_loss_modality" elif modality_type == ModalityType.SPEECH_RECOGNITION: return lambda model_hparams, vocab_size: "speech_recognition_modality" elif modality_type == ModalityType.VIDEO: return lambda model_hparams, vocab_size: "video_modality" elif modality_type == ModalityType.VIDEO_BITWISE: return lambda model_hparams, vocab_size: "video_modality_bitwise" elif modality_type == ModalityType.VIDEO_IDENTITY: return lambda model_hparams, vocab_size: "video_modality_identity" elif modality_type == ModalityType.VIDEO_L1: return lambda model_hparams, vocab_size: "video_modality_l1" elif modality_type == ModalityType.VIDEO_L1_RAW: return lambda model_hparams, vocab_size: "video_modality_l1_raw" elif modality_type == ModalityType.VIDEO_L2: return lambda model_hparams, vocab_size: "video_modality_l2" elif modality_type == ModalityType.VIDEO_L2_RAW: return lambda model_hparams, vocab_size: "video_modality_l2_raw" elif modality_type == ModalityType.VIDEO_PIXEL_NOISE: return lambda model_hparams, vocab_size: "video_modality_pixel_noise" elif modality_type in (ModalityType.CLASS_LABEL, ModalityType.MULTI_LABEL, ModalityType.ONE_HOT_CLASS_LABEL): def name(model_hparams, vocab_size): return "class_label_modality_%d_%d" % (vocab_size, model_hparams.hidden_size) return name elif modality_type in (ModalityType.CTC_SYMBOL, ModalityType.IDENTITY_SYMBOL, ModalityType.SYMBOL, ModalityType.SYMBOL_WEIGHTS_ALL, ModalityType.SYMBOL_ONE_HOT): def name(model_hparams, vocab_size): return "symbol_modality_%d_%d" % (vocab_size, model_hparams.hidden_size) return name elif modality_type == ModalityType.SIGMOID_CLASS_LABEL: def name(model_hparams, vocab_size): return "sigmoid_class_symbol_modality_%d_%d" % (vocab_size, model_hparams.hidden_size) return name elif modality_type == ModalityType.SIGMOID_MAX_POOLING_CLASS_LABEL: def name(model_hparams, vocab_size): return "sigmoid_max_pooling_class_symbol_modality_%d_%d" % ( vocab_size, model_hparams.hidden_size) 
return name elif modality_type == ModalityType.SOFTMAX_AVERAGE_POOLING_CLASS_LABEL: def name(model_hparams, vocab_size): return "softmax_average_pooling_onehot_class_label_modality_%d_%d" % ( vocab_size, model_hparams.hidden_size) return name elif modality_type == ModalityType.SOFTMAX_LAST_TIMESTEP_CLASS_LABEL: def name(model_hparams, vocab_size): return "softmax_last_timestep_onehot_class_label_modality_%d_%d" % ( vocab_size, model_hparams.hidden_size) return name elif modality_type == ModalityType.SOFTMAX_MAX_POOLING_CLASS_LABEL: def name(model_hparams, vocab_size): return "softmax_max_pooling_onehot_class_label_modality_%d_%d" % ( vocab_size, model_hparams.hidden_size) return name return value
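A small, hedged illustration of the legacy naming scheme; the namedtuple below is a stand-in for a real HParams object, since the name functions only read hidden_size:

import collections

FakeHParams = collections.namedtuple("FakeHParams", ["hidden_size"])  # stub
name_fn = get_name(ModalityType.SYMBOL)
print(name_fn(FakeHParams(hidden_size=512), 8192))  # symbol_modality_8192_512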
Gets default bottom transformation for targets; if none, return value.
def get_targets_bottom(modality_type, value=None): """Gets default bottom transformation for targets; if none, return value.""" if modality_type == ModalityType.AUDIO: return make_targets_bottom(audio_bottom) elif modality_type == ModalityType.AUDIO_SPECTRAL: return make_targets_bottom(audio_spectral_bottom) elif modality_type in (ModalityType.CLASS_LABEL, ModalityType.MULTI_LABEL, ModalityType.ONE_HOT_CLASS_LABEL, ModalityType.SIGMOID_CLASS_LABEL, ModalityType.SIGMOID_MAX_POOLING_CLASS_LABEL, ModalityType.SOFTMAX_AVERAGE_POOLING_CLASS_LABEL, ModalityType.SOFTMAX_LAST_TIMESTEP_CLASS_LABEL, ModalityType.SOFTMAX_MAX_POOLING_CLASS_LABEL): return class_label_targets_bottom elif modality_type in (ModalityType.CTC_SYMBOL, ModalityType.SYMBOL, ModalityType.SYMBOL_WEIGHTS_ALL): return symbol_targets_bottom elif modality_type in (ModalityType.GENERIC_L2_LOSS, ModalityType.IDENTITY_SYMBOL): return identity_bottom elif modality_type == ModalityType.IDENTITY: return make_targets_bottom(identity_bottom) elif modality_type == ModalityType.IMAGE: return image_targets_bottom elif modality_type in (ModalityType.IMAGE_CHANNEL_BOTTOM_IDENTITY, ModalityType.IMAGE_CHANNEL_COMPRESS): return image_channel_compress_targets_bottom elif modality_type == ModalityType.IMAGE_CHANNEL_EMBEDDINGS_BOTTOM: return image_channel_embeddings_bottom elif modality_type in (ModalityType.REAL, ModalityType.REAL_L2_LOSS, ModalityType.REAL_LOG_POISSON_LOSS): return make_targets_bottom(real_bottom) elif modality_type == ModalityType.SPEECH_RECOGNITION: return make_targets_bottom(speech_recognition_bottom) elif modality_type == ModalityType.SYMBOL_ONE_HOT: return symbol_one_hot_bottom elif modality_type in (ModalityType.VIDEO, ModalityType.VIDEO_L1, ModalityType.VIDEO_L2): return video_targets_bottom elif modality_type == ModalityType.VIDEO_BITWISE: return video_bitwise_targets_bottom elif modality_type == ModalityType.VIDEO_IDENTITY: return video_identity_targets_bottom elif modality_type in (ModalityType.VIDEO_L1_RAW, ModalityType.VIDEO_L2_RAW): return video_raw_targets_bottom elif modality_type == ModalityType.VIDEO_PIXEL_NOISE: return make_targets_bottom(video_pixel_noise_bottom) return value
Gets default top transformation; if none available, return value.
def get_top(modality_type, value=None): """Gets default top transformation; if none available, return value.""" if modality_type in (ModalityType.AUDIO, ModalityType.AUDIO_SPECTRAL, ModalityType.GENERIC_L2_LOSS, ModalityType.IDENTITY, ModalityType.IDENTITY_SYMBOL, ModalityType.IMAGE_CHANNEL_BOTTOM_IDENTITY, ModalityType.SPEECH_RECOGNITION, ModalityType.VIDEO_IDENTITY): return identity_top elif modality_type in (ModalityType.CLASS_LABEL, ModalityType.MULTI_LABEL, ModalityType.ONE_HOT_CLASS_LABEL, ModalityType.SIGMOID_CLASS_LABEL): return class_label_top elif modality_type in (ModalityType.CTC_SYMBOL, ModalityType.SYMBOL, ModalityType.SYMBOL_WEIGHTS_ALL): return symbol_top elif modality_type == ModalityType.IMAGE: return image_top elif modality_type == ModalityType.IMAGE_CHANNEL_COMPRESS: return image_channel_compress_top elif modality_type == ModalityType.IMAGE_CHANNEL_EMBEDDINGS_BOTTOM: return image_channel_embeddings_top elif modality_type in (ModalityType.REAL, ModalityType.REAL_L2_LOSS, ModalityType.REAL_LOG_POISSON_LOSS): return real_top elif modality_type == ModalityType.SIGMOID_MAX_POOLING_CLASS_LABEL: return sigmoid_max_pooling_class_label_top elif modality_type == ModalityType.SOFTMAX_AVERAGE_POOLING_CLASS_LABEL: return softmax_average_pooling_class_label_top elif modality_type == ModalityType.SOFTMAX_LAST_TIMESTEP_CLASS_LABEL: return softmax_last_timestep_class_label_top elif modality_type == ModalityType.SOFTMAX_MAX_POOLING_CLASS_LABEL: return softmax_max_pooling_class_label_top elif modality_type == ModalityType.SYMBOL_ONE_HOT: return symbol_one_hot_top elif modality_type in (ModalityType.VIDEO, ModalityType.VIDEO_BITWISE, ModalityType.VIDEO_PIXEL_NOISE): return video_top elif modality_type in (ModalityType.VIDEO_L1, ModalityType.VIDEO_L2): return video_l1_top elif modality_type in (ModalityType.VIDEO_L1_RAW, ModalityType.VIDEO_L2_RAW): return video_raw_top return value
Gets default weights function; if none available, return value.
def get_weights_fn(modality_type, value=None): """Gets default weights function; if none available, return value.""" if modality_type in (ModalityType.CTC_SYMBOL, ModalityType.IDENTITY_SYMBOL, ModalityType.MULTI_LABEL, ModalityType.SYMBOL, ModalityType.SYMBOL_ONE_HOT): return common_layers.weights_nonzero elif modality_type in ModalityType.get_choices(): return common_layers.weights_all return value
Generates all possible pair combinations for the input list of sentences. For example: input = ["paraphrase1", "paraphrase2", "paraphrase3"] output = [("paraphrase1", "paraphrase2"), ("paraphrase1", "paraphrase3"), ("paraphrase2", "paraphrase3")] Args: list_of_sentences: the list of input sentences. Returns: the list of all possible sentence pairs.
def create_combination(list_of_sentences): """Generates all possible pair combinations for the input list of sentences. For example: input = ["paraphrase1", "paraphrase2", "paraphrase3"] output = [("paraphrase1", "paraphrase2"), ("paraphrase1", "paraphrase3"), ("paraphrase2", "paraphrase3")] Args: list_of_sentences: the list of input sentences. Returns: the list of all possible sentence pairs. """ num_sentences = len(list_of_sentences) - 1 combinations = [] for i, _ in enumerate(list_of_sentences): if i == num_sentences: break num_pairs = num_sentences - i populated = num_pairs * [list_of_sentences[i]] zipped = list(zip(populated, list_of_sentences[i + 1:])) combinations += zipped return combinations
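For reference, the pairs produced match itertools.combinations over the same list, in the same order; a quick check, assuming create_combination is in scope:

import itertools

sentences = ["paraphrase1", "paraphrase2", "paraphrase3"]
pairs = create_combination(sentences)
assert pairs == list(itertools.combinations(sentences, 2))
print(pairs)  # [('paraphrase1', 'paraphrase2'), ('paraphrase1', 'paraphrase3'),
              #  ('paraphrase2', 'paraphrase3')]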
Set of hyperparameters.
def image_transformer2d_base(): """Set of hyperparameters.""" hparams = common_hparams.basic_params1() hparams.hidden_size = 512 hparams.batch_size = 1 hparams.max_length = 256 hparams.dropout = 0.0 hparams.clip_grad_norm = 0. # i.e. no gradient clipping hparams.optimizer_adam_epsilon = 1e-9 hparams.learning_rate_decay_scheme = "noam" hparams.learning_rate = 0.1 hparams.learning_rate_warmup_steps = 4000 hparams.initializer_gain = 0.2 hparams.initializer = "uniform_unit_scaling" hparams.weight_decay = 0.0 hparams.optimizer_adam_beta1 = 0.9 hparams.optimizer_adam_beta2 = 0.98 hparams.label_smoothing = 0.0 hparams.bottom["targets"] = modalities.make_targets_bottom( modalities.image_channel_embeddings_bottom) hparams.top["targets"] = modalities.identity_top hparams.norm_type = "layer" hparams.layer_prepostprocess_dropout = 0.0 hparams.add_hparam("filter_size", 512) # Add new ones like this. # attention-related flags hparams.add_hparam("num_heads", 8) hparams.add_hparam("attention_key_channels", 0) hparams.add_hparam("attention_value_channels", 0) hparams.add_hparam("ffn_layer", "conv_hidden_relu") # All hyperparameters ending in "dropout" are automatically set to 0.0 # when not in training mode. hparams.add_hparam("attention_dropout", 0.0) hparams.add_hparam("relu_dropout", 0.0) hparams.add_hparam("pos", "timing") # timing, none hparams.add_hparam("nbr_decoder_problems", 1) hparams.add_hparam("num_output_layers", 3) hparams.add_hparam("block_size", 1) # image size related flags # assuming that the image has same height and width hparams.add_hparam("img_len", 32) hparams.add_hparam("num_channels", 3) # Local attention params hparams.add_hparam("local_and_global_att", False) hparams.add_hparam("block_length", 256) hparams.add_hparam("block_width", 128) # Local 2D attention params hparams.add_hparam("query_shape", (16, 16)) hparams.add_hparam("memory_flange", (16, 32)) hparams.add_hparam("num_encoder_layers", 4) hparams.add_hparam("num_decoder_layers", 8) # attention type related params hparams.add_hparam("enc_attention_type", cia.AttentionType.GLOBAL) hparams.add_hparam("dec_attention_type", cia.AttentionType.LOCAL_2D) hparams.add_hparam("block_raster_scan", False) # multipos attention params hparams.add_hparam("q_filter_width", 1) hparams.add_hparam("kv_filter_width", 1) hparams.add_hparam("unconditional", False) # unconditional generation # relative embedding hparams hparams.add_hparam("shared_rel", False) return hparams
hparams for 8 layer big 2d model for cifar 10.
def imagetransformer2d_base_8l_8_32_big(): """hparams for 8 layer big 2d model for cifar 10.""" hparams = image_transformer2d_base() hparams.num_heads = 16 hparams.hidden_size = 1024 hparams.filter_size = 2048 hparams.num_decoder_layers = 8 hparams.batch_size = 1 hparams.layer_prepostprocess_dropout = 0.3 hparams.query_shape = (8, 16) hparams.memory_flange = (0, 32) hparams.unconditional = int(False) return hparams
big 2d model for unconditional generation on imagenet.
def imagetransformer_base_10l_8h_big_uncond_dr03_dan_64_2d(): """big 2d model for unconditional generation on imagenet.""" hparams = image_transformer2d_base() hparams.unconditional = True hparams.hidden_size = 512 hparams.batch_size = 1 hparams.img_len = 64 hparams.num_heads = 8 hparams.filter_size = 2048 hparams.max_length = 14000 hparams.layer_preprocess_sequence = "none" hparams.layer_postprocess_sequence = "dan" hparams.layer_prepostprocess_dropout = 0.1 hparams.dec_attention_type = cia.AttentionType.LOCAL_2D hparams.query_shape = (16, 16) hparams.memory_flange = (8, 8) return hparams
Base params for img2img 2d attention.
def img2img_transformer2d_base(): """Base params for img2img 2d attention.""" hparams = image_transformer2d_base() # learning related flags hparams.layer_preprocess_sequence = "n" hparams.layer_postprocess_sequence = "da" # This version seems to benefit from a higher learning rate. hparams.learning_rate = 0.2 hparams.layer_prepostprocess_dropout = 0.1 hparams.learning_rate_warmup_steps = 12000 hparams.filter_size = 2048 hparams.num_encoder_layers = 4 hparams.num_decoder_layers = 8 hparams.bottom["inputs"] = modalities.image_channel_embeddings_bottom hparams.dec_attention_type = cia.AttentionType.LOCAL_2D hparams.block_raster_scan = True return hparams
Current best hparams for local 2d.
def img2img_transformer2d_q3(): """Current best hparams for local 2d.""" hparams = img2img_transformer2d_q1() hparams.batch_size = 2 hparams.query_shape = (8, 16) hparams.memory_flange = (8, 32) return hparams
Base params for local1d attention.
def img2img_transformer_base(): """Base params for local1d attention.""" hparams = image_transformer2d_base() # learning related flags hparams.layer_preprocess_sequence = "n" hparams.layer_postprocess_sequence = "da" # This version seems to benefit from a higher learning rate. hparams.learning_rate = 0.2 hparams.layer_prepostprocess_dropout = 0.1 hparams.learning_rate_warmup_steps = 12000 hparams.filter_size = 2048 hparams.num_encoder_layers = 4 hparams.num_decoder_layers = 8 hparams.block_length = 256 hparams.block_width = 256 hparams.dec_attention_type = cia.AttentionType.LOCAL_1D hparams.block_raster_scan = False return hparams
Current best hparams for local 1d.
def img2img_transformer_b3(): """Current best hparams for local 1d.""" hparams = img2img_transformer_base() hparams.batch_size = 2 hparams.layer_preprocess_sequence = "none" hparams.layer_postprocess_sequence = "dan" hparams.block_length = 128 hparams.sampling_temp = 0.9 return hparams
Try dilated.
def img2img_transformer_dilated(): """Try dilated.""" hparams = img2img_transformer_base() hparams.add_hparam("num_memory_blocks", 1) hparams.num_heads = 8 hparams.attention_key_channels = hparams.attention_value_channels = 0 hparams.hidden_size = 512 hparams.filter_size = 2048 hparams.num_decoder_layers = 8 hparams.sampling_method = "random" hparams.gap_sizes = [0, 16, 64, 0, 16, 64, 128, 0] hparams.dec_attention_type = cia.AttentionType.DILATED hparams.img_len = 64 hparams.block_length = 128 hparams.block_width = 128 return hparams
Hparams for training img2img_transformer on tpu.
def img2img_transformer_base_tpu(): """Hparams for training img2img_transformer on tpu.""" hparams = img2img_transformer_base() update_hparams_for_tpu(hparams) hparams.batch_size = 2 hparams.num_heads = 4 # heads are expensive on tpu hparams.num_decoder_layers = 8 hparams.num_encoder_layers = 4 hparams.shared_embedding_and_softmax_weights = False return hparams
Set of hyperparameters.
def img2img_transformer2d_n31(): """Set of hyperparameters.""" hparams = img2img_transformer2d_base() hparams.batch_size = 1 hparams.num_encoder_layers = 6 hparams.num_decoder_layers = 12 hparams.num_heads = 8 hparams.query_shape = (16, 32) hparams.memory_flange = (16, 32) return hparams
Set of hyperparameters.
def img2img_transformer2d_n24(): """Set of hyperparameters.""" hparams = img2img_transformer2d_base() hparams.batch_size = 1 hparams.hidden_size = 1024 hparams.filter_size = 2048 hparams.layer_prepostprocess_dropout = 0.2 hparams.num_decoder_layers = 8 hparams.query_shape = (8, 16) hparams.memory_flange = (8, 32) return hparams
Tiny params.
def img2img_transformer2d_tiny(): """Tiny params.""" hparams = img2img_transformer2d_base() hparams.num_decoder_layers = 2 hparams.hidden_size = 128 hparams.batch_size = 4 hparams.max_length = 128 hparams.attention_key_channels = hparams.attention_value_channels = 0 hparams.filter_size = 128 hparams.num_heads = 4 hparams.pos = "timing" hparams.img_len = 32 return hparams
Tiny params.
def img2img_transformer_tiny(): """Tiny params.""" hparams = img2img_transformer2d_base() hparams.num_hidden_layers = 2 hparams.hidden_size = 128 hparams.batch_size = 4 hparams.max_length = 128 hparams.attention_key_channels = hparams.attention_value_channels = 0 hparams.filter_size = 128 hparams.num_heads = 1 hparams.pos = "timing" return hparams
Residual feed-forward layer with normalization at start.
def ResidualFeedForward(feature_depth, feedforward_depth, dropout, mode): """Residual feed-forward layer with normalization at start.""" return layers.Residual( layers.LayerNorm(), layers.Dense(feedforward_depth), layers.Relu(), layers.Dropout(rate=dropout, mode=mode), layers.Dense(feature_depth), layers.Dropout(rate=dropout, mode=mode) )
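As a framework-free sketch of what this block computes at inference time (dropout disabled, and with the learned LayerNorm scale/offset omitted for brevity), assuming made-up weight matrices:

import numpy as np

def layer_norm(x, eps=1e-6):
  mean = x.mean(axis=-1, keepdims=True)
  var = x.var(axis=-1, keepdims=True)
  return (x - mean) / np.sqrt(var + eps)

def residual_feed_forward(x, w1, b1, w2, b2):
  h = np.maximum(layer_norm(x) @ w1 + b1, 0.0)  # LayerNorm -> Dense -> Relu
  return x + (h @ w2 + b2)                      # Dense back down + residual

feature_depth, feedforward_depth = 8, 32
x = np.random.randn(2, 5, feature_depth)
w1 = 0.1 * np.random.randn(feature_depth, feedforward_depth)
b1 = np.zeros(feedforward_depth)
w2 = 0.1 * np.random.randn(feedforward_depth, feature_depth)
b2 = np.zeros(feature_depth)
print(residual_feed_forward(x, w1, b1, w2, b2).shape)  # (2, 5, 8)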
Transformer encoder layer. The input to the encoder is a pair (embedded source, mask) where the mask is created from the original source to prevent attending to the padding part of the input. Args: feature_depth: int: depth of embedding feedforward_depth: int: depth of feed-forward layer num_heads: int: number of attention heads dropout: float: dropout rate (how much to drop out) mode: str: 'train' or 'eval' Returns: the layer, returning a pair (activations, mask).
def EncoderLayer(feature_depth, feedforward_depth, num_heads, dropout, mode): """Transformer encoder layer. The input to the encoder is a pair (embedded source, mask) where the mask is created from the original source to prevent attending to the padding part of the input. Args: feature_depth: int: depth of embedding feedforward_depth: int: depth of feed-forward layer num_heads: int: number of attention heads dropout: float: dropout rate (how much to drop out) mode: str: 'train' or 'eval' Returns: the layer, returning a pair (activations, mask). """ # The encoder block expects (activation, mask) as input and returns # the new activations only; we add the mask back to the output next. encoder_block = layers.Serial( layers.Residual( # Attention block here. layers.Parallel(layers.LayerNorm(), layers.Identity()), layers.MultiHeadedAttention(feature_depth, num_heads=num_heads, dropout=dropout, mode=mode), layers.Dropout(rate=dropout, mode=mode), shortcut=layers.FirstBranch() ), ResidualFeedForward(feature_depth, feedforward_depth, dropout, mode=mode) ) # Now we add the mask back. return layers.Serial( layers.Reorder(output=((0, 1), 1)), # (x, mask) --> ((x, mask), mask) layers.Parallel(encoder_block, layers.Identity()) )
Transformer encoder. Args: vocab_size: int: vocab size num_classes: how many classes on output feature_depth: int: depth of embedding feedforward_depth: int: depth of feed-forward layer num_layers: int: number of encoder/decoder layers num_heads: int: number of attention heads dropout: float: dropout rate (how much to drop out) max_len: int: maximum symbol length for positional encoding mode: str: 'train' or 'eval' Returns: the Transformer encoder layer.
def TransformerEncoder(vocab_size, num_classes=10, feature_depth=512, feedforward_depth=2048, num_layers=6, num_heads=8, dropout=0.1, max_len=2048, mode='train'): """Transformer encoder. Args: vocab_size: int: vocab size num_classes: how many classes on output feature_depth: int: depth of embedding feedforward_depth: int: depth of feed-forward layer num_layers: int: number of encoder/decoder layers num_heads: int: number of attention heads dropout: float: dropout rate (how much to drop out) max_len: int: maximum symbol length for positional encoding mode: str: 'train' or 'eval' Returns: the Transformer encoder layer. """ input_embedding = layers.Serial( layers.Embedding(feature_depth, vocab_size), layers.Dropout(rate=dropout, mode=mode), layers.PositionalEncoding(max_len=max_len) ) return layers.Serial( layers.Branch(), # Branch input to create embedding and mask. layers.Parallel(input_embedding, layers.PaddingMask()), layers.Serial(*[EncoderLayer(feature_depth, feedforward_depth, num_heads, dropout, mode) for _ in range(num_layers)]), layers.FirstBranch(), # Drop the mask. layers.LayerNorm(), layers.Mean(axis=1), # Average on length. layers.Dense(num_classes), layers.LogSoftmax() )
Transformer decoder layer. Args: feature_depth: int: depth of embedding feedforward_depth: int: depth of feed-forward layer num_heads: int: number of attention heads dropout: float: dropout rate (how much to drop out) mode: str: 'train' or 'eval' Returns: the layer.
def DecoderLayer(feature_depth, feedforward_depth, num_heads, dropout, mode): """Transformer decoder layer. Args: feature_depth: int: depth of embedding feedforward_depth: int: depth of feed-forward layer num_heads: int: number of attention heads dropout: float: dropout rate (how much to drop out) mode: str: 'train' or 'eval' Returns: the layer. """ return layers.Serial( layers.Residual( # Self-attention block. layers.LayerNorm(), layers.Branch(), layers.Parallel(layers.Identity(), # activation for (q, k, v) layers.CausalMask(axis=-2)), # attention mask layers.MultiHeadedAttention(feature_depth, num_heads=num_heads, dropout=dropout, mode=mode), layers.Dropout(rate=dropout, mode=mode) ), ResidualFeedForward(feature_depth, feedforward_depth, dropout, mode=mode) )
Transformer language model (only uses the decoder part of Transformer). Args: vocab_size: int: vocab size feature_depth: int: depth of embedding feedforward_depth: int: depth of feed-forward layer num_layers: int: number of encoder/decoder layers num_heads: int: number of attention heads dropout: float: dropout rate (how much to drop out) max_len: int: maximum symbol length for positional encoding mode: str: 'train' or 'eval' Returns: the layer.
def TransformerLM(vocab_size, feature_depth=512, feedforward_depth=2048, num_layers=6, num_heads=8, dropout=0.1, max_len=2048, mode='train'): """Transformer language model (only uses the decoder part of Transformer). Args: vocab_size: int: vocab size feature_depth: int: depth of embedding feedforward_depth: int: depth of feed-forward layer num_layers: int: number of encoder/decoder layers num_heads: int: number of attention heads dropout: float: dropout rate (how much to drop out) max_len: int: maximum symbol length for positional encoding mode: str: 'train' or 'eval' Returns: the layer. """ return layers.Serial( layers.ShiftRight(), layers.Embedding(feature_depth, vocab_size), layers.Dropout(rate=dropout, mode=mode), layers.PositionalEncoding(max_len=max_len), layers.Serial(*[DecoderLayer(feature_depth, feedforward_depth, num_heads, dropout, mode) for _ in range(num_layers)]), layers.LayerNorm(), layers.Dense(vocab_size), layers.LogSoftmax() )
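Two decoder-only ingredients the model relies on are shifting targets right (so position t is predicted from tokens before t) and a causal attention mask. A small numpy sketch of both, with illustrative token ids:

import numpy as np

def shift_right(token_ids, pad_id=0):
  pad = np.full((token_ids.shape[0], 1), pad_id, dtype=token_ids.dtype)
  return np.concatenate([pad, token_ids[:, :-1]], axis=1)

def causal_mask(length):
  # mask[i, j] is True where position i may attend to position j (j <= i).
  return np.tril(np.ones((length, length), dtype=bool))

tokens = np.array([[11, 12, 13, 14]])
print(shift_right(tokens))          # [[ 0 11 12 13]]
print(causal_mask(4).astype(int))   # lower-triangular 4x4 of ones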
Transformer decoder layer operating on chunks. Args: feature_depth: int: depth of embedding feedforward_depth: int: depth of feed-forward layer num_heads: int: number of attention heads dropout: float: dropout rate (how much to drop out) chunk_selector: a function from chunk number to list of chunks to attend. mode: str: 'train' or 'eval' Returns: the layer.
def ChunkedDecoderLayer(feature_depth, feedforward_depth, num_heads, dropout, chunk_selector, mode): """Transformer decoder layer operating on chunks. Args: feature_depth: int: depth of embedding feedforward_depth: int: depth of feed-forward layer num_heads: int: number of attention heads dropout: float: dropout rate (how much to drop out) chunk_selector: a function from chunk number to list of chunks to attend. mode: str: 'train' or 'eval' Returns: the layer. """ return layers.Serial( layers.Residual( # Self-attention block. layers.Map(layers.LayerNorm()), layers.ChunkedCausalMultiHeadedAttention( feature_depth, num_heads=num_heads, dropout=dropout, chunk_selector=chunk_selector, mode=mode), layers.Map(layers.Dropout(rate=dropout, mode=mode)), ), layers.Map(ResidualFeedForward( feature_depth, feedforward_depth, dropout, mode=mode)) )
Transformer language model operating on chunks. The input to this model is a sequence presented as a list or tuple of chunks: (chunk1, chunk2, chunk3, ..., chunkN). Each chunk should have the same shape (batch, chunk-length) and together they represent a long sequence that's a concatenation of chunk1, chunk2, ..., chunkN. Chunked Transformer emulates the operation of a Transformer on this long sequence except for the chunked attention layer, which may attend to only a subset of the chunks to reduce memory use. Args: vocab_size: int: vocab size feature_depth: int: depth of embedding feedforward_depth: int: depth of feed-forward layer num_layers: int: number of encoder/decoder layers num_heads: int: number of attention heads dropout: float: dropout rate (how much to drop out) chunk_selector: a function from chunk number to list of chunks to attend (if None, attends to the previous chunk, which is equivalent to setting chunk_selector(x) = [] if x < 1 else [x-1] (TransformerXL); we attend to the current chunk with a causal mask too, selected chunks unmasked). max_len: int: maximum symbol length for positional encoding mode: str: 'train' or 'eval' Returns: the layer.
def ChunkedTransformerLM(vocab_size, feature_depth=512, feedforward_depth=2048, num_layers=6, num_heads=8, dropout=0.1, chunk_selector=None, max_len=2048, mode='train'): """Transformer language model operating on chunks. The input to this model is a sequence presented as a list or tuple of chunks: (chunk1, chunk2, chunks3, ..., chunkN). Each chunk should have the same shape (batch, chunk-length) and together they represent a long sequence that's a concatenation chunk1,chunk2,...,chunkN. Chunked Transformer emulates the operation of a Transformer on this long sequence except for the chunked attention layer, which may attend to only a subset of the chunks to reduce memory use. Args: vocab_size: int: vocab size feature_depth: int: depth of embedding feedforward_depth: int: depth of feed-forward layer num_layers: int: number of encoder/decoder layers num_heads: int: number of attention heads dropout: float: dropout rate (how much to drop out) chunk_selector: a function from chunk number to list of chunks to attend (if None, attends to the previous chunks which is equivalent to setting chunk_selector(x) = [] if x < 1 else [x-1] (TransformerXL); we attend to the current chunk with a causal mask too, selected chunks unmasked). max_len: int: maximum symbol length for positional encoding mode: str: 'train' or 'eval' Returns: the layer. """ stack = [ChunkedDecoderLayer(feature_depth, feedforward_depth, num_heads, dropout, chunk_selector, mode) for _ in range(num_layers)] # Below each Map(L) applies the layer L to each chunk independently. return layers.Serial( layers.ShiftRight(), layers.Map(layers.Embedding(feature_depth, vocab_size)), layers.Map(layers.Dropout(rate=dropout, mode=mode)), layers.PositionalEncoding(max_len=max_len), layers.Serial(*stack), layers.Map(layers.LayerNorm()), layers.Map(layers.Dense(vocab_size)), layers.Map(layers.LogSoftmax()), )
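The default selector described in the docstring attends only to the previous chunk (TransformerXL-style). Two hedged examples of functions one could pass in as chunk_selector:

def previous_chunk_selector(x):
  # Equivalent to the documented default: chunk 0 attends to nothing extra,
  # chunk x additionally attends to chunk x - 1 (the current chunk is always
  # visible through the causal mask).
  return [] if x < 1 else [x - 1]

def full_history_selector(x):
  # Attend to every earlier chunk; full context, but no memory savings.
  return list(range(x))

print(previous_chunk_selector(3))  # [2]
print(full_history_selector(3))    # [0, 1, 2]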
Transformer model. Args: source_vocab_size: int: source vocab size target_vocab_size: int: target vocab size mode: str: 'train' or 'eval' num_layers: int: number of encoder/decoder layers feature_depth: int: depth of embedding feedforward_depth: int: depth of feed-forward layer num_heads: int: number of attention heads dropout: float: dropout rate (how much to drop out) shared_embedding: bool: specify whether source/target embeddings are tied. max_len: int: maximum symbol length for positional encoding return_evals: bool: whether to generate decode-time evaluation functions Returns: A namedtuple containing model 'init' and 'apply' functions for training and an 'evals' function that itself returns a namedtuple containing evaluation functions for the trained encoder, decoder, and generator substax.
def Transformer(source_vocab_size, target_vocab_size, mode='train', num_layers=6, feature_depth=512, feedforward_depth=2048, num_heads=8, dropout=0.1, shared_embedding=True, max_len=200, return_evals=False): """Transformer model. Args: source_vocab_size: int: source vocab size target_vocab_size: int: target vocab size mode: str: 'train' or 'eval' num_layers: int: number of encoder/decoder layers feature_depth: int: depth of embedding feedforward_depth: int: depth of feed-forward layer num_heads: int: number of attention heads dropout: float: dropout rate (how much to drop out) shared_embedding: bool: specify whether source/target embeddings are tied. max_len: int: maximum symbol length for positional encoding return_evals: bool: whether to generate decode-time evaluation functions Returns: A namedtuple containing model 'init' and 'apply' functions for training and the 'evals' functions that itself returns a namedtuple containing evaluation functions for the trained encoder, decoder, and generator substax. """ # Input embedding and positional encoding inject_position = layers.Serial( layers.Dropout(dropout, mode=mode), layers.PositionalEncoding(feature_depth, max_len=max_len) ) if shared_embedding: assert source_vocab_size == target_vocab_size # Weight-shared Embedding embedding = layers.Share(layers.Embedding(feature_depth, source_vocab_size)) source_embedding_layer = layers.Serial(embedding, inject_position) target_embedding_layer = source_embedding_layer else: source_embedding = layers.Embedding(feature_depth, source_vocab_size) target_embedding = layers.Embedding(feature_depth, target_vocab_size) source_embedding_layer = layers.Serial(source_embedding, inject_position) target_embedding_layer = layers.Serial(target_embedding, inject_position) # Multi-headed Attention and Feed-forward layers multi_attention = layers.MultiHeadedAttention( feature_depth, num_heads=num_heads, dropout=dropout, mode=mode) # Encoder @layers.Lambda def Encoder(source, source_mask): """Transformer encoder stack. Args: source: layer variable: raw source sequences source_mask: layer variable: self-attention mask Returns: Layer variable that outputs encoded source. """ encoder_layer = layers.Serial( # input attends to self layers.Residual(layers.LayerNorm(), layers.Branch(size=4), layers.Parallel(layers.Identity(), # query layers.Identity(), # key layers.Identity(), # value source_mask), # attention mask multi_attention, layers.Dropout(dropout, mode=mode)), # feed-forward ResidualFeedForward( feature_depth, feedforward_depth, dropout, mode=mode), ) return layers.Serial( source, source_embedding_layer, layers.repeat(encoder_layer, num_layers), layers.LayerNorm(), ) # Decoder @layers.Lambda def Decoder(memory, target, target_mask, memory_mask): """Transformer decoder stack. Args: memory: layer variable: encoded source sequences target: layer variable: raw target sequences target_mask: layer variable: self-attention mask memory_mask: layer variable: memory attention mask Returns: Layer variable that outputs encoded source. 
""" decoder_layer = layers.Serial( # target attends to self layers.Residual(layers.LayerNorm(), layers.Branch(size=4), layers.Parallel(layers.Identity(), # query layers.Identity(), # key layers.Identity(), # value target_mask), # attention mask multi_attention, layers.Dropout(dropout, mode=mode)), # target attends to encoded source layers.Residual(layers.LayerNorm(), layers.Branch(size=4), layers.Parallel(layers.Identity(), # query memory, # key memory, # value memory_mask), # attention mask multi_attention, layers.Dropout(dropout, mode=mode)), # feed-forward ResidualFeedForward( feature_depth, feedforward_depth, dropout, mode=mode) ) return layers.Serial( target, target_embedding_layer, layers.repeat(decoder_layer, num_layers), layers.LayerNorm(), ) # The Transformer @layers.Lambda def transformer(source, target, source_mask, target_mask, memory_mask): # pylint: disable=invalid-name encoded_source = Encoder(source, source_mask) return Decoder(encoded_source, target, target_mask, memory_mask) # Finally, bind the generator transform to use later for inference. @layers.Lambda def Generator(encoded_target): return layers.Serial( encoded_target, layers.Dense(target_vocab_size), layers.LogSoftmax ) # Model-Building and Evaluation Functions # Get entire model's the layer pair top_init, top_apply = Generator(transformer) # By default act as a normal constructor and emit an (init, apply) pair. if not return_evals: return (top_init, top_apply) else: raise ValueError('inference in this model is still a work in progress')
Catch bugs locally...
def mtf_transformer_tiny(): """Catch bugs locally...""" hparams = mtf_transformer_base() hparams.d_model = 128 hparams.d_ff = 512 hparams.batch_size = 8 hparams.encoder_layers = ["att", "drd"] * 2 hparams.decoder_layers = ["att", "enc_att", "drd"] * 2 hparams.num_heads = 8 # data parallelism and model-parallelism hparams.mesh_shape = "batch:2;model:4" hparams.activation_dtype = "float32" return hparams
Config for language-model experiments. Train these on languagemodel_lm1b32k_packed for 136000 steps (10 epochs). The size parameter is an integer that controls the number of heads and the size of the feedforward hidden layers. Increasing size by 1 doubles each of these. Results: size params/10^9 log-ppl(per-token) -1 0.14 3.209 0 0.22 3.119 1 0.37 3.037 2 0.67 2.969 3 1.28 2.912 4 2.48 2.874 5 4.90 2.871 (to get word-level log-ppl, multiply by 1.1078) Args: size: an integer Returns: a hparams object
def mtf_transformer_paper_lm(size): """Config for language-model experiments. Train these on languagemodel_lm1b32k_packed for 136000 steps (10 epochs). The size parameter is an integer that controls the number of heads and the size of the feedforward hidden layers. Increasing size by 1 doubles each of these. Results: size params/10^9 log-ppl(per-token) -1 0.14 3.209 0 0.22 3.119 1 0.37 3.037 2 0.67 2.969 3 1.28 2.912 4 2.48 2.874 5 4.90 2.871 (to get word-level log-ppl, multiply by 1.1078) Args: size: an integer Returns: a hparams object """ n = 2 ** size hparams = mtf_transformer_base_lm() hparams.batch_size = 256 hparams.d_model = 1024 hparams.d_ff = int(8192 * n) hparams.d_kv = 256 hparams.num_heads = int(8 * n) hparams.shared_embedding_and_softmax_weights = False # one epoch for languagemodel_lm1b32k_packed = 13600 steps hparams.learning_rate_decay_steps = 13600 return hparams
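The size knob scales width exponentially: n = 2**size, so d_ff = 8192 * n and num_heads = 8 * n. A quick arithmetic check of that scaling, plus the word-level conversion quoted in the docstring:

for size in range(-1, 6):
  n = 2 ** size
  print(size, int(8192 * n), int(8 * n))
# size=-1 -> d_ff=4096, heads=4; size=0 -> 8192, 8; ...; size=5 -> 262144, 256.
# Word-level log-ppl for the largest run: 2.871 * 1.1078 ~= 3.18.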
Config for translation experiments. Train these on translate_enfr_wmt32k_packed for 154000 steps (3 epochs). The size parameter is an integer that controls the number of heads and the size of the feedforward hidden layers. Increasing size by 1 doubles each of these. Args: size: an integer Returns: a hparams object
def mtf_transformer_paper_tr(size): """Config for translation experiments. Train these on translate_enfr_wmt32k_packed for 154000 steps (3 epochs). The size parameter is an integer that controls the number of heads and the size of the feedforward hidden layers. Increasing size by 1 doubles each of these. Args: size: an integer Returns: a hparams object """ n = 2 ** size hparams = mtf_transformer_base() hparams.label_smoothing = 0.1 hparams.batch_size = 128 hparams.d_model = 1024 hparams.d_ff = int(4096 * n) hparams.num_heads = int(8 * n) hparams.shared_embedding_and_softmax_weights = False # one epoch for translate_enfr_wmt32k_packed = 51400 steps hparams.learning_rate_decay_steps = 51400 return hparams
Small language model to run on 1 TPU. Run this on 2x2 on languagemodel_lm1b32k_packed for 272000 steps (10 epochs) Results: params/10^9 log-ppl(per-token) 0.14 3.202 Returns: a hparams
def mtf_transformer_lm_baseline(): """Small language model to run on 1 TPU. Run this on 2x2 on languagemodel_lm1b32k_packed for 272000 steps (10 epochs) Results: params/10^9 log-ppl(per-token) 0.14 3.202 Returns: a hparams """ hparams = mtf_transformer_paper_lm(-1) hparams.batch_size = 128 hparams.learning_rate_decay_steps = 27200 # one epoch on lm1b hparams.mesh_shape = "batch:8" return hparams
Multihead scaled-dot-product attention with input/output transformations. Args: query_antecedent: a Tensor with shape [batch, length_q, channels] memory_antecedent: a Tensor with shape [batch, length_m, channels] or None bias: bias Tensor (see attention_bias()) total_key_depth: an integer total_value_depth: an integer output_depth: an integer num_heads: an integer dividing total_key_depth and total_value_depth dropout_rate: a floating point number image_shapes: optional tuple of integer scalars. see comments for attention_image_summary() attention_type: a string, either "dot_product", "dot_product_relative", "local_mask_right", "local_unmasked", "masked_dilated_1d", "unmasked_dilated_1d", "edge_vector", or any attention function with the signature (query, key, value, **kwargs) name: an optional string. save_weights_to: an optional dictionary to capture attention weights for visualization; the weights tensor will be appended there under a string key created from the variable scope (including name). make_image_summary: Whether to make an attention image summary. dropout_broadcast_dims: an optional list of integers less than 4 specifying in which dimensions to broadcast the dropout decisions. Saves memory. adjacency_matrix: an optional tensor of shape [batch, len_q, len_q] containing edge vectors for attention num_edge_types: number of edge types, an int vars_3d: use 3-dimensional variables for input/output transformations **kwargs (dict): Parameters for the attention function Returns: The result of the attention transformation. The output shape is [batch_size, length_q, output_depth] Raises: ValueError: if the key depth or value depth are not divisible by the number of attention heads.
def multihead_graph_attention(query_antecedent, memory_antecedent, bias, total_key_depth, total_value_depth, output_depth, num_heads, dropout_rate, image_shapes=None, attention_type="edge_vector", name="multihead_graph_attention", save_weights_to=None, make_image_summary=True, dropout_broadcast_dims=None, adjacency_matrix=None, num_edge_types=5, vars_3d=False, **kwargs): """Multihead scaled-dot-product attention with input/output transformations. Args: query_antecedent: a Tensor with shape [batch, length_q, channels] memory_antecedent: a Tensor with shape [batch, length_m, channels] or None bias: bias Tensor (see attention_bias()) total_key_depth: an integer total_value_depth: an integer output_depth: an integer num_heads: an integer dividing total_key_depth and total_value_depth dropout_rate: a floating point number image_shapes: optional tuple of integer scalars. see comments for attention_image_summary() attention_type: a string, either "dot_product", "dot_product_relative", "local_mask_right", "local_unmasked", "masked_dilated_1d", "unmasked_dilated_1d", graph, or any attention function with the signature (query, key, value, **kwargs) name: an optional string. save_weights_to: an optional dictionary to capture attention weights for vizualization; the weights tensor will be appended there under a string key created from the variable scope (including name). make_image_summary: Whether to make an attention image summary. dropout_broadcast_dims: an optional list of integers less than 4 specifying in which dimensions to broadcast the dropout decisions. saves memory. adjacency_matrix: an optional tensor of shape [batch, len_q, len_q] containing edge vectors for attention num_edge_types: number of edge types, an int vars_3d: use 3-dimensional variables for input/output transformations **kwargs (dict): Parameters for the attention function Returns: The result of the attention transformation. The output shape is [batch_size, length_q, output_depth] Raises: ValueError: if the key depth or value depth are not divisible by the number of attention heads. """ if total_key_depth % num_heads != 0: raise ValueError("Key depth (%d) must be divisible by the number of " "attention heads (%d)." % (total_key_depth, num_heads)) if total_value_depth % num_heads != 0: raise ValueError("Value depth (%d) must be divisible by the number of " "attention heads (%d)." % (total_value_depth, num_heads)) vars_3d_num_heads = num_heads if vars_3d else None with tf.variable_scope( name, default_name="multihead_attention", values=[query_antecedent, memory_antecedent]): q, k, v = common_attention.compute_qkv( query_antecedent, memory_antecedent, total_key_depth, total_value_depth, vars_3d_num_heads=vars_3d_num_heads) q = common_attention.split_heads(q, num_heads) k = common_attention.split_heads(k, num_heads) v = common_attention.split_heads(v, num_heads) key_depth_per_head = total_key_depth // num_heads if not vars_3d: q *= key_depth_per_head**-0.5 additional_returned_value = None if callable(attention_type): # Generic way to extend multihead_attention x = attention_type(q, k, v, **kwargs) if isinstance(x, tuple): x, additional_returned_value = x # Unpack elif attention_type == "edge_vector": x = graph_attention( q, k, v, bias, dropout_rate, image_shapes, save_weights_to=save_weights_to, make_image_summary=make_image_summary, dropout_broadcast_dims=dropout_broadcast_dims, adjacency_matrix=adjacency_matrix, num_edge_types=num_edge_types) x = common_attention.combine_heads(x) # Set last dim specifically. 
x.set_shape(x.shape.as_list()[:-1] + [total_value_depth]) if vars_3d: o_var = tf.get_variable( "o", [num_heads, total_value_depth // num_heads, output_depth]) o_var = tf.reshape(o_var, [total_value_depth, output_depth]) x = tf.tensordot(x, o_var, axes=1) else: x = common_layers.dense( x, output_depth, use_bias=False, name="output_transform") if additional_returned_value is not None: return x, additional_returned_value return x
Graph attention. Args: q: a Tensor with shape [batch, heads, length_q, depth_k] k: a Tensor with shape [batch, heads, length_kv, depth_k] v: a Tensor with shape [batch, heads, length_kv, depth_v] bias: bias Tensor (see attention_bias()) dropout_rate: a floating point number image_shapes: optional tuple of integer scalars. see comments for attention_image_summary() name: an optional string make_image_summary: True if you want an image summary. save_weights_to: an optional dictionary to capture attention weights for visualization; the weights tensor will be appended there under a string key created from the variable scope (including name). dropout_broadcast_dims: an optional list of integers less than 4 specifying in which dimensions to broadcast the dropout decisions. Saves memory. adjacency_matrix: optional matrix of [batch, length, length] ids indicating edge type num_edge_types: an int indicating number of edge types Returns: A Tensor of shape [batch, length, depth(q)]
def graph_attention(q, k, v, bias, dropout_rate=0.0, image_shapes=None, name=None, make_image_summary=True, save_weights_to=None, dropout_broadcast_dims=None, adjacency_matrix=None, num_edge_types=5): """graph attention. Args: q: a Tensor with shape [batch, heads, length_q, depth_k] k: a Tensor with shape [batch, heads, length_kv, depth_k] v: a Tensor with shape [batch, heads, length_kv, depth_v] bias: bias Tensor (see attention_bias()) dropout_rate: a floating point number image_shapes: optional tuple of integer scalars. see comments for attention_image_summary() name: an optional string make_image_summary: True if you want an image summary. save_weights_to: an optional dictionary to capture attention weights for vizualization; the weights tensor will be appended there under a string key created from the variable scope (including name). dropout_broadcast_dims: an optional list of integers less than 4 specifying in which dimensions to broadcast the dropout decisions. saves memory. adjacency_matrix: optional matrix of [batch, length, length] ids indicating edge type num_edge_types: an int indicating number of edge types Returns: A Tensor of shape [batch, length, depth(q)] """ with tf.variable_scope( name, default_name="dot_product_attention", values=[q, k, v]) as scope: # [batch, num_heads, query_length, memory_length] logits = tf.matmul(q, k, transpose_b=True) if adjacency_matrix is not None: key_head_depth = common_layers.shape_list(q)[-1] adjacency_vectors = make_edge_vectors( adjacency_matrix, num_edge_types, key_head_depth, name=name) # transposing q to be [batch, length_q, heads, depth_k] # to allow for matmul with [batch, length_q, length_q, depth_k] q_t = tf.transpose(q, [0, 2, 1, 3]) adj_logits = tf.matmul(q_t, adjacency_vectors, transpose_b=True) logits += tf.transpose(adj_logits, [0, 2, 1, 3]) # [batch, depth, num_nodes, num_nodes] if bias is not None: logits += bias weights = tf.nn.softmax(logits, name="attention_weights") if save_weights_to is not None: save_weights_to[scope.name] = weights # dropping out the attention links for each of the heads weights = common_layers.dropout_with_broadcast_dims( weights, 1.0 - dropout_rate, broadcast_dims=dropout_broadcast_dims) if common_layers.should_generate_summaries() and make_image_summary: common_attention.attention_image_summary(weights, image_shapes) return tf.matmul(weights, v)
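A toy numpy sketch of the edge-vector term added to the logits, using the shapes from the docstring; the edge-type embedding table here is random, standing in for whatever make_edge_vectors produces:

import numpy as np

batch, heads, length, depth_k, num_edge_types = 1, 2, 5, 4, 3
q = np.random.randn(batch, heads, length, depth_k)
k = np.random.randn(batch, heads, length, depth_k)
adjacency = np.random.randint(0, num_edge_types, size=(batch, length, length))
edge_table = np.random.randn(num_edge_types, depth_k)  # stand-in embeddings

logits = q @ k.transpose(0, 1, 3, 2)       # [batch, heads, len_q, len_kv]
adjacency_vectors = edge_table[adjacency]  # [batch, len_q, len_q, depth_k]
# Dot each per-head query with the edge vector of every (query, key) pair;
# this is the same contraction as the transpose/matmul dance in the TF code.
adj_logits = np.einsum("bhqd,bqkd->bhqk", q, adjacency_vectors)
logits = logits + adj_logits
print(logits.shape)  # (1, 2, 5, 5)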
Helper function that computes transformation for keys and values. Let B be the number of batches. Let N be the number of nodes in the graph. Let D be the size of the node hidden states. Let K be the size of the attention keys/queries (total_key_depth). Let V be the size of the attention values (total_value_depth). Let T be the total number of transforms (num_transforms). Computes the transforms for keys or values for attention. * For each node N_j and edge type t, a key K_jt of size K is computed. When an edge of type t goes from node N_j to any other node, K_jt is the key that is used in the attention process. * For each node N_j and edge type t, a value V_jt of size V is computed. When an edge of type t goes from node N_j to node N_i, Attention(Q_i, K_jt) produces a weight w_ijt. The message sent along this edge is w_ijt * V_jt. Args: node_states: A tensor of shape [B, N, D] depth: An integer (K or V) num_transforms: An integer (T). name: A name for the function Returns: x: The attention keys or values for each node and edge type (shape [B, N*T, K or V])
def _compute_edge_transforms(node_states, depth, num_transforms, name="transform"): """Helper function that computes transformation for keys and values. Let B be the number of batches. Let N be the number of nodes in the graph. Let D be the size of the node hidden states. Let K be the size of the attention keys/queries (total_key_depth). Let V be the size of the attention values (total_value_depth). Let T be the total number of transforms (num_transforms). Computes the transforms for keys or values for attention. * For each node N_j and edge type t, a key K_jt of size K is computed. When an edge of type t goes from node N_j to any other node, K_jt is the key that is used in the attention process. * For each node N_j and edge type t, a value V_jt of size V is computed. When an edge of type t goes from node N_j to node N_i, Attention(Q_i, K_jt) produces a weight w_ijt. The message sent along this edge is w_ijt * V_jt. Args: node_states: A tensor of shape [B, N, D] depth: An integer (K or V) num_transforms: An integer (T). name: A name for the function Returns: x: The attention keys or values for each node and edge type (shape [B, N*T, K or V]) """ node_shapes = common_layers.shape_list(node_states) x = common_layers.dense( node_states, depth * num_transforms, use_bias=False, name=name) batch = node_shapes[0] # B. length = node_shapes[1] # N. # Making the fourth dimension explicit by separating the vectors of size # K*T (in k) and V*T (in v) into two-dimensional matrices with shape [T, K] # (in k) and [T, V] (in v). # x = tf.reshape(x, [batch, length, num_transforms, depth]) # Flatten out the fourth dimension. x = tf.reshape(x, [batch, length * num_transforms, depth]) return x
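The final reshape lays out the dense layer's depth * num_transforms outputs as num_transforms consecutive depth-sized rows per node. A small numpy illustration of that layout, with made-up sizes:

import numpy as np

B, N, T, K = 2, 3, 4, 5
dense_out = np.arange(B * N * T * K).reshape(B, N, T * K)  # [B, N, K*T]
per_edge_type = dense_out.reshape(B, N * T, K)             # [B, N*T, K]
# Row j*T + t holds the key/value that node j exposes on outgoing edges of
# type t, i.e. per_edge_type[:, j*T + t] == dense_out[:, j, t*K:(t+1)*K].
print(per_edge_type.shape)  # (2, 12, 5)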
Computes query, key and value for edge matrices. Let B be the number of batches. Let N be the number of nodes in the graph. Let D be the size of the node hidden states. Let K be the size of the attention keys/queries (total_key_depth). Let V be the size of the attention values (total_value_depth). Let T be the total number of transforms (num_transforms). Computes the queries, keys, and values for attention. * For each node N_i in the graph, a query Q_i of size K is computed. This query is used to determine the relative weights to give to each of the node's incoming edges. * For each node N_j and edge type t, a key K_jt of size K is computed. When an edge of type t goes from node N_j to any other node, K_jt is the key that is used in the attention process. * For each node N_j and edge type t, a value V_jt of size V is computed. When an edge of type t goes from node N_j to node N_i, Attention(Q_i, K_jt) produces a weight w_ijt. The message sent along this edge is w_ijt * V_jt. Args: node_states: A Tensor with shape [B, N, D]. total_key_depth: an integer (K). total_value_depth: an integer (V). num_transforms: an integer specifying the number of transforms (T). This is typically the number of edge types. Returns: q: The attention queries for each destination node (shape [B, N, K]). k: The attention keys for each node and edge type (shape [B, N*T, K]). v: The attention values for each node and edge type (shape [B, N*T, V]).
def compute_mpnn_qkv(node_states, total_key_depth, total_value_depth, num_transforms): """Computes query, key and value for edge matrices. Let B be the number of batches. Let N be the number of nodes in the graph. Let D be the size of the node hidden states. Let K be the size of the attention keys/queries (total_key_depth). Let V be the size of the attention values (total_value_depth). Let T be the total number of transforms (num_transforms). Computes the queries, keys, and values for attention. * For each node N_i in the graph, a query Q_i of size K is computed. This query is used to determine the relative weights to give to each of the node's incoming edges. * For each node N_j and edge type t, a key K_jt of size K is computed. When an edge of type t goes from node N_j to any other node, K_jt is the key that is in the attention process. * For each node N_j and edge type t, a value V_jt of size V is computed. When an edge of type t goes from node N_j to node N_i, Attention(Q_i, K_jt) produces a weight w_ijt. The message sent along this edge is w_ijt * V_jt. Args: node_states: A Tensor with shape [B, N, D]. total_key_depth: an integer (K). total_value_depth: an integer (V). num_transforms: a integer specifying number of transforms (T). This is typically the number of edge types. Returns: q: The attention queries for each destination node (shape [B, N, K]). k: The attention keys for each node and edge type (shape [B, N*T, K]). v: The attention values for each node and edge type (shape [B, N*T, V]). """ # node_states is initially a tensor with shape [B, N, D]. The call to dense # creates a D x K kernel that serves as a fully-connected layer. # # For each possible batch b and node n in the first two dimensions of # node_states, the corresponding size-D vector (the third dimension of # node_states) is the hidden state for node n in batch b. Each of these size-D # vectors is multiplied by the kernel to produce an attention query of size K. # The result is a tensor of size [B, N, K] containing the attention queries # for each node in each batch. q = common_layers.dense( node_states, total_key_depth, use_bias=False, name="q_mpnn") # Creates the attention keys in a manner similar to the process of creating # the attention queries. One key is created for each type of outgoing edge the # corresponding node might have, meaning k will have shape [B, N, K*T]. k = _compute_edge_transforms(node_states, total_key_depth, num_transforms, name="k_mpnn") v = _compute_edge_transforms(node_states, total_value_depth, num_transforms, name="v_mpnn") return q, k, v
Identical to sparse_ggnn except that each input has a batch dimension. B = The batch size. N = The number of nodes in each batch. H = The size of the hidden states. T = The number of edge types. Args: node_states: Initial states of each node in the graph. Shape: [B, N, H] adjacency_matrices: Adjacency matrices of directed edges for each edge type and batch. Shape: [B, N, N, T] (sparse). num_edge_types: The number of edge types. T. hidden_size: The size of the hidden layer. H. use_bias: Whether to use bias in the hidden layer. average_aggregation: How to aggregate the incoming node messages. If average_aggregation is true, the messages are averaged. If it is false, they are summed. name: (optional) The scope within which tf variables should be created. Returns: The result of one round of message-passing of shape [B, N, H].
def sparse_message_pass_batched(node_states, adjacency_matrices, num_edge_types, hidden_size, use_bias=True, average_aggregation=False, name="sparse_ggnn_batched"): """Identical to sparse_ggnn except that each input has a batch dimension. B = The batch size. N = The number of nodes in each batch. H = The size of the hidden states. T = The number of edge types. Args: node_states: Initial states of each node in the graph. Shape: [B, N, H] adjacency_matrices: Adjacency matrices of directed edges for each edge type and batch. Shape: [B, N, N, T] (sparse). num_edge_types: The number of edge types. T. hidden_size: The size of the hidden layer. H. use_bias: Whether to use bias in the hidden layer. average_aggregation: How to aggregate the incoming node messages. If average_aggregation is true, the messages are averaged. If it is false, they are summed. name: (optional) The scope within which tf variables should be created. Returns: The result of one round of message-passing of shape [B, N, H]. """ b, n = tf.shape(node_states)[0], tf.shape(node_states)[1] # Flatten the batch dimension of the node states. node_states = tf.reshape(node_states, [b*n, hidden_size]) # Flatten the batch dimension of the adjacency matrices. indices = adjacency_matrices.indices new_index2 = indices[:, 3] # The edge type dimension. # Offset N x N adjacency matrix by the batch number in which it appears. new_index0 = indices[:, 1] + indices[:, 0] * tf.cast(n, tf.int64) new_index1 = indices[:, 2] + indices[:, 0] * tf.cast(n, tf.int64) # Combine these indices as triples. new_indices = tf.stack([new_index0, new_index1, new_index2], axis=1) # Build the new sparse matrix. new_shape = [tf.cast(b*n, tf.int64), tf.cast(b*n, tf.int64), num_edge_types] adjacency_matrices = tf.SparseTensor(indices=new_indices, values=adjacency_matrices.values, dense_shape=new_shape) # Run a message-passing step and return the result with the batch dimension. node_states = sparse_message_pass( node_states, adjacency_matrices, num_edge_types, hidden_size, use_bias=use_bias, average_aggregation=average_aggregation, name=name) return tf.reshape(node_states, [b, n, hidden_size])
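The batching trick treats B graphs of N nodes as one block-diagonal graph with B*N nodes, offsetting every node index by batch_index * N. A numpy sketch of just that index arithmetic, with a tiny made-up set of sparse indices:

import numpy as np

b, n = 2, 3
# Each row is one nonzero entry: [batch, source, target, edge_type].
indices = np.array([[0, 0, 1, 0],
                    [0, 2, 1, 1],
                    [1, 0, 2, 0]])
new_source = indices[:, 1] + indices[:, 0] * n
new_target = indices[:, 2] + indices[:, 0] * n
flat_indices = np.stack([new_source, new_target, indices[:, 3]], axis=1)
print(flat_indices)
# [[0 1 0]
#  [2 1 1]
#  [3 5 0]]  <- batch 1's edge (0 -> 2) becomes (3 -> 5) in the flat graph.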
One message-passing step for a GNN with a sparse adjacency matrix. Implements equation 2 (the message passing step) in [Li et al. 2015](https://arxiv.org/abs/1511.05493). N = The number of nodes in each batch. H = The size of the hidden states. T = The number of edge types. Args: node_states: Initial states of each node in the graph. Shape is [N, H]. adjacency_matrices: Adjacency matrix of directed edges for each edge type. Shape is [N, N, T] (sparse tensor). num_edge_types: The number of edge types. T. hidden_size: The size of the hidden state. H. use_bias: Whether to use bias in the hidden layer. average_aggregation: How to aggregate the incoming node messages. If average_aggregation is true, the messages are averaged. If it is false, they are summed. name: (optional) The scope within which tf variables should be created. Returns: The result of one step of Gated Graph Neural Network (GGNN) message passing. Shape: [N, H]
def sparse_message_pass(node_states,
                        adjacency_matrices,
                        num_edge_types,
                        hidden_size,
                        use_bias=True,
                        average_aggregation=False,
                        name="sparse_ggnn"):
  """One message-passing step for a GNN with a sparse adjacency matrix.

  Implements equation 2 (the message passing step) in
  [Li et al. 2015](https://arxiv.org/abs/1511.05493).

  N = The number of nodes in each batch.
  H = The size of the hidden states.
  T = The number of edge types.

  Args:
    node_states: Initial states of each node in the graph. Shape is [N, H].
    adjacency_matrices: Adjacency matrix of directed edges for each edge
      type. Shape is [N, N, T] (sparse tensor).
    num_edge_types: The number of edge types. T.
    hidden_size: The size of the hidden state. H.
    use_bias: Whether to use bias in the hidden layer.
    average_aggregation: How to aggregate the incoming node messages. If
      average_aggregation is true, the messages are averaged. If it is false,
      they are summed.
    name: (optional) The scope within which tf variables should be created.

  Returns:
    The result of one step of Gated Graph Neural Network (GGNN) message
    passing. Shape: [N, H]
  """
  n = tf.shape(node_states)[0]
  t = num_edge_types
  incoming_edges_per_type = tf.sparse_reduce_sum(adjacency_matrices, axis=1)

  # Convert the adjacency matrix into shape [T, N, N] - one [N, N] adjacency
  # matrix for each edge type. Since sparse tensor multiplication only supports
  # two-dimensional tensors, we actually convert the adjacency matrix into a
  # [T * N, N] tensor.
  adjacency_matrices = tf.sparse_transpose(adjacency_matrices, [2, 0, 1])
  adjacency_matrices = tf.sparse_reshape(adjacency_matrices, [t * n, n])

  # Multiply the adjacency matrix by the node states, producing a [T * N, H]
  # tensor. For each (edge type, node) pair, this tensor stores the sum of
  # the hidden states of the node's neighbors over incoming edges of that type.
  messages = tf.sparse_tensor_dense_matmul(adjacency_matrices, node_states)

  # Rearrange this tensor to have shape [N, T * H]. The incoming states of each
  # node's neighbors are summed by edge type and then concatenated together
  # into a single T * H vector.
  messages = tf.reshape(messages, [t, n, hidden_size])
  messages = tf.transpose(messages, [1, 0, 2])
  messages = tf.reshape(messages, [n, t * hidden_size])

  # Run each of those T * H vectors through a linear layer that produces
  # a vector of size H. This process is equivalent to running each H-sized
  # vector through a separate linear layer for each edge type and then adding
  # the results together.
  #
  # Note that, earlier on, we added together all of the states of neighbors
  # that were connected by edges of the same edge type. Since multiplication by
  # a linear layer distributes over addition, that process was equivalent to
  # running each incoming edge through a linear layer separately and then
  # adding everything at the end.
  with tf.variable_scope(name, default_name="sparse_ggnn"):
    final_node_states = common_layers.dense(
        messages, hidden_size, use_bias=False)

    # Multiply the bias for each edge type by the number of incoming edges of
    # that type.
    if use_bias:
      bias = tf.get_variable("bias", initializer=tf.zeros([t, hidden_size]))
      final_node_states += tf.matmul(incoming_edges_per_type, bias)

    if average_aggregation:
      incoming_edges = tf.reduce_sum(incoming_edges_per_type, -1, keepdims=True)
      incoming_edges = tf.tile(incoming_edges, [1, hidden_size])
      final_node_states /= incoming_edges + 1e-7

  return tf.reshape(final_node_states, [n, hidden_size])
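And the unbatched version on a tiny three-node graph (hypothetical sizes):

import tensorflow as tf

N, H, T = 3, 8, 2
node_states = tf.random_normal([N, H])

# Two incoming edges at node 0: type 0 from node 1, type 1 from node 2.
adjacency = tf.SparseTensor(indices=[[0, 1, 0], [0, 2, 1]],
                            values=[1.0, 1.0],
                            dense_shape=[N, N, T])

new_states = sparse_message_pass(node_states, adjacency,
                                 num_edge_types=T, hidden_size=H)
# new_states has shape [N, H]; with average_aggregation=True the two
# messages arriving at node 0 would be averaged instead of summed.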
Multihead scaled-dot-product attention with input/output transformations.

Let B be the number of batches.
Let N be the number of nodes in the graph.
Let D be the size of the node hidden states.
Let K be the size of the attention keys/queries (total_key_depth).
Let V be the size of the attention values (total_value_depth).
Let O be the size of the attention output (output_depth).
Let H be the number of heads (num_heads).
Let T be the total number of transforms (num_transforms).

The key and value depths are split across all of the heads. For example, if
the key depth is 6 and there are three heads, then the key for each head has
depth 2.

Args:
  node_states: A Tensor with shape [B, N, D]
  total_key_depth: An integer (K).
  total_value_depth: An integer (V).
  output_depth: An integer (O).
  num_heads: An integer (H).
  adjacency_matrix: A Tensor of ints with shape [B, T, N, N]. If there is an
    edge from node j to node i in batch b, then adjacency_matrix[b, i, j]
    contains the type of that edge as an integer. Otherwise, it contains 0.
  num_edge_types: An integer indicating number of edge types.
  num_transforms: An integer indicating number of transforms (T). If None,
    then num_transforms will be equal to num_edge_types.
  use_weighted_sum: If False, will only use a single transform per edge type.
    Otherwise, use a learned weighted sum of transforms per edge type.
  name: A string.

Returns:
  The result of the attention transformation. The output shape is [B, N, O].

Raises:
  ValueError: if the key depth or value depth are not divisible by the
    number of attention heads.
def multihead_mpnn_attention(node_states,
                             total_key_depth,
                             total_value_depth,
                             output_depth,
                             num_heads,
                             adjacency_matrix=None,
                             num_edge_types=5,
                             num_transforms=None,
                             use_weighted_sum=False,
                             name="mpnn_attention"):
  """Multihead scaled-dot-product attention with input/output transformations.

  Let B be the number of batches.
  Let N be the number of nodes in the graph.
  Let D be the size of the node hidden states.
  Let K be the size of the attention keys/queries (total_key_depth).
  Let V be the size of the attention values (total_value_depth).
  Let O be the size of the attention output (output_depth).
  Let H be the number of heads (num_heads).
  Let T be the total number of transforms (num_transforms).

  The key and value depths are split across all of the heads. For example, if
  the key depth is 6 and there are three heads, then the key for each head has
  depth 2.

  Args:
    node_states: A Tensor with shape [B, N, D]
    total_key_depth: An integer (K).
    total_value_depth: An integer (V).
    output_depth: An integer (O).
    num_heads: An integer (H).
    adjacency_matrix: A Tensor of ints with shape [B, T, N, N]. If there is an
      edge from node j to node i in batch b, then adjacency_matrix[b, i, j]
      contains the type of that edge as an integer. Otherwise, it contains 0.
    num_edge_types: An integer indicating number of edge types.
    num_transforms: An integer indicating number of transforms (T). If None,
      then num_transforms will be equal to num_edge_types.
    use_weighted_sum: If False, will only use a single transform per edge type.
      Otherwise, use a learned weighted sum of transforms per edge type.
    name: A string.

  Returns:
    The result of the attention transformation. The output shape is [B, N, O].

  Raises:
    ValueError: if the key depth or value depth are not divisible by the
      number of attention heads.
  """
  if total_key_depth % num_heads != 0:
    raise ValueError("Key depth (%d) must be divisible by the number of "
                     "attention heads (%d)." % (total_key_depth, num_heads))
  if total_value_depth % num_heads != 0:
    raise ValueError("Value depth (%d) must be divisible by the number of "
                     "attention heads (%d)." % (total_value_depth, num_heads))
  with tf.variable_scope(
      name, default_name="multihead_mpnn_attention", values=[node_states]):
    # If not explicitly set, default num_transforms to num_edge_types.
    num_transforms = (
        num_edge_types if num_transforms is None else num_transforms)

    # Create the query for each node's incoming edges.
    # Create the keys/values for each node for each possible outgoing edge
    # type.
    q, k, v = compute_mpnn_qkv(
        node_states,
        total_key_depth,
        total_value_depth,
        num_transforms)

    q_shape = tf.shape(q)  # As above, q_shape is [B, N, K].

    # Divides each query/key/value into separate heads. Specifically, the
    # query/key/value for each (batch, node) pair (i.e., the third dimensions
    # of q, k, and v) are broken into H separate pieces. These pieces are used
    # as the separate attention heads. The resulting tensors have shape
    # [B, H, N, ?/H], where ? = K, K*T or V*T as appropriate.
    q = common_attention.split_heads(q, num_heads)  # Shape [B, H, N, K/H].
    k = common_attention.split_heads(k, num_heads)  # Shape [B, H, N, K*T/H].
    v = common_attention.split_heads(v, num_heads)  # Shape [B, H, N, V*T/H].
    key_depth_per_head = total_key_depth // num_heads

    # Ensures that the logits don't have too large of a magnitude.
    q *= key_depth_per_head**-0.5

    # Rearrange the dimensions so that the head is first. This will make
    # subsequent steps easier (we loop over the head).
    q = tf.transpose(q, [1, 0, 2, 3])  # Shape [H, B, N, K/H].
k = tf.transpose(k, [1, 0, 2, 3]) # Shape [H, B, N, K*T/H]. v = tf.transpose(v, [1, 0, 2, 3]) # Shape [H, B, N, V*T/H]. # Split the keys and values into separate per-edge-type keys and values. k = tf.reshape(k, [ num_heads, q_shape[0], q_shape[1], num_transforms, total_key_depth // num_heads ]) # Shape [H, B, N, T, K/H]. k = tf.transpose(k, [0, 1, 3, 2, 4]) # Shape [H, B, T, N, K/H]. v = tf.reshape(v, [ num_heads, q_shape[0], q_shape[1], num_transforms, total_value_depth // num_heads ]) # Shape [H, B, N, T, V/H]. v = tf.transpose(v, [0, 1, 3, 2, 4]) # Shape [H, B, T, N, V/H]. # Perform attention for each head and combine the results into a list. # head_outputs stores a list of tensors, each with shape [1, B, N, V/H]. # The last dimension contains the values computed for each attention head. # Each value was determined by computing attention over all of the # incoming edges for node n, weighting the incoming values accordingly, # and adding those weighted values together. head_outputs = [] for head_id in range(num_heads): output = dot_product_mpnn_attention( q[head_id], k[head_id], v[head_id], adjacency_matrix, num_edge_types, num_transforms=num_transforms, use_weighted_sum=use_weighted_sum) # Store this result in the list of attention results for each head. # The call to expand_dims gives output shape [1, B, N, V/H], which will # come in handy when we combine the heads together. head_outputs.append(tf.expand_dims(output, axis=0)) # Combine the heads together into one tensor and rearrange the dimensions. x = tf.concat(head_outputs, axis=0) # Shape [H, B, N, V/H]. x = tf.transpose(x, [1, 0, 2, 3]) # Shape [B, H, N, V/H]. # Concatenate the values produced by each head together into one vector. x = common_attention.combine_heads(x) # Shape [B, N, V]. # A fully-connected linear layer to convert from the value vectors of size V # to output vectors of length O (the appropriate output length). x = common_layers.dense( x, output_depth, use_bias=False, name="output_transform") return x
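A small sketch of just the head bookkeeping used above (hypothetical sizes), showing how split_heads and the head-major transpose interact:

import tensorflow as tf
from tensor2tensor.layers import common_attention

B, N, K, H = 2, 5, 8, 2
q = tf.zeros([B, N, K])
q = common_attention.split_heads(q, H)   # Shape [B, H, N, K/H].
q = tf.transpose(q, [1, 0, 2, 3])        # Shape [H, B, N, K/H], head-major.
per_head_queries = [q[h] for h in range(H)]  # Each has shape [B, N, K/H].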
Dot product attention with edge vectors.

Let B be the number of batches.
Let N be the number of nodes in the graph.
Let K be the size of the attention keys/queries.
Let V be the size of the attention values.
Let T be the total number of transforms (num_transforms).

Args:
  q: The query Tensor of shape [B, N, K].
  k: The key Tensor of shape [B, T, N, K].
  v: The value Tensor of shape [B, T, N, V].
  adjacency_matrix: A Tensor of shape [B, N, N, T]. An entry at
    indices b, i, j, k is the indicator of the edge
    from node j to node i in batch b. A standard adjacency matrix will only
    have one edge type while a multigraph will have multiple edge types.
  num_edge_types: An integer specifying number of edge types.
  num_transforms: An integer indicating number of transforms (T). If None,
    then num_transforms will be equal to num_edge_types.
  use_weighted_sum: If False, will only use a single transform per edge type.
    Otherwise, use a learned weighted sum of transforms per edge type.
  name: A string.

Returns:
  A Tensor of shape [B, N, V] storing the result of computing attention
  weights using the queries and keys and combining the values according to
  those weights.

Raises:
  ValueError: if num_transforms doesn't equal num_edge_types and not using
    weighted sum.
def dot_product_mpnn_attention(q,
                               k,
                               v,
                               adjacency_matrix,
                               num_edge_types,
                               num_transforms=None,
                               use_weighted_sum=False,
                               name=None):
  """Dot product attention with edge vectors.

  Let B be the number of batches.
  Let N be the number of nodes in the graph.
  Let K be the size of the attention keys/queries.
  Let V be the size of the attention values.
  Let T be the total number of transforms (num_transforms).

  Args:
    q: The query Tensor of shape [B, N, K].
    k: The key Tensor of shape [B, T, N, K].
    v: The value Tensor of shape [B, T, N, V].
    adjacency_matrix: A Tensor of shape [B, N, N, T]. An entry at
      indices b, i, j, k is the indicator of the edge
      from node j to node i in batch b. A standard adjacency matrix will only
      have one edge type while a multigraph will have multiple edge types.
    num_edge_types: An integer specifying number of edge types.
    num_transforms: An integer indicating number of transforms (T). If None,
      then num_transforms will be equal to num_edge_types.
    use_weighted_sum: If False, will only use a single transform per edge type.
      Otherwise, use a learned weighted sum of transforms per edge type.
    name: A string.

  Returns:
    A Tensor of shape [B, N, V] storing the result of computing attention
    weights using the queries and keys and combining the values according to
    those weights.

  Raises:
    ValueError: if num_transforms doesn't equal num_edge_types and not using
      weighted sum.
  """
  with tf.variable_scope(
      name,
      default_name="dot_product_mpnn_attention",
      values=[q, k, v, adjacency_matrix, num_edge_types]):
    # If not explicitly set, default num_transforms to num_edge_types.
    num_transforms = (
        num_edge_types if num_transforms is None else num_transforms)

    if not use_weighted_sum and num_transforms != num_edge_types:
      raise ValueError("num_transforms must equal num_edge_types unless "
                       "use_weighted_sum is True")

    # Computes the raw dot-product attention values between each query and
    # the corresponding keys it needs to consider.
    #
    # This operation takes the dot product of (the query for
    # each node) and (the key for each node for each possible edge type),
    # creating an N x N matrix for each edge type. The entry at index (i, j)
    # is the dot-product for the edge from node i to node j of the appropriate
    # type. These dot products will eventually become attention weights
    # specifying how much node i weights an edge of that type coming from node
    # j.
    all_edge_logits = tf.matmul(
        tf.tile(tf.expand_dims(q, axis=1), [1, num_edge_types, 1, 1]),
        k,
        transpose_b=True)

    # The adjacency matrix assumes there is only one directed edge (i <- j) for
    # each pair of nodes. If such an edge exists, it contains the integer
    # type of that edge at position (i, j) of the adjacency matrix.
    #
    # Construct edge_vectors of shape [B, N, N, T].
    if use_weighted_sum:
      # Use dense representation for edge vectors.
      edge_vectors = make_edge_vectors(
          adjacency_matrix,
          num_edge_types,
          num_transforms)
    else:
      # Generate one-hot vectors based on edge types.
      # If there is an edge from node j to node i of type t, then index t of
      # the last dimension is 1 for entry (i, j) of the second and third
      # dimensions.
      edge_vectors = tf.one_hot(adjacency_matrix, num_transforms)

    # Rearranging the dimensions to match the shape of all_edge_logits.
    edge_vectors = tf.transpose(edge_vectors, [0, 3, 1, 2])

    # Element-wise multiplies all_edge_logits and edge_vectors.
    #
    # In other words: all_edge_logits contains N x N matrices of query-key
    # products.
    # This element-wise multiplication zeroes out entries that do not
    # correspond to actual edges in the graph of the appropriate edge type.
    # all_edge_logits retains shape [B, T, N, N].
    all_edge_logits *= edge_vectors

    # Since there can only be one edge from node A to node B, we can collapse
    # the T different adjacency matrices containing key-query pairs into one
    # adjacency matrix. logits is [B, N, N].
    # TODO(dbieber): Use a reshape instead of reduce sum to attend over all
    # edges instead of over all neighboring nodes to handle the multigraph
    # case.
    logits = tf.reduce_sum(all_edge_logits, axis=1)

    # For pairs of nodes with no edge between them, add a large negative bias
    # to the corresponding positions so that, after the softmax, they receive
    # a negligible attention weight.
    bias = tf.to_float(tf.equal(
        tf.reduce_sum(adjacency_matrix, axis=-1), 0)) * -1e9
    logits += bias

    # Turn the raw key-query products into a probability distribution (or,
    # in terms of attention, weights). The softmax is computed across the
    # last dimension of logits.
    compatibility = tf.nn.softmax(logits)  # Shape [B, N, N].

    # Computes a summary showing the attention matrix as an image. Does not do
    # any work toward actually performing attention.
    common_attention.attention_image_summary(
        tf.expand_dims(compatibility, axis=1), None)

    # Repeats the attention matrix T times for each batch, producing
    # a tensor with shape [B, T, N, N] where the [N, N] component is T
    # repeats of the values found in compatibility.
    edge_compatibility = tf.tile(
        tf.expand_dims(compatibility, axis=1), [1, num_edge_types, 1, 1])

    # Zeroes out the entries in edge_compatibility that do not correspond to
    # actual edges.
    edge_compatibility *= edge_vectors  # Shape [B, T, N, N].

    output = compute_values(edge_compatibility, v)
    return output
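A tiny self-contained illustration of the one-hot masking step above (toy values): each adjacency entry selects exactly one edge type, and the resulting edge_vectors zero out logits for every other type.

import tensorflow as tf

# [B=1, N=2, N=2] integer edge types; node 0 receives a type-2 edge from
# node 1, node 1 receives a type-1 edge from node 0.
adjacency = tf.constant([[[0, 2],
                          [1, 0]]])
edge_vectors = tf.transpose(tf.one_hot(adjacency, 3), [0, 3, 1, 2])
# edge_vectors has shape [1, 3, 2, 2]; edge_vectors[0, 2, 0, 1] == 1 and all
# other edge types at position (i=0, j=1) are zero.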
ggnn version of the MPNN from Gilmer et al.

Let B be the number of batches.
Let D be the size of the node hidden states.
Let V be the size of the output of the ggnn.
Let T be the number of transforms / edge types.

Args:
  node_states: The value Tensor of shape [B, N, D].
  adjacency_matrix: A Tensor of shape [B, N, N, T]. An entry at
    indices b, i, j, k is the indicator of the edge from node j to node i in
    batch b. A standard adjacency matrix will only have values of one, while a
    multigraph may have larger integer values.
  num_edge_types: An integer specifying number of edge types.
  total_value_depth: An integer (V)
  name: A string.

Returns:
  A Tensor of shape [B, N, V] storing, for each node, the sum of the
  per-edge-type transformed states of its neighbors over the incoming edges
  given by adjacency_matrix.
def ggnn_fast_dense(node_states,
                    adjacency_matrix,
                    num_edge_types,
                    total_value_depth,
                    name=None):
  """ggnn version of the MPNN from Gilmer et al.

  Let B be the number of batches.
  Let D be the size of the node hidden states.
  Let V be the size of the output of the ggnn.
  Let T be the number of transforms / edge types.

  Args:
    node_states: The value Tensor of shape [B, N, D].
    adjacency_matrix: A Tensor of shape [B, N, N, T]. An entry at
      indices b, i, j, k is the indicator of the edge from node j to node i in
      batch b. A standard adjacency matrix will only have values of one, while
      a multigraph may have larger integer values.
    num_edge_types: An integer specifying number of edge types.
    total_value_depth: An integer (V)
    name: A string.

  Returns:
    A Tensor of shape [B, N, V] storing, for each node, the sum of the
    per-edge-type transformed states of its neighbors over the incoming edges
    given by adjacency_matrix.
  """
  # The adjacency matrix is assumed to contain at most one edge of each type
  # between the same pair of nodes. adjacency_matrix will be converted to
  # shape [B, T, N, N] below.
  with tf.variable_scope(
      name,
      default_name="ggnn_fast_dense",
      values=[node_states, adjacency_matrix, num_edge_types]):
    nodes_shape = common_layers.shape_list(node_states)
    v = _compute_edge_transforms(node_states,
                                 total_value_depth,
                                 num_edge_types,
                                 name="v_mpnn")
    v = tf.reshape(v, [nodes_shape[0], nodes_shape[1],
                       num_edge_types, total_value_depth
                      ])  # Shape [B, N, T, V].
    v = tf.transpose(v, [0, 2, 1, 3])  # Shape [B, T, N, V].

    # Rearrange the adjacency matrix to shape [B, T, N, N] to match v.
    edge_vectors = tf.transpose(adjacency_matrix, [0, 3, 1, 2])
    output = compute_values(edge_vectors, v)
    return output
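A minimal usage sketch (hypothetical sizes); node_states is [B, N, D] and adjacency_matrix is a one-hot-style [B, N, N, T] edge indicator:

import tensorflow as tf

B, N, D, T, V = 2, 4, 8, 3, 6
node_states = tf.random_normal([B, N, D])

# Every node receives a type-0 edge from every node in this toy graph.
adjacency = tf.one_hot(tf.zeros([B, N, N], dtype=tf.int32), T)

out = ggnn_fast_dense(node_states, adjacency,
                      num_edge_types=T, total_value_depth=V)
# out has shape [B, N, V].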
Compute values. If edge_compatibility is just adjacency, we get a GGNN.

Args:
  edge_compatibility: A tensor of shape
    [batch, num_transforms, length, length]
  v: A tensor of shape [batch, num_transforms, length, depth]

Returns:
  output: A [batch, length, depth] tensor
def compute_values(edge_compatibility, v):
  """Compute values. If edge_compatibility is just adjacency, we get a GGNN.

  Args:
    edge_compatibility: A tensor of shape
      [batch, num_transforms, length, length]
    v: A tensor of shape [batch, num_transforms, length, depth]

  Returns:
    output: A [batch, length, depth] tensor
  """

  # Computes the incoming value vectors for each node by weighting them
  # according to the attention weights. These values are still segregated by
  # edge type.
  # Shape = [B, T, N, V].
  all_edge_values = tf.matmul(tf.to_float(edge_compatibility), v)

  # Combines the weighted value vectors together across edge types into a
  # single N x V matrix for each batch.
  output = tf.reduce_sum(all_edge_values, axis=1)  # Shape [B, N, V].
  return output
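Equivalently, the matmul followed by the sum over edge types collapses into a single einsum (a sketch under the shapes above; the contraction runs over both the transform axis and the source-node axis):

import tensorflow as tf

def compute_values_einsum(edge_compatibility, v):
  # b: batch, t: transform/edge type, i: target node, j: source node, d: depth.
  return tf.einsum("btij,btjd->bid", tf.to_float(edge_compatibility), v)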
Precompute the a_in and a_out tensors.

(we don't want to add to the graph every time _fprop is called)

Args:
  adjacency: placeholder of real valued vectors of shape [B, L, L, E]
  hparams: HParams object

Returns:
  edge_matrices: [batch, L * D, L * D] the dense matrix for message passing
    viewed as a block matrix (L, L) blocks of size (D, D). Each block is a
    function of the edge vector of the adjacency matrix at that spot.
def precompute_edge_matrices(adjacency, hparams):
  """Precompute the a_in and a_out tensors.

  (we don't want to add to the graph every time _fprop is called)

  Args:
    adjacency: placeholder of real valued vectors of shape [B, L, L, E]
    hparams: HParams object

  Returns:
    edge_matrices: [batch, L * D, L * D] the dense matrix for message passing
      viewed as a block matrix (L, L) blocks of size (D, D). Each block is a
      function of the edge vector of the adjacency matrix at that spot.
  """
  batch_size, num_nodes, _, edge_dim = common_layers.shape_list(adjacency)

  # Build the edge network for incoming edges.
  with tf.variable_scope("edge_network"):
    x = tf.reshape(
        adjacency, [batch_size * num_nodes * num_nodes, edge_dim],
        name="adj_reshape_in")

    for ip_layer in range(hparams.edge_network_layers):
      name = "edge_network_layer_%d" % ip_layer
      x = tf.layers.dense(common_layers.layer_preprocess(x, hparams),
                          hparams.edge_network_hidden_size,
                          activation=tf.nn.relu,
                          name=name)
    x = tf.layers.dense(common_layers.layer_preprocess(x, hparams),
                        hparams.hidden_size**2,
                        activation=None,
                        name="edge_network_output")

  # x has shape [batch * L * L, D * D].
  edge_matrices_flat = tf.reshape(x, [batch_size, num_nodes,
                                      num_nodes, hparams.hidden_size,
                                      hparams.hidden_size])

  # Reshape to [batch, L * D, L * D].
  edge_matrices = tf.reshape(
      tf.transpose(edge_matrices_flat, [0, 1, 3, 2, 4]), [
          -1, num_nodes * hparams.hidden_size,
          num_nodes * hparams.hidden_size
      ],
      name="edge_matrices")

  return edge_matrices
Computes a_t from h_{t-1}, see bottom of page 3 in the paper. Args: node_states: [B, L, D] tensor (h_{t-1}) edge_matrices (tf.float32): [B, L*D, L*D] Returns: messages (tf.float32): [B, L, D] For each pair of nodes in the graph a message is sent along both the incoming and outgoing edge.
def dense_message_pass(node_states, edge_matrices): """Computes a_t from h_{t-1}, see bottom of page 3 in the paper. Args: node_states: [B, L, D] tensor (h_{t-1}) edge_matrices (tf.float32): [B, L*D, L*D] Returns: messages (tf.float32): [B, L, D] For each pair of nodes in the graph a message is sent along both the incoming and outgoing edge. """ batch_size, num_nodes, node_dim = common_layers.shape_list(node_states) # Stack the nodes as a big column vector. h_flat = tf.reshape( node_states, [batch_size, num_nodes * node_dim, 1], name="h_flat") messages = tf.reshape( tf.matmul(edge_matrices, h_flat), [batch_size * num_nodes, node_dim], name="messages_matmul") message_bias = tf.get_variable("message_bias", shape=node_dim) messages = messages + message_bias messages = tf.reshape(messages, [batch_size, num_nodes, node_dim]) return messages
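A minimal sketch of the dense message-passing step on its own (hypothetical sizes); here edge_matrices is built directly instead of via precompute_edge_matrices:

import tensorflow as tf

B, L, D = 2, 4, 8
node_states = tf.random_normal([B, L, D])

# A block-diagonal identity edge matrix: every node only "messages" itself.
edge_matrices = tf.eye(L * D, batch_shape=[B])

with tf.variable_scope("toy_mpnn"):
  messages = dense_message_pass(node_states, edge_matrices)
# messages has shape [B, L, D].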
Helper: build tf.Example from (string -> int/float/str list) dictionary.
def to_example(dictionary): """Helper: build tf.Example from (string -> int/float/str list) dictionary.""" features = {} for (k, v) in six.iteritems(dictionary): if not v: raise ValueError("Empty generated field: %s" % str((k, v))) if isinstance(v[0], six.integer_types): features[k] = tf.train.Feature(int64_list=tf.train.Int64List(value=v)) elif isinstance(v[0], float): features[k] = tf.train.Feature(float_list=tf.train.FloatList(value=v)) elif isinstance(v[0], six.string_types): if not six.PY2: # Convert in python 3. v = [bytes(x, "utf-8") for x in v] features[k] = tf.train.Feature(bytes_list=tf.train.BytesList(value=v)) elif isinstance(v[0], bytes): features[k] = tf.train.Feature(bytes_list=tf.train.BytesList(value=v)) else: raise ValueError("Value for %s is not a recognized type; v: %s type: %s" % (k, str(v[0]), str(type(v[0])))) return tf.train.Example(features=tf.train.Features(feature=features))
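A quick usage sketch (feature names are hypothetical): build one example, serialize it, and parse it back.

import tensorflow as tf

example = to_example({"inputs": [3, 7, 1], "targets": [8, 1], "score": [0.5]})
serialized = example.SerializeToString()

parsed = tf.train.Example.FromString(serialized)
# parsed.features.feature["inputs"].int64_list.value now holds [3, 7, 1].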
generate_files but with a single writer writing to shard task_id.
def generate_files_distributed(generator, output_name, output_dir, num_shards=1, max_cases=None, task_id=0): """generate_files but with a single writer writing to shard task_id.""" assert task_id < num_shards output_filename = sharded_name(output_name, task_id, num_shards) output_file = os.path.join(output_dir, output_filename) tf.logging.info("Writing to file %s", output_file) writer = tf.python_io.TFRecordWriter(output_file) counter = 0 for case in generator: if counter % 100000 == 0: tf.logging.info("Generating case %d for %s." % (counter, output_name)) counter += 1 if max_cases and counter > max_cases: break example = to_example(case) writer.write(example.SerializeToString()) writer.close() return output_file
Generate cases from a generator and save as TFRecord files.

Generated cases are transformed to tf.Example protos and saved as TFRecords
in sharded files named output_dir/output_name-00..N-of-00..M, where
M = num_shards.

Args:
  generator: a generator yielding (string -> int/float/str list) dictionaries.
  output_filenames: List of output file paths.
  max_cases: maximum number of cases to get from the generator;
    if None (default), we use the generator until StopIteration is raised.
  cycle_every_n: how many cases from the generator to take before
    switching to the next shard; by default set to 1, switch every case.
def generate_files(generator, output_filenames,
                   max_cases=None, cycle_every_n=1):
  """Generate cases from a generator and save as TFRecord files.

  Generated cases are transformed to tf.Example protos and saved as TFRecords
  in sharded files named output_dir/output_name-00..N-of-00..M, where
  M = num_shards.

  Args:
    generator: a generator yielding (string -> int/float/str list)
      dictionaries.
    output_filenames: List of output file paths.
    max_cases: maximum number of cases to get from the generator;
      if None (default), we use the generator until StopIteration is raised.
    cycle_every_n: how many cases from the generator to take before
      switching to the next shard; by default set to 1, switch every case.
  """
  if outputs_exist(output_filenames):
    tf.logging.info("Skipping generator because output files exist at {}"
                    .format(output_filenames))
    return
  tmp_filenames = [fname + ".incomplete" for fname in output_filenames]
  num_shards = len(output_filenames)
  # Check whether this is training or eval data; ref: train_data_filenames().
  if num_shards > 0:
    if "-train" in output_filenames[0]:
      tag = "train"
    elif "-dev" in output_filenames[0]:
      tag = "eval"
    else:
      tag = "other"

  writers = [tf.python_io.TFRecordWriter(fname) for fname in tmp_filenames]
  counter, shard = 0, 0
  for case in generator:
    if case is None:
      continue
    if counter % 100000 == 0:
      tf.logging.info("Generating case %d." % counter)
    counter += 1
    if max_cases and counter > max_cases:
      break
    example = to_example(case)
    writers[shard].write(example.SerializeToString())
    if counter % cycle_every_n == 0:
      shard = (shard + 1) % num_shards

  for writer in writers:
    writer.close()

  for tmp_name, final_name in zip(tmp_filenames, output_filenames):
    tf.gfile.Rename(tmp_name, final_name)

  if num_shards > 0:
    if tag == "train":
      mlperf_log.transformer_print(
          key=mlperf_log.PREPROC_NUM_TRAIN_EXAMPLES, value=counter)
    elif tag == "eval":
      mlperf_log.transformer_print(
          key=mlperf_log.PREPROC_NUM_EVAL_EXAMPLES, value=counter)

  tf.logging.info("Generated %s Examples", counter)
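A usage sketch with a toy generator writing two training shards (paths and feature names are hypothetical):

import tensorflow as tf

def toy_generator():
  for i in range(10):
    yield {"inputs": [i, i + 1], "targets": [i % 2 + 1]}

output_filenames = ["/tmp/toy-train-00000-of-00002",
                    "/tmp/toy-train-00001-of-00002"]
generate_files(toy_generator(), output_filenames)
# Each shard now holds serialized tf.Example protos, readable with
# tf.data.TFRecordDataset(output_filenames).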
Report hook for download progress. Args: count: current block number block_size: block size total_size: total size
def download_report_hook(count, block_size, total_size): """Report hook for download progress. Args: count: current block number block_size: block size total_size: total size """ percent = int(count * block_size * 100 / total_size) print("\r%d%%" % percent + " completed", end="\r")
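A usage sketch: the signature above matches urllib's reporthook, so it can be passed directly to urlretrieve (URL and destination are hypothetical):

from six.moves import urllib

urllib.request.urlretrieve("http://example.com/data.txt",
                           "/tmp/data.txt",
                           reporthook=download_report_hook)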