INSTRUCTION
stringlengths
1
46.3k
RESPONSE
stringlengths
75
80.2k
Download and extract MSCOCO datasets to directory unless it is there.
def _get_mscoco(directory): """Download and extract MSCOCO datasets to directory unless it is there.""" for url in _MSCOCO_URLS: filename = os.path.basename(url) download_url = os.path.join(_MSCOCO_ROOT_URL, url) path = generator_utils.maybe_download(directory, filename, download_url) unzip_dir = os.path.join(directory, filename.strip(".zip")) if not tf.gfile.Exists(unzip_dir): zipfile.ZipFile(path, "r").extractall(directory)
Image generator for MSCOCO captioning problem with token-wise captions. Args: data_dir: path to the data directory. tmp_dir: path to temporary storage directory. training: a Boolean; if true, we use the train set, otherwise the test set. how_many: how many images and labels to generate. start_from: from which image to start. eos_list: optional list of end of sentence tokens, otherwise use default value `1`. vocab_filename: file within `tmp_dir` to read vocabulary from. Yields: A dictionary representing the images with the following fields: * image/encoded: the string encoding the image as JPEG, * image/format: the string "jpeg" representing image format, * image/class/label: a list of integers representing the caption, * image/height: an integer representing the height, * image/width: an integer representing the width. Every field is actually a list of the corresponding type.
def mscoco_generator(data_dir, tmp_dir, training, how_many, start_from=0, eos_list=None, vocab_filename=None): """Image generator for MSCOCO captioning problem with token-wise captions. Args: data_dir: path to the data directory. tmp_dir: path to temporary storage directory. training: a Boolean; if true, we use the train set, otherwise the test set. how_many: how many images and labels to generate. start_from: from which image to start. eos_list: optional list of end of sentence tokens, otherwise use default value `1`. vocab_filename: file within `tmp_dir` to read vocabulary from. Yields: A dictionary representing the images with the following fields: * image/encoded: the string encoding the image as JPEG, * image/format: the string "jpeg" representing image format, * image/class/label: a list of integers representing the caption, * image/height: an integer representing the height, * image/width: an integer representing the width. Every field is actually a list of the corresponding type. """ eos_list = [1] if eos_list is None else eos_list def get_vocab(): """Get vocab for caption text encoder.""" if data_dir is not None and vocab_filename is not None: vocab_filepath = os.path.join(data_dir, vocab_filename) if tf.gfile.Exists(vocab_filepath): tf.logging.info("Found vocab file: %s", vocab_filepath) vocab_symbolizer = text_encoder.SubwordTextEncoder(vocab_filepath) return vocab_symbolizer else: raise ValueError("Vocab file does not exist: %s" % vocab_filepath) return None vocab_symbolizer = get_vocab() _get_mscoco(tmp_dir) caption_filepath = ( _MSCOCO_TRAIN_CAPTION_FILE if training else _MSCOCO_EVAL_CAPTION_FILE) caption_filepath = os.path.join(tmp_dir, caption_filepath) prefix = _MSCOCO_TRAIN_PREFIX if training else _MSCOCO_EVAL_PREFIX caption_file = io.open(caption_filepath) caption_json = json.load(caption_file) # Dictionary from image_id to ((filename, height, width), captions). image_dict = {} for image in caption_json["images"]: image_dict[image["id"]] = [(image["file_name"], image["height"], image["width"]), []] annotations = caption_json["annotations"] annotation_count = len(annotations) image_count = len(image_dict) tf.logging.info("Processing %d images and %d labels\n" % (image_count, annotation_count)) for annotation in annotations: image_id = annotation["image_id"] image_dict[image_id][1].append(annotation["caption"]) data = list(image_dict.values())[start_from:start_from + how_many] random.shuffle(data) for image_info, labels in data: image_filename = image_info[0] image_filepath = os.path.join(tmp_dir, prefix, image_filename) with tf.gfile.Open(image_filepath, "rb") as f: encoded_image_data = f.read() height, width = image_info[1], image_info[2] for label in labels: if vocab_filename is None or vocab_symbolizer is None: label = [ord(c) for c in label] + eos_list else: label = vocab_symbolizer.encode(label) + eos_list yield { "image/encoded": [encoded_image_data], "image/format": ["jpeg"], "image/class/label": label, "image/height": [height], "image/width": [width] }
Convert FLAGS to list of args suitable for passing on cmd line.
def flags_as_args(): """Convert FLAGS to list of args suitable for passing on cmd line.""" if hasattr(FLAGS, "flag_values_dict"): args_dict = FLAGS.flag_values_dict() else: args_dict = dict(FLAGS.__dict__["__flags"]) del args_dict["cloud_mlengine"] # Configured later del args_dict["t2t_usr_dir"] args_dict.pop("h", None) args_dict.pop("helpfull", None) args_dict.pop("helpshort", None) args_dict.pop("help", None) args = [] for name, val in args_dict.items(): if val is None: continue if name.startswith("autotune"): continue args.extend(["--%s=%s" % (name, str(val))]) return args
Returns master_type for trainingInput.
def get_default_master_type(num_gpus=1): """Returns master_type for trainingInput.""" gpus_to_master_map = { 0: "standard", 1: "standard_p100", 4: "complex_model_m_p100", 8: "complex_model_l_gpu", } if num_gpus not in gpus_to_master_map: raise ValueError("Num gpus must be in %s" % str(sorted(list(gpus_to_master_map.keys())))) return gpus_to_master_map[num_gpus]
Construct jobSpec for ML Engine job.
def configure_job(): """Construct jobSpec for ML Engine job.""" # See documentation: # https://cloud.google.com/ml-engine/reference/rest/v1/projects.jobs#traininginput training_input = { "pythonModule": "tensor2tensor.bin.t2t_trainer", "args": flags_as_args(), "region": text_encoder.native_to_unicode(default_region()), "runtimeVersion": RUNTIME_VERSION, "pythonVersion": "3.5" if sys.version_info.major == 3 else "2.7", "jobDir": FLAGS.output_dir, "scaleTier": "CUSTOM", "masterType": FLAGS.cloud_mlengine_master_type or get_default_master_type( num_gpus=FLAGS.worker_gpu) } if FLAGS.use_tpu: training_input["masterType"] = (FLAGS.cloud_mlengine_master_type or "standard") training_input["workerType"] = "cloud_tpu" training_input["workerCount"] = 1 if FLAGS.hparams_range: tf.logging.info("Configuring hyperparameter tuning.") training_input["hyperparameters"] = configure_autotune( FLAGS.hparams_range, FLAGS.autotune_objective, FLAGS.autotune_maximize, FLAGS.autotune_max_trials, FLAGS.autotune_parallel_trials, ) timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S") job_spec = { "jobId": "%s_%s_t2t_%s" % (FLAGS.model, FLAGS.problem, timestamp), "labels": { "model": FLAGS.model, "problem": FLAGS.problem, "hparams": FLAGS.hparams_set }, "trainingInput": training_input, } return job_spec
Launch job on ML Engine.
def launch_job(job_spec): """Launch job on ML Engine.""" project_id = "projects/{}".format( text_encoder.native_to_unicode(default_project())) credentials = GoogleCredentials.get_application_default() cloudml = discovery.build("ml", "v1", credentials=credentials, cache_discovery=False) request = cloudml.projects().jobs().create(body=job_spec, parent=project_id) request.execute()
Tar and gzip src_dir and copy to GCS target_dir.
def _tar_and_copy(src_dir, target_dir): """Tar and gzip src_dir and copy to GCS target_dir.""" src_dir = src_dir.rstrip("/") target_dir = target_dir.rstrip("/") tmp_dir = tempfile.gettempdir().rstrip("/") src_base = os.path.basename(src_dir) shell_run( "tar --exclude=.git -zcf {tmp_dir}/{src_base}.tar.gz -C {src_dir} .", src_dir=src_dir, src_base=src_base, tmp_dir=tmp_dir) final_destination = "%s/%s.tar.gz" % (target_dir, src_base) shell_run( ("gsutil cp {tmp_dir}/{src_base}.tar.gz " "{final_destination}"), tmp_dir=tmp_dir, src_base=src_base, final_destination=final_destination) return final_destination
Tar Tensor2Tensor and cp to train_dir.
def tar_and_copy_t2t(train_dir): """Tar Tensor2Tensor and cp to train_dir.""" tf.logging.info("Tarring and pushing local Tensor2Tensor package.") output = text_encoder.native_to_unicode(shell_output( "pip show tensor2tensor")).split("\n") assert output[1].startswith("Version") assert output[7].startswith("Location") t2t_version = output[1].split(":")[1].strip() t2t_dir = output[7].split(":")[1].strip() # A local installation cloned from GitHub will have a setup.py file and a docs # folder is_local_t2t = all([ tf.gfile.Exists(os.path.join(t2t_dir, fname)) for fname in ["setup.py", "docs/cloud_mlengine.md"] ]) if is_local_t2t: tf.logging.info("Found local T2T installation. Tarring directory %s", t2t_dir) else: # PyPI installation # Create a folder with just a setup.py file pointing to the right version tf.logging.info("Found PyPI T2T installation. Launching tensor2tensor==%s", t2t_version) t2t_dir = os.path.join(tempfile.gettempdir(), "tensor2tensor_tmp") shutil.rmtree(t2t_dir, ignore_errors=True) os.mkdir(t2t_dir) setup_fname = os.path.join(t2t_dir, "setup.py") setup_file_str = get_setup_file( name="DummyT2TPackage", packages=["tensor2tensor==%s" % t2t_version] ) with tf.gfile.Open(setup_fname, "w") as f: f.write(setup_file_str) t2t_tar = _tar_and_copy(t2t_dir, train_dir) return t2t_tar
Package, tar, and copy usr_dir to GCS train_dir.
def tar_and_copy_usr_dir(usr_dir, train_dir): """Package, tar, and copy usr_dir to GCS train_dir.""" tf.logging.info("Tarring and pushing t2t_usr_dir.") usr_dir = os.path.abspath(os.path.expanduser(usr_dir)) # Copy usr dir to a temp location top_dir = os.path.join(tempfile.gettempdir(), "t2t_usr_container") tmp_usr_dir = os.path.join(top_dir, usr_dir_lib.INTERNAL_USR_DIR_PACKAGE) shutil.rmtree(top_dir, ignore_errors=True) shutil.copytree(usr_dir, tmp_usr_dir) # Insert setup.py if one does not exist top_setup_fname = os.path.join(top_dir, "setup.py") setup_file_str = get_setup_file( name="DummyUsrDirPackage", packages=get_requirements(usr_dir) ) with tf.gfile.Open(top_setup_fname, "w") as f: f.write(setup_file_str) usr_tar = _tar_and_copy(top_dir, train_dir) return usr_tar
Validates flags are set to acceptable values for CloudML Engine runs.
def validate_flags(): """Validates flags are set to acceptable values for CloudML Engine runs.""" assert not job_dir() assert FLAGS.output_dir.startswith("gs://") assert FLAGS.data_dir.startswith("gs://") assert FLAGS.worker_replicas <= 1 assert FLAGS.ps_replicas <= 0 if FLAGS.hparams_range: assert FLAGS.autotune_objective if FLAGS.worker_gpu: assert FLAGS.worker_gpu in [1, 4, 8] if FLAGS.cloud_mlengine_master_type: if FLAGS.worker_gpu: if FLAGS.worker_gpu == 1: assert FLAGS.cloud_mlengine_master_type in ["standard_gpu", "standard_p100"] elif FLAGS.worker_gpu == 4: assert FLAGS.cloud_mlengine_master_type in ["complex_model_m_gpu", "complex_model_m_p100"] else: assert FLAGS.cloud_mlengine_master_type == "complex_model_l_gpu" else: assert FLAGS.cloud_mlengine_master_type in ["standard", "large_model", "complex_model_s", "complex_model_m", "complex_model_l"]
Launch t2t_trainer on Cloud ML Engine.
def launch(): """Launch t2t_trainer on Cloud ML Engine.""" validate_flags() job_spec = configure_job() job_name = job_spec["jobId"] tf.logging.info("Launching job %s with ML Engine spec:\n%s", job_name, pprint.pformat(job_spec)) assert confirm() train_dir = FLAGS.output_dir t2t_tar = tar_and_copy_t2t(train_dir) configure_trainer_package(job_spec, t2t_tar) if FLAGS.t2t_usr_dir: usr_tar = tar_and_copy_usr_dir(FLAGS.t2t_usr_dir, train_dir) configure_usr_dir(job_spec, usr_tar) launch_job(job_spec) tf.logging.info("Launched %s. See console to track: %s.", job_name, CONSOLE_URL) tf.logging.info("Interact with the training job from the command line:") tf.logging.info("Abort job: gcloud ml-engine jobs cancel %s", job_name) tf.logging.info("Stream logs: gcloud ml-engine jobs stream-logs %s", job_name) tf.logging.info("Open tensorboard: tensorboard --logdir %s", train_dir)
Decorator for Layers, overriding add_weight for trainable initializers.
def add_weight(cls): """Decorator for Layers, overriding add_weight for trainable initializers.""" @functools.wraps(cls.add_weight) def _add_weight(self, name=None, shape=None, dtype=None, initializer=None, regularizer=None, **kwargs): """Adds weight.""" if isinstance(initializer, tf.keras.layers.Layer): weight = initializer(shape, dtype) self._trainable_weights.extend(initializer.trainable_weights) # pylint: disable=protected-access self._non_trainable_weights.extend(initializer.non_trainable_weights) # pylint: disable=protected-access if regularizer is not None: # TODO(trandustin): Replace need for this with # Layer._handle_weight_regularization. For Eager compatibility, random # variable __init__s cannot apply TF ops (cl/220898007). def loss_fn(): """Creates a regularization loss `Tensor`.""" with tf.name_scope(name + '/Regularizer'): return regularizer(initializer(shape, dtype)) self.add_loss(loss_fn) return weight return super(cls, self).add_weight(name=name, shape=shape, dtype=dtype, initializer=initializer, regularizer=regularizer, **kwargs) cls.add_weight = _add_weight return cls
Get the KL multiplier, either dynamically or schedule based. if hparams.latent_loss_multiplier_dynamic is set to true, then beta is being adjusted to keep KL under hparams.latent_loss_multiplier_epsilon. In order to do so, the beta is being updated at each iteration by taking steps of size hparams.latent_loss_multiplier_alpha. The same formulation can be retrieved by solving the Lagrangian with KL < epsilon as a constraint. Args: kl_loss: KL loss. Only used for dynamic adjustment. Returns: beta: the final value of beta.
def get_beta(self, kl_loss=0.0): """Get the KL multiplier, either dynamically or schedule based. if hparams.latent_loss_multiplier_dynamic is set to true, then beta is being adjusted to keep KL under hparams.latent_loss_multiplier_epsilon. In order to do so, the beta is being updated at each iteration by taking steps of size hparams.latent_loss_multiplier_alpha. The same formulation can be retrieved by solving the Lagrangian with KL < epsilon as a constraint. Args: kl_loss: KL loss. Only used for dynamic adjustment. Returns: beta: the final value of beta. """ if self.hparams.latent_loss_multiplier_dynamic: beta = tf.Variable(self.hparams.latent_loss_multiplier, trainable=False, dtype=tf.float32) alpha = self.hparams.latent_loss_multiplier_alpha epsilon = self.hparams.latent_loss_multiplier_epsilon shadow_beta = beta + alpha * (kl_loss - epsilon) # Caping the beta between 0 and 1. May need to change this later on. shadow_beta = tf.maximum(shadow_beta, 0.0) shadow_beta = tf.minimum(shadow_beta, 1.0) update_op = tf.assign(beta, shadow_beta) else: beta = common_video.beta_schedule( schedule=self.hparams.latent_loss_multiplier_schedule, global_step=self.get_iteration_num(), final_beta=self.hparams.latent_loss_multiplier, decay_start=(self.hparams.num_iterations_1st_stage + self.hparams.num_iterations_2nd_stage), decay_end=self.hparams.anneal_end) update_op = tf.identity(beta) # fake update for regular beta. with tf.control_dependencies([update_op]): tf.summary.scalar("beta", beta) return beta
Get KL loss for all the predicted Gaussians.
def get_kl_loss(self, means, log_vars, means_p=None, log_vars_p=None): """Get KL loss for all the predicted Gaussians.""" kl_loss = 0.0 if means_p is None: means_p = tf.unstack(tf.zeros_like(means)) if log_vars_p is None: log_vars_p = tf.unstack(tf.zeros_like(log_vars)) enumerated_inputs = enumerate(zip(means, log_vars, means_p, log_vars_p)) if self.is_training and self.hparams.stochastic_model: for i, (mean, log_var, mean_p, log_var_p) in enumerated_inputs: kl_loss += common_layers.kl_divergence(mean, log_var, mean_p, log_var_p) tf.summary.histogram("posterior_mean_%d" % i, mean) tf.summary.histogram("posterior_log_var_%d" % i, log_var) tf.summary.histogram("prior_mean_%d" % i, mean_p) tf.summary.histogram("prior_log_var_%d" % i, log_var_p) tf.summary.scalar("kl_raw", tf.reduce_mean(kl_loss)) beta = self.get_beta(kl_loss) # information capacity from "Understanding disentangling in beta-VAE" if self.hparams.information_capacity > 0.0: kl_loss = tf.abs(kl_loss - self.hparams.information_capacity) return beta * kl_loss
Create the latent tower.
def construct_latent_tower(self, images, time_axis): """Create the latent tower.""" # No latent in the first phase first_phase = tf.less( self.get_iteration_num(), self.hparams.num_iterations_1st_stage) # use all frames by default but this allows more # predicted frames at inference time latent_num_frames = self.hparams.latent_num_frames tf.logging.info("Creating latent tower with %d frames." % latent_num_frames) if latent_num_frames > 0: images = images[:, :latent_num_frames] return common_video.conv_latent_tower( images=images, time_axis=time_axis, latent_channels=self.hparams.latent_channels, min_logvar=self.hparams.latent_std_min, is_training=self.is_training, random_latent=first_phase, tiny_mode=self.hparams.tiny_mode, small_mode=self.hparams.small_mode)
Encode transformer inputs. Args: encoder_function: the encoder function inputs: Transformer inputs [batch_size, input_length, 1, hidden_dim] which will be flattened along the two spatial dimensions. target_space: scalar, target space ID. hparams: hyperparameters for model. attention_weights: weight to store attention to. features: optionally pass the entire features dictionary as well. This is needed now for "packed" datasets. losses: optional list onto which to append extra training losses **kwargs: additional arguments to pass to encoder_function Returns: Tuple of: encoder_output: Encoder representation. [batch_size, input_length, hidden_dim] encoder_decoder_attention_bias: Bias and mask weights for encoder-decoder attention. [batch_size, input_length]
def transformer_encode(encoder_function, inputs, target_space, hparams, attention_weights=None, features=None, losses=None, **kwargs): """Encode transformer inputs. Args: encoder_function: the encoder function inputs: Transformer inputs [batch_size, input_length, 1, hidden_dim] which will be flattened along the two spatial dimensions. target_space: scalar, target space ID. hparams: hyperparameters for model. attention_weights: weight to store attention to. features: optionally pass the entire features dictionary as well. This is needed now for "packed" datasets. losses: optional list onto which to append extra training losses **kwargs: additional arguments to pass to encoder_function Returns: Tuple of: encoder_output: Encoder representation. [batch_size, input_length, hidden_dim] encoder_decoder_attention_bias: Bias and mask weights for encoder-decoder attention. [batch_size, input_length] """ inputs = common_layers.flatten4d3d(inputs) encoder_input, self_attention_bias, encoder_decoder_attention_bias = ( transformer_prepare_encoder( inputs, target_space, hparams, features=features)) mlperf_log.transformer_print( key=mlperf_log.MODEL_HP_LAYER_POSTPROCESS_DROPOUT, value=hparams.layer_prepostprocess_dropout, hparams=hparams) encoder_input = tf.nn.dropout(encoder_input, 1.0 - hparams.layer_prepostprocess_dropout) attn_bias_for_padding = None # Otherwise the encoder will just use encoder_self_attention_bias. if hparams.unidirectional_encoder: attn_bias_for_padding = encoder_decoder_attention_bias encoder_output = encoder_function( encoder_input, self_attention_bias, hparams, nonpadding=features_to_nonpadding(features, "inputs"), save_weights_to=attention_weights, make_image_summary=not common_layers.is_xla_compiled(), losses=losses, attn_bias_for_padding=attn_bias_for_padding, **kwargs) return encoder_output, encoder_decoder_attention_bias
Decode Transformer outputs from encoder representation. Args: decoder_function: the decoder function decoder_input: inputs to bottom of the model. [batch_size, decoder_length, hidden_dim] encoder_output: Encoder representation. [batch_size, input_length, hidden_dim] encoder_decoder_attention_bias: Bias and mask weights for encoder-decoder attention. [batch_size, input_length] decoder_self_attention_bias: Bias and mask weights for decoder self-attention. [batch_size, decoder_length] hparams: hyperparameters for model. attention_weights: weight to store attention to. cache: dict, containing tensors which are the results of previous attentions, used for fast decoding. decode_loop_step: An integer, step number of the decoding loop. Only used for inference on TPU. nonpadding: optional Tensor with shape [batch_size, decoder_length] losses: optional list onto which to append extra training losses **kwargs: additional arguments to pass to decoder_function Returns: Final decoder representation. [batch_size, decoder_length, hidden_dim]
def transformer_decode(decoder_function, decoder_input, encoder_output, encoder_decoder_attention_bias, decoder_self_attention_bias, hparams, attention_weights=None, cache=None, decode_loop_step=None, nonpadding=None, losses=None, **kwargs): """Decode Transformer outputs from encoder representation. Args: decoder_function: the decoder function decoder_input: inputs to bottom of the model. [batch_size, decoder_length, hidden_dim] encoder_output: Encoder representation. [batch_size, input_length, hidden_dim] encoder_decoder_attention_bias: Bias and mask weights for encoder-decoder attention. [batch_size, input_length] decoder_self_attention_bias: Bias and mask weights for decoder self-attention. [batch_size, decoder_length] hparams: hyperparameters for model. attention_weights: weight to store attention to. cache: dict, containing tensors which are the results of previous attentions, used for fast decoding. decode_loop_step: An integer, step number of the decoding loop. Only used for inference on TPU. nonpadding: optional Tensor with shape [batch_size, decoder_length] losses: optional list onto which to append extra training losses **kwargs: additional arguments to pass to decoder_function Returns: Final decoder representation. [batch_size, decoder_length, hidden_dim] """ mlperf_log.transformer_print( key=mlperf_log.MODEL_HP_LAYER_POSTPROCESS_DROPOUT, value=hparams.layer_prepostprocess_dropout, hparams=hparams) decoder_input = tf.nn.dropout(decoder_input, 1.0 - hparams.layer_prepostprocess_dropout) decoder_output = decoder_function( decoder_input, encoder_output, decoder_self_attention_bias, encoder_decoder_attention_bias, hparams, cache=cache, decode_loop_step=decode_loop_step, nonpadding=nonpadding, save_weights_to=attention_weights, losses=losses, **kwargs) if (common_layers.is_xla_compiled() and hparams.mode == tf.estimator.ModeKeys.TRAIN): # TPU does not react kindly to extra dimensions. # TODO(noam): remove this once TPU is more forgiving of extra dims. return decoder_output else: # Expand since t2t expects 4d tensors. return tf.expand_dims(decoder_output, axis=2)
Create the initial cache for Transformer fast decoding.
def _init_transformer_cache(cache, hparams, batch_size, attention_init_length, encoder_output, encoder_decoder_attention_bias, scope_prefix): """Create the initial cache for Transformer fast decoding.""" key_channels = hparams.attention_key_channels or hparams.hidden_size value_channels = hparams.attention_value_channels or hparams.hidden_size num_layers = hparams.num_decoder_layers or hparams.num_hidden_layers vars_3d_num_heads = ( hparams.num_heads if hparams.get("attention_variables_3d") else 0) if cache is None: cache = {} cache.update({ "layer_%d" % layer: { # pylint: disable=g-complex-comprehension "k": common_attention.split_heads( tf.zeros([batch_size, attention_init_length, key_channels]), hparams.num_heads), "v": common_attention.split_heads( tf.zeros([batch_size, attention_init_length, value_channels]), hparams.num_heads), } for layer in range(num_layers) }) # If `ffn_layer` is in `["dense_relu_dense" or "conv_hidden_relu"]`, then the # cache key "f" won't be used, which means that the` shape of cache["f"]` # won't be changed to # `[beamsize*batch_size, decode_length, hparams.hidden_size]` and may cause # error when applying `nest.map reshape function` on it. if hparams.ffn_layer not in ["dense_relu_dense", "conv_hidden_relu"]: for layer in range(num_layers): cache["layer_%d" % layer]["f"] = tf.zeros( [batch_size, 0, hparams.hidden_size]) if encoder_output is not None: for layer in range(num_layers): layer_name = "layer_%d" % layer with tf.variable_scope( "%sdecoder/%s/encdec_attention/multihead_attention" % (scope_prefix, layer_name)): k_encdec = common_attention.compute_attention_component( encoder_output, key_channels, name="k", vars_3d_num_heads=vars_3d_num_heads) k_encdec = common_attention.split_heads(k_encdec, hparams.num_heads) v_encdec = common_attention.compute_attention_component( encoder_output, value_channels, name="v", vars_3d_num_heads=vars_3d_num_heads) v_encdec = common_attention.split_heads(v_encdec, hparams.num_heads) cache[layer_name]["k_encdec"] = k_encdec cache[layer_name]["v_encdec"] = v_encdec cache["encoder_output"] = encoder_output cache["encoder_decoder_attention_bias"] = encoder_decoder_attention_bias return cache
Given encoder output and a symbols to logits function, does fast decoding. Implements both greedy and beam search decoding for TPU, uses beam search iff beam_size > 1, otherwise beam search related arguments are ignored. Args: encoder_output: A tensor, output from encoder. encoder_decoder_attention_bias: A tensor, bias for use in encoder-decoder attention. symbols_to_logits_fn: Incremental decoding, function mapping triple `(ids, step, cache)` to symbol logits. hparams: Run hyperparameters. decode_length: An integer, how many additional timesteps to decode. vocab_size: Output vocabulary size. init_cache_fn: Function that returns the initial cache dict. beam_size: An integer, number of beams. top_beams: An integer, how many of the beams to return. alpha: A float that controls the length penalty. Larger the alpha, stronger the preference for longer translations. sos_id: Start-of-sequence symbol. eos_id: End-of-sequence symbol. batch_size: An integer, must be passed if there is no input. force_decode_length: A bool, whether to force the full decode length, or if False, stop when all beams hit eos_id. scope_prefix: str, prefix for decoder layer variable scopes. use_top_k_with_unique: bool, whether to use a fast (but decreased precision) top_k during beam search. Returns: A dict of decoding results { "outputs": integer `Tensor` of decoded ids of shape [batch_size, <= decode_length] if top_beams == 1 or [batch_size, top_beams, <= decode_length] otherwise "scores": decoding log probs from the beam search, None if using greedy decoding (beam_size=1) }. Raises: NotImplementedError: If beam size > 1 with partial targets.
def fast_decode_tpu(encoder_output, encoder_decoder_attention_bias, symbols_to_logits_fn, hparams, decode_length, vocab_size, init_cache_fn=_init_transformer_cache, beam_size=1, top_beams=1, alpha=1.0, sos_id=0, eos_id=beam_search.EOS_ID, batch_size=None, force_decode_length=False, scope_prefix="body/", use_top_k_with_unique=True): """Given encoder output and a symbols to logits function, does fast decoding. Implements both greedy and beam search decoding for TPU, uses beam search iff beam_size > 1, otherwise beam search related arguments are ignored. Args: encoder_output: A tensor, output from encoder. encoder_decoder_attention_bias: A tensor, bias for use in encoder-decoder attention. symbols_to_logits_fn: Incremental decoding, function mapping triple `(ids, step, cache)` to symbol logits. hparams: Run hyperparameters. decode_length: An integer, how many additional timesteps to decode. vocab_size: Output vocabulary size. init_cache_fn: Function that returns the initial cache dict. beam_size: An integer, number of beams. top_beams: An integer, how many of the beams to return. alpha: A float that controls the length penalty. Larger the alpha, stronger the preference for longer translations. sos_id: Start-of-sequence symbol. eos_id: End-of-sequence symbol. batch_size: An integer, must be passed if there is no input. force_decode_length: A bool, whether to force the full decode length, or if False, stop when all beams hit eos_id. scope_prefix: str, prefix for decoder layer variable scopes. use_top_k_with_unique: bool, whether to use a fast (but decreased precision) top_k during beam search. Returns: A dict of decoding results { "outputs": integer `Tensor` of decoded ids of shape [batch_size, <= decode_length] if top_beams == 1 or [batch_size, top_beams, <= decode_length] otherwise "scores": decoding log probs from the beam search, None if using greedy decoding (beam_size=1) }. Raises: NotImplementedError: If beam size > 1 with partial targets. 
""" if encoder_output is not None: batch_size = common_layers.shape_list(encoder_output)[0] cache = init_cache_fn(None, hparams, batch_size, decode_length, encoder_output, encoder_decoder_attention_bias, scope_prefix) mlperf_log.transformer_print( key=mlperf_log.MODEL_HP_SEQ_BEAM_SEARCH, value={ "vocab_size": vocab_size, "batch_size": batch_size, "beam_size": beam_size, "alpha": alpha, "max_decode_length": decode_length }, hparams=hparams) if beam_size > 1: # Beam Search initial_ids = sos_id * tf.ones([batch_size], dtype=tf.int32) decoded_ids, scores, _ = beam_search.beam_search( symbols_to_logits_fn, initial_ids, beam_size, decode_length, vocab_size, alpha, states=cache, eos_id=eos_id, stop_early=(top_beams == 1), use_tpu=True, use_top_k_with_unique=use_top_k_with_unique) if top_beams == 1: decoded_ids = decoded_ids[:, 0, 1:] scores = scores[:, 0] else: decoded_ids = decoded_ids[:, :top_beams, 1:] scores = scores[:, :top_beams] else: # Greedy def inner_loop(i, hit_eos, next_id, decoded_ids, cache, log_prob): """One step of greedy decoding.""" logits, cache = symbols_to_logits_fn(next_id, i, cache) log_probs = common_layers.log_prob_from_logits(logits) temperature = getattr(hparams, "sampling_temp", 0.0) keep_top = getattr(hparams, "sampling_keep_top_k", -1) if hparams.sampling_method == "argmax": temperature = 0.0 next_id = common_layers.sample_with_temperature( logits, temperature, keep_top) hit_eos |= tf.equal(next_id, eos_id) log_prob_indices = tf.stack([tf.range(tf.to_int64(batch_size)), next_id], axis=1) log_prob += tf.gather_nd(log_probs, log_prob_indices) next_id = tf.expand_dims(next_id, axis=1) decoded_ids = tf.transpose(decoded_ids) decoded_ids = inplace_ops.alias_inplace_update( decoded_ids, i, tf.squeeze(next_id, axis=1)) decoded_ids = tf.transpose(decoded_ids) return i + 1, hit_eos, next_id, decoded_ids, cache, log_prob def is_not_finished(i, hit_eos, *_): finished = i >= decode_length if not force_decode_length: finished |= tf.reduce_all(hit_eos) return tf.logical_not(finished) decoded_ids = tf.zeros([batch_size, decode_length], dtype=tf.int64) hit_eos = tf.fill([batch_size], False) next_id = sos_id * tf.ones([batch_size, 1], dtype=tf.int64) initial_log_prob = tf.zeros([batch_size], dtype=tf.float32) def compute_cache_shape_invariants(tensor): return tf.TensorShape(tensor.shape.as_list()) _, _, _, decoded_ids, _, log_prob = tf.while_loop( is_not_finished, inner_loop, [ tf.constant(0), hit_eos, next_id, decoded_ids, cache, initial_log_prob ], shape_invariants=[ tf.TensorShape([]), tf.TensorShape([batch_size]), tf.TensorShape([batch_size, 1]), tf.TensorShape([batch_size, decode_length]), nest.map_structure(compute_cache_shape_invariants, cache), tf.TensorShape([batch_size]), ]) scores = log_prob return {"outputs": decoded_ids, "scores": scores}
Given encoder output and a symbols to logits function, does fast decoding. Implements both greedy and beam search decoding, uses beam search iff beam_size > 1, otherwise beam search related arguments are ignored. Args: encoder_output: Output from encoder. encoder_decoder_attention_bias: a bias tensor for use in encoder-decoder attention symbols_to_logits_fn: Incremental decoding; function mapping triple `(ids, step, cache)` to symbol logits. hparams: run hyperparameters decode_length: an integer. How many additional timesteps to decode. vocab_size: Output vocabulary size. init_cache_fn: Function that returns the initial cache dict. beam_size: number of beams. top_beams: an integer. How many of the beams to return. alpha: Float that controls the length penalty. larger the alpha, stronger the preference for longer translations. sos_id: End-of-sequence symbol in beam search. eos_id: End-of-sequence symbol in beam search. batch_size: an integer scalar - must be passed if there is no input force_decode_length: bool, whether to force the full decode length, or if False, stop when all beams hit eos_id. scope_prefix: str, prefix for decoder layer variable scopes. cache: cache dictionary for additional predictions. Returns: A dict of decoding results { "outputs": integer `Tensor` of decoded ids of shape [batch_size, <= decode_length] if top_beams == 1 or [batch_size, top_beams, <= decode_length] otherwise "scores": decoding log probs from the beam search, None if using greedy decoding (beam_size=1) } Raises: NotImplementedError: If beam size > 1 with partial targets.
def fast_decode(encoder_output, encoder_decoder_attention_bias, symbols_to_logits_fn, hparams, decode_length, vocab_size, init_cache_fn=_init_transformer_cache, beam_size=1, top_beams=1, alpha=1.0, sos_id=0, eos_id=beam_search.EOS_ID, batch_size=None, force_decode_length=False, scope_prefix="body/", cache=None): """Given encoder output and a symbols to logits function, does fast decoding. Implements both greedy and beam search decoding, uses beam search iff beam_size > 1, otherwise beam search related arguments are ignored. Args: encoder_output: Output from encoder. encoder_decoder_attention_bias: a bias tensor for use in encoder-decoder attention symbols_to_logits_fn: Incremental decoding; function mapping triple `(ids, step, cache)` to symbol logits. hparams: run hyperparameters decode_length: an integer. How many additional timesteps to decode. vocab_size: Output vocabulary size. init_cache_fn: Function that returns the initial cache dict. beam_size: number of beams. top_beams: an integer. How many of the beams to return. alpha: Float that controls the length penalty. larger the alpha, stronger the preference for longer translations. sos_id: End-of-sequence symbol in beam search. eos_id: End-of-sequence symbol in beam search. batch_size: an integer scalar - must be passed if there is no input force_decode_length: bool, whether to force the full decode length, or if False, stop when all beams hit eos_id. scope_prefix: str, prefix for decoder layer variable scopes. cache: cache dictionary for additional predictions. Returns: A dict of decoding results { "outputs": integer `Tensor` of decoded ids of shape [batch_size, <= decode_length] if top_beams == 1 or [batch_size, top_beams, <= decode_length] otherwise "scores": decoding log probs from the beam search, None if using greedy decoding (beam_size=1) } Raises: NotImplementedError: If beam size > 1 with partial targets. 
""" if encoder_output is not None: batch_size = common_layers.shape_list(encoder_output)[0] cache = init_cache_fn( cache=cache, hparams=hparams, batch_size=batch_size, attention_init_length=0, encoder_output=encoder_output, encoder_decoder_attention_bias=encoder_decoder_attention_bias, scope_prefix=scope_prefix) if beam_size > 1: # Beam Search initial_ids = sos_id * tf.ones([batch_size], dtype=tf.int32) decoded_ids, scores, cache = beam_search.beam_search( symbols_to_logits_fn, initial_ids, beam_size, decode_length, vocab_size, alpha, states=cache, eos_id=eos_id, stop_early=(top_beams == 1)) if top_beams == 1: decoded_ids = decoded_ids[:, 0, 1:] scores = scores[:, 0] else: decoded_ids = decoded_ids[:, :top_beams, 1:] scores = scores[:, :top_beams] else: # Greedy def inner_loop(i, hit_eos, next_id, decoded_ids, cache, log_prob): """One step of greedy decoding.""" logits, cache = symbols_to_logits_fn(next_id, i, cache) log_probs = common_layers.log_prob_from_logits(logits) temperature = getattr(hparams, "sampling_temp", 0.0) keep_top = getattr(hparams, "sampling_keep_top_k", -1) if hparams.sampling_method == "argmax": temperature = 0.0 next_id = common_layers.sample_with_temperature( logits, temperature, keep_top) hit_eos |= tf.equal(next_id, eos_id) log_prob_indices = tf.stack([tf.range(tf.to_int64(batch_size)), next_id], axis=1) log_prob += tf.gather_nd(log_probs, log_prob_indices) next_id = tf.expand_dims(next_id, axis=1) decoded_ids = tf.concat([decoded_ids, next_id], axis=1) return i + 1, hit_eos, next_id, decoded_ids, cache, log_prob def is_not_finished(i, hit_eos, *_): finished = i >= decode_length if not force_decode_length: finished |= tf.reduce_all(hit_eos) return tf.logical_not(finished) decoded_ids = tf.zeros([batch_size, 0], dtype=tf.int64) hit_eos = tf.fill([batch_size], False) next_id = sos_id * tf.ones([batch_size, 1], dtype=tf.int64) initial_log_prob = tf.zeros([batch_size], dtype=tf.float32) _, _, _, decoded_ids, cache, log_prob = tf.while_loop( is_not_finished, inner_loop, [ tf.constant(0), hit_eos, next_id, decoded_ids, cache, initial_log_prob ], shape_invariants=[ tf.TensorShape([]), tf.TensorShape([None]), tf.TensorShape([None, None]), tf.TensorShape([None, None]), nest.map_structure(beam_search.get_state_shape_invariants, cache), tf.TensorShape([None]), ]) scores = log_prob return {"outputs": decoded_ids, "scores": scores, "cache": cache}
Prepare one shard of the model for the decoder. Args: targets: a Tensor. hparams: run hyperparameters features: optionally pass the entire features dictionary as well. This is needed now for "packed" datasets. Returns: decoder_input: a Tensor, bottom of decoder stack decoder_self_attention_bias: a bias tensor for use in decoder self-attention
def transformer_prepare_decoder(targets, hparams, features=None): """Prepare one shard of the model for the decoder. Args: targets: a Tensor. hparams: run hyperparameters features: optionally pass the entire features dictionary as well. This is needed now for "packed" datasets. Returns: decoder_input: a Tensor, bottom of decoder stack decoder_self_attention_bias: a bias tensor for use in decoder self-attention """ if hparams.causal_decoder_self_attention: # Causal attention. if hparams.prepend_mode == "prepend_inputs_full_attention": decoder_self_attention_bias = ( common_attention.attention_bias_prepend_inputs_full_attention( common_attention.embedding_to_padding(targets))) else: decoder_self_attention_bias = ( common_attention.attention_bias_lower_triangle( common_layers.shape_list(targets)[1])) else: # Full attention. decoder_padding = common_attention.embedding_to_padding(targets) decoder_self_attention_bias = ( common_attention.attention_bias_ignore_padding(decoder_padding)) if features and "targets_segmentation" in features: # "Packed" dataset - keep the examples from seeing each other. targets_segmentation = features["targets_segmentation"] targets_position = features["targets_position"] decoder_self_attention_bias += common_attention.attention_bias_same_segment( targets_segmentation, targets_segmentation) else: targets_position = None if hparams.proximity_bias: decoder_self_attention_bias += common_attention.attention_bias_proximal( common_layers.shape_list(targets)[1]) decoder_input = common_layers.shift_right_3d(targets) if hparams.pos == "timing": if targets_position is not None: decoder_input = common_attention.add_timing_signal_1d_given_position( decoder_input, targets_position) else: decoder_input = common_attention.add_timing_signal_1d(decoder_input) elif hparams.pos == "emb": decoder_input = common_attention.add_positional_embedding( decoder_input, hparams.max_length, "targets_positional_embedding", targets_position) if hparams.activation_dtype == "bfloat16": decoder_self_attention_bias = tf.cast(decoder_self_attention_bias, tf.bfloat16) return (decoder_input, decoder_self_attention_bias)
A stack of transformer layers. Args: decoder_input: a Tensor encoder_output: a Tensor decoder_self_attention_bias: bias Tensor for self-attention (see common_attention.attention_bias()) encoder_decoder_attention_bias: bias Tensor for encoder-decoder attention (see common_attention.attention_bias()) hparams: hyperparameters for model cache: dict, containing tensors which are the results of previous attentions, used for fast decoding. decode_loop_step: An integer, step number of the decoding loop. Only used for inference on TPU. name: a string nonpadding: optional Tensor with shape [batch_size, encoder_length] indicating what positions are not padding. This is used to mask out padding in convolutional layers. We generally only need this mask for "packed" datasets, because for ordinary datasets, no padding is ever followed by nonpadding. save_weights_to: an optional dictionary to capture attention weights for visualization; the weights tensor will be appended there under a string key created from the variable scope (including name). make_image_summary: Whether to make an attention image summary. losses: optional list onto which to append extra training losses layer_collection: A tensorflow_kfac.LayerCollection. Only used by the KFAC optimizer. Default is None. recurrent_memory_by_layer: Optional dict, mapping layer names to instances of transformer_memory.RecurrentMemory. Default is None. chunk_number: an optional integer Tensor with shape [batch] used to operate the recurrent_memory. Returns: y: a Tensors
def transformer_decoder(decoder_input, encoder_output, decoder_self_attention_bias, encoder_decoder_attention_bias, hparams, cache=None, decode_loop_step=None, name="decoder", nonpadding=None, save_weights_to=None, make_image_summary=True, losses=None, layer_collection=None, recurrent_memory_by_layer=None, chunk_number=None, ): """A stack of transformer layers. Args: decoder_input: a Tensor encoder_output: a Tensor decoder_self_attention_bias: bias Tensor for self-attention (see common_attention.attention_bias()) encoder_decoder_attention_bias: bias Tensor for encoder-decoder attention (see common_attention.attention_bias()) hparams: hyperparameters for model cache: dict, containing tensors which are the results of previous attentions, used for fast decoding. decode_loop_step: An integer, step number of the decoding loop. Only used for inference on TPU. name: a string nonpadding: optional Tensor with shape [batch_size, encoder_length] indicating what positions are not padding. This is used to mask out padding in convolutional layers. We generally only need this mask for "packed" datasets, because for ordinary datasets, no padding is ever followed by nonpadding. save_weights_to: an optional dictionary to capture attention weights for visualization; the weights tensor will be appended there under a string key created from the variable scope (including name). make_image_summary: Whether to make an attention image summary. losses: optional list onto which to append extra training losses layer_collection: A tensorflow_kfac.LayerCollection. Only used by the KFAC optimizer. Default is None. recurrent_memory_by_layer: Optional dict, mapping layer names to instances of transformer_memory.RecurrentMemory. Default is None. chunk_number: an optional integer Tensor with shape [batch] used to operate the recurrent_memory. 
Returns: y: a Tensors """ x = decoder_input attention_dropout_broadcast_dims = ( common_layers.comma_separated_string_to_integer_list( getattr(hparams, "attention_dropout_broadcast_dims", ""))) mlperf_log.transformer_print( key=mlperf_log.MODEL_HP_NUM_HIDDEN_LAYERS, value=hparams.num_decoder_layers or hparams.num_hidden_layers, hparams=hparams) mlperf_log.transformer_print( key=mlperf_log.MODEL_HP_ATTENTION_DROPOUT, value=hparams.attention_dropout, hparams=hparams) mlperf_log.transformer_print( key=mlperf_log.MODEL_HP_ATTENTION_DENSE, value={ "use_bias": "false", "num_heads": hparams.num_heads, "hidden_size": hparams.hidden_size }, hparams=hparams) with tf.variable_scope(name): for layer in range(hparams.num_decoder_layers or hparams.num_hidden_layers): layer_name = "layer_%d" % layer layer_cache = cache[layer_name] if cache is not None else None if recurrent_memory_by_layer is not None: recurrent_memory = recurrent_memory_by_layer[layer_name] else: recurrent_memory = None if layer < hparams.get("num_area_layers", 0): max_area_width = hparams.get("max_area_width", 1) max_area_height = hparams.get("max_area_height", 1) memory_height = hparams.get("max_area_height", 1) else: max_area_width = 1 max_area_height = 1 memory_height = 1 with tf.variable_scope(layer_name): with tf.variable_scope("self_attention"): y = common_attention.multihead_attention( common_layers.layer_preprocess( x, hparams, layer_collection=layer_collection), None, decoder_self_attention_bias, hparams.attention_key_channels or hparams.hidden_size, hparams.attention_value_channels or hparams.hidden_size, hparams.hidden_size, hparams.num_heads, hparams.attention_dropout, attention_type=hparams.self_attention_type, max_relative_position=hparams.max_relative_position, heads_share_relative_embedding=( hparams.heads_share_relative_embedding), add_relative_to_values=hparams.add_relative_to_values, save_weights_to=save_weights_to, cache=layer_cache, make_image_summary=make_image_summary, dropout_broadcast_dims=attention_dropout_broadcast_dims, max_length=hparams.get("max_length"), decode_loop_step=decode_loop_step, vars_3d=hparams.get("attention_variables_3d"), activation_dtype=hparams.get("activation_dtype", "float32"), weight_dtype=hparams.get("weight_dtype", "float32"), layer_collection=layer_collection, recurrent_memory=recurrent_memory, chunk_number=chunk_number, hard_attention_k=hparams.get("hard_attention_k", 0), max_area_width=max_area_width, max_area_height=max_area_height, memory_height=memory_height, area_key_mode=hparams.get("area_key_mode", "none"), area_value_mode=hparams.get("area_value_mode", "none"), training=(hparams.get("mode", tf.estimator.ModeKeys.TRAIN) == tf.estimator.ModeKeys.TRAIN)) x = common_layers.layer_postprocess(x, y, hparams) if encoder_output is not None: with tf.variable_scope("encdec_attention"): y = common_attention.multihead_attention( common_layers.layer_preprocess( x, hparams, layer_collection=layer_collection), encoder_output, encoder_decoder_attention_bias, hparams.attention_key_channels or hparams.hidden_size, hparams.attention_value_channels or hparams.hidden_size, hparams.hidden_size, hparams.num_heads, hparams.attention_dropout, max_relative_position=hparams.max_relative_position, heads_share_relative_embedding=( hparams.heads_share_relative_embedding), add_relative_to_values=hparams.add_relative_to_values, save_weights_to=save_weights_to, cache=layer_cache, make_image_summary=make_image_summary, dropout_broadcast_dims=attention_dropout_broadcast_dims, 
max_length=hparams.get("max_length"), vars_3d=hparams.get("attention_variables_3d"), activation_dtype=hparams.get("activation_dtype", "float32"), weight_dtype=hparams.get("weight_dtype", "float32"), layer_collection=layer_collection, hard_attention_k=hparams.get("hard_attention_k", 0), max_area_width=max_area_width, max_area_height=max_area_height, memory_height=memory_height, area_key_mode=hparams.get("area_key_mode", "none"), area_value_mode=hparams.get("area_value_mode", "none"), training=(hparams.get("mode", tf.estimator.ModeKeys.TRAIN) == tf.estimator.ModeKeys.TRAIN)) x = common_layers.layer_postprocess(x, y, hparams) with tf.variable_scope("ffn"): y = transformer_ffn_layer( common_layers.layer_preprocess( x, hparams, layer_collection=layer_collection), hparams, conv_padding="LEFT", nonpadding_mask=nonpadding, losses=losses, cache=layer_cache, decode_loop_step=decode_loop_step, layer_collection=layer_collection) x = common_layers.layer_postprocess(x, y, hparams) # if normalization is done in layer_preprocess, then it should also be done # on the output, since the output can grow very large, being the sum of # a whole stack of unnormalized layer outputs. mlperf_log.transformer_print( key=mlperf_log.MODEL_HP_NORM, value={"hidden_size": hparams.hidden_size}) return common_layers.layer_preprocess( x, hparams, layer_collection=layer_collection)
Set of hyperparameters.
def transformer_base_v1(): """Set of hyperparameters.""" hparams = common_hparams.basic_params1() hparams.norm_type = "layer" hparams.hidden_size = 512 hparams.batch_size = 4096 hparams.max_length = 256 hparams.clip_grad_norm = 0. # i.e. no gradient clipping hparams.optimizer_adam_epsilon = 1e-9 hparams.learning_rate_schedule = "legacy" hparams.learning_rate_decay_scheme = "noam" hparams.learning_rate = 0.1 hparams.learning_rate_warmup_steps = 4000 hparams.initializer_gain = 1.0 hparams.num_hidden_layers = 6 hparams.initializer = "uniform_unit_scaling" hparams.weight_decay = 0.0 hparams.optimizer_adam_beta1 = 0.9 hparams.optimizer_adam_beta2 = 0.98 hparams.num_sampled_classes = 0 hparams.label_smoothing = 0.1 hparams.shared_embedding_and_softmax_weights = True hparams.symbol_modality_num_shards = 16 # Add new ones like this. hparams.add_hparam("filter_size", 2048) # Layer-related flags. If zero, these fall back on hparams.num_hidden_layers. hparams.add_hparam("num_encoder_layers", 0) hparams.add_hparam("num_decoder_layers", 0) # Attention-related flags. hparams.add_hparam("num_heads", 8) hparams.add_hparam("attention_key_channels", 0) hparams.add_hparam("attention_value_channels", 0) hparams.add_hparam("ffn_layer", "dense_relu_dense") hparams.add_hparam("parameter_attention_key_channels", 0) hparams.add_hparam("parameter_attention_value_channels", 0) # All hyperparameters ending in "dropout" are automatically set to 0.0 # when not in training mode. hparams.add_hparam("attention_dropout", 0.0) hparams.add_hparam("attention_dropout_broadcast_dims", "") hparams.add_hparam("relu_dropout", 0.0) hparams.add_hparam("relu_dropout_broadcast_dims", "") hparams.add_hparam("pos", "timing") # timing, none hparams.add_hparam("nbr_decoder_problems", 1) hparams.add_hparam("proximity_bias", False) hparams.add_hparam("causal_decoder_self_attention", True) hparams.add_hparam("use_pad_remover", True) hparams.add_hparam("self_attention_type", "dot_product") hparams.add_hparam("conv_first_kernel", 3) hparams.add_hparam("attention_variables_3d", False) hparams.add_hparam("use_target_space_embedding", True) # These parameters are only used when ffn_layer=="local_moe_tpu" hparams.add_hparam("moe_overhead_train", 1.0) hparams.add_hparam("moe_overhead_eval", 2.0) hparams.moe_num_experts = 16 hparams.moe_loss_coef = 1e-3 # If specified, use this value instead of problem name in metrics.py. # This is useful for programs that can automatically compare experiments side # by side based on the same metric names. hparams.add_hparam("overload_eval_metric_name", "") # For making a transformer encoder unidirectional by using masked # attention. hparams.add_hparam("unidirectional_encoder", False) # For hard attention. hparams.add_hparam("hard_attention_k", 0) return hparams
Set of hyperparameters.
def transformer_base_v2(): """Set of hyperparameters.""" hparams = transformer_base_v1() hparams.layer_preprocess_sequence = "n" hparams.layer_postprocess_sequence = "da" hparams.layer_prepostprocess_dropout = 0.1 hparams.attention_dropout = 0.1 hparams.relu_dropout = 0.1 hparams.learning_rate_warmup_steps = 8000 hparams.learning_rate = 0.2 return hparams
Set of hyperparameters for lm1b packed following tpu params.
def transformer_base_vq_ada_32ex_packed(): """Set of hyperparameters for lm1b packed following tpu params.""" hparams = transformer_base_v2() expert_utils.update_hparams_for_vq_gating(hparams) hparams.moe_num_experts = 32 hparams.gating_type = "vq" # this gives us a batch size of 16 because each seq is len 256 hparams.batch_size = 5072 hparams.ffn_layer = "local_moe" hparams.shared_embedding_and_softmax_weights = False hparams.learning_rate_warmup_steps = 10000 # one epoch for languagemodel_lm1b32k_packed = 27200 steps w/ bsize 128 hparams.learning_rate_decay_steps = 27200 hparams.num_heads = 4 hparams.num_blocks = 1 hparams.moe_k = 1 hparams.num_decoder_layers = 6 hparams.label_smoothing = 0. hparams.layer_prepostprocess_dropout = 0.1 hparams.layer_postprocess_sequence = "dan" hparams.layer_preprocess_sequence = "none" hparams.weight_decay = 1e-06 hparams.attention_dropout = 0.1 hparams.optimizer = "Adafactor" hparams.learning_rate_schedule = "linear_warmup*rsqrt_decay*linear_decay" hparams.activation_dtype = "float32" hparams.learning_rate = 0.1 hparams.learning_rate_constant = 1.0 return hparams
Set of hyperparameters.
def transformer_base_vq1_16_nb1_packed_nda_b01_scales(): """Set of hyperparameters.""" hparams = transformer_base_vq_ada_32ex_packed() hparams.use_scales = int(True) hparams.moe_num_experts = 16 hparams.moe_k = 1 hparams.beta = 0.1 hparams.layer_preprocess_sequence = "n" hparams.layer_postprocess_sequence = "da" hparams.ema = False return hparams
Set of hyperparameters.
def transformer_base_vq1_16_nb1_packed_dan_b01_scales(): """Set of hyperparameters.""" hparams = transformer_base_vq_ada_32ex_packed() hparams.use_scales = int(True) hparams.moe_num_experts = 16 hparams.moe_k = 1 hparams.beta = 0.1 hparams.ema = False return hparams
Set of hyperparameters.
def transformer_base_vq1_16_nb1_packed_nda_b01_scales_dialog(): """Set of hyperparameters.""" hparams = transformer_base_vq1_16_nb1_packed_nda_b01_scales() hparams.batch_size = 2048 hparams.max_length = 1024 hparams.filter_size = 3072 return hparams
Set of hyperparameters.
def transformer_ada_lmpackedbase_dialog(): """Set of hyperparameters.""" hparams = transformer_base_vq_ada_32ex_packed() hparams.max_length = 1024 hparams.ffn_layer = "dense_relu_dense" hparams.batch_size = 4096 return hparams
Base parameters for Transformer model.
def transformer_base_v3(): """Base parameters for Transformer model.""" # Update parameters here, then occasionally cut a versioned set, e.g. # transformer_base_v2. hparams = transformer_base_v2() hparams.optimizer_adam_beta2 = 0.997 # New way of specifying learning rate schedule. # Equivalent to previous version. hparams.learning_rate_schedule = ( "constant*linear_warmup*rsqrt_decay*rsqrt_hidden_size") hparams.learning_rate_constant = 2.0 return hparams
HParams for transformer big model on WMT.
def transformer_big(): """HParams for transformer big model on WMT.""" hparams = transformer_base() hparams.hidden_size = 1024 hparams.filter_size = 4096 # Reduce batch size to 2048 from 4096 to be able to train the model on a GPU # with 12 GB memory. For example, NVIDIA TITAN V GPU. hparams.batch_size = 2048 hparams.num_heads = 16 hparams.layer_prepostprocess_dropout = 0.3 return hparams
Hparams for transformer on LM for pretraining/finetuning/mixing.
def transformer_tall(): """Hparams for transformer on LM for pretraining/finetuning/mixing.""" hparams = transformer_base() hparams.batch_size = 2048 hparams.hidden_size = 768 hparams.filter_size = 3072 hparams.num_hidden_layers = 12 hparams.num_heads = 12 hparams.label_smoothing = 0.0 hparams.max_length = 1024 hparams.eval_drop_long_sequences = True hparams.multiproblem_mixing_schedule = "pretrain" hparams.multiproblem_vocab_size = 65536 hparams.clip_grad_norm = 1.0 return hparams
Tied means fine-tune CNN/DM summarization as LM.
def transformer_tall_finetune_tied(): """Tied means fine-tune CNN/DM summarization as LM.""" hparams = transformer_tall() hparams.multiproblem_max_input_length = 750 hparams.multiproblem_max_target_length = 100 hparams.multiproblem_schedule_max_examples = 0 hparams.learning_rate_schedule = ("linear_warmup*constant*cosdecay") hparams.learning_rate_constant = 5e-5 hparams.learning_rate_warmup_steps = 100 # Set train steps to learning_rate_decay_steps or less hparams.learning_rate_decay_steps = 80000 hparams.multiproblem_target_eval_only = True hparams.multiproblem_reweight_label_loss = True hparams.multiproblem_label_weight = 1.0 hparams.optimizer = "true_adam" return hparams
Fine-tune CNN/DM with a unidirectional encoder and decoder.
def transformer_tall_finetune_uniencdec(): """Fine-tune CNN/DM with a unidirectional encoder and decoder.""" hparams = transformer_tall() hparams.max_input_seq_length = 750 hparams.max_target_seq_length = 100 hparams.optimizer = "true_adam" hparams.learning_rate_schedule = ("linear_warmup*constant*cosdecay") hparams.learning_rate_decay_steps = 80000 hparams.learning_rate_constant = 5e-5 hparams.learning_rate_warmup_steps = 100 hparams.unidirectional_encoder = True return hparams
Train CNN/DM with a unidirectional encoder and decoder.
def transformer_tall_train_uniencdec(): """Train CNN/DM with a unidirectional encoder and decoder.""" hparams = transformer_tall() hparams.max_input_seq_length = 750 hparams.max_target_seq_length = 100 hparams.optimizer = "true_adam" hparams.learning_rate_schedule = ("linear_warmup*constant*cosdecay") hparams.learning_rate_decay_steps = 150000 hparams.learning_rate_constant = 2e-4 hparams.unidirectional_encoder = True return hparams
Hparams for transformer on LM for finetuning on text class problems.
def transformer_tall_finetune_textclass(): """Hparams for transformer on LM for finetuning on text class problems.""" hparams = transformer_tall() hparams.learning_rate_constant = 6.25e-5 hparams.learning_rate_schedule = ("linear_warmup*constant*linear_decay") hparams.multiproblem_schedule_max_examples = 0 hparams.multiproblem_target_eval_only = True hparams.learning_rate_warmup_steps = 50 # Set train steps to learning_rate_decay_steps or less hparams.learning_rate_decay_steps = 25000 hparams.multiproblem_reweight_label_loss = True hparams.multiproblem_label_weight = 0.95 return hparams
Hparams for transformer on LM pretraining (with 64k vocab).
def transformer_tall_pretrain_lm(): """Hparams for transformer on LM pretraining (with 64k vocab).""" hparams = transformer_tall() hparams.learning_rate_constant = 2e-4 hparams.learning_rate_schedule = ("linear_warmup*constant*cosdecay") hparams.optimizer = "adam_w" hparams.optimizer_adam_beta1 = 0.9 hparams.optimizer_adam_beta2 = 0.999 hparams.optimizer_adam_epsilon = 1e-8 # Set max examples to something big when pretraining only the LM, definitely # something an order of magnitude bigger than number of train steps. hparams.multiproblem_schedule_max_examples = 5e8 # Set train steps to learning_rate_decay_steps or less hparams.learning_rate_decay_steps = 5000000 return hparams
Hparams for transformer on LM pretraining (with 64k vocab) on TPU.
def transformer_tall_pretrain_lm_tpu_adafactor(): """Hparams for transformer on LM pretraining (with 64k vocab) on TPU.""" hparams = transformer_tall_pretrain_lm() update_hparams_for_tpu(hparams) hparams.max_length = 1024 # For multi-problem on TPU we need it in absolute examples. hparams.batch_size = 8 hparams.multiproblem_vocab_size = 2**16 return hparams
Hparams for transformer on LM pretraining on TPU, large model.
def transformer_tall_pretrain_lm_tpu_adafactor_large(): """Hparams for transformer on LM pretraining on TPU, large model.""" hparams = transformer_tall_pretrain_lm_tpu_adafactor() hparams.hidden_size = 1024 hparams.num_heads = 16 hparams.filter_size = 32768 # max fitting in 16G memory is 49152, batch 2 hparams.batch_size = 4 hparams.multiproblem_mixing_schedule = "constant" # Task order: lm/en-de/en-fr/en-ro/de-en/fr-en/ro-en/cnndm/mnli/squad. hparams.multiproblem_per_task_threshold = "320,80,160,1,80,160,2,20,10,5" return hparams
Hparams for transformer on LM pretraining on TPU with AdamW.
def transformer_tall_pretrain_lm_tpu(): """Hparams for transformer on LM pretraining on TPU with AdamW.""" hparams = transformer_tall_pretrain_lm_tpu_adafactor() # Optimizer gets reset in update_hparams_for_tpu so we set it again here. hparams.learning_rate_constant = 2e-4 hparams.learning_rate_schedule = ("linear_warmup * constant * cosdecay") hparams.optimizer = "adam_w" return hparams
HParams for transformer base model for single GPU.
def transformer_base_single_gpu(): """HParams for transformer base model for single GPU.""" hparams = transformer_base() hparams.batch_size = 1024 hparams.learning_rate_schedule = "constant*linear_warmup*rsqrt_decay" hparams.learning_rate_constant = 0.1 hparams.learning_rate_warmup_steps = 16000 return hparams
HParams for parsing on WSJ only.
def transformer_parsing_base(): """HParams for parsing on WSJ only.""" hparams = transformer_base() hparams.attention_dropout = 0.2 hparams.layer_prepostprocess_dropout = 0.2 hparams.max_length = 512 hparams.learning_rate_warmup_steps = 16000 hparams.hidden_size = 1024 hparams.learning_rate = 0.05 hparams.shared_embedding_and_softmax_weights = False return hparams
HParams for parsing on WSJ semi-supervised.
def transformer_parsing_big(): """HParams for parsing on WSJ semi-supervised.""" hparams = transformer_big() hparams.max_length = 512 hparams.shared_source_target_embedding = False hparams.learning_rate_warmup_steps = 4000 hparams.layer_prepostprocess_dropout = 0.1 hparams.batch_size = 2048 hparams.learning_rate = 0.05 return hparams
Small range of hyperparameters.
def transformer_base_range(rhp): """Small range of hyperparameters.""" # After starting from base, set intervals for some parameters. rhp.set_float("learning_rate", 0.3, 3.0, scale=rhp.LOG_SCALE) rhp.set_discrete("learning_rate_warmup_steps", [1000, 2000, 4000, 8000, 16000]) rhp.set_float("initializer_gain", 0.5, 2.0) rhp.set_float("optimizer_adam_beta1", 0.85, 0.95) rhp.set_float("optimizer_adam_beta2", 0.97, 0.99) rhp.set_float("weight_decay", 0.0, 1e-4)
Use relative position embeddings instead of absolute position encodings.
def transformer_relative(): """Use relative position embeddings instead of absolute position encodings.""" hparams = transformer_base() hparams.pos = None hparams.self_attention_type = "dot_product_relative" hparams.max_relative_position = 20 return hparams
HParams for Transformer model on TPU for MLPerf on TPU 2x2.
def transformer_mlperf_tpu(): """HParams for Transformer model on TPU for MLPerf on TPU 2x2.""" hparams = transformer_base_v3() hparams.mlperf_mode = True hparams.symbol_modality_num_shards = 1 hparams.max_length = 256 # ignored when using "_packed" problems hparams.batch_size = 2048 # per-chip batch size matches the reference model hparams.hidden_size = 1024 hparams.filter_size = 4096 hparams.num_heads = 16 hparams.attention_dropout_broadcast_dims = "0,1" # batch, heads hparams.relu_dropout_broadcast_dims = "1" # length hparams.layer_prepostprocess_dropout_broadcast_dims = "1" # length return hparams
Change hparams to be compatible with TPU training.
def update_hparams_for_tpu(hparams):
  """Change hparams to be compatible with TPU training."""

  # Adafactor uses less memory than Adam.
  # switch to Adafactor with its recommended learning rate scheme.
  hparams.optimizer = "Adafactor"
  hparams.learning_rate_schedule = "rsqrt_decay"
  hparams.learning_rate_warmup_steps = 10000

  # Avoid an expensive concat on TPU.
  # >1 shards helps with faster parameter distribution on multi-GPU machines
  hparams.symbol_modality_num_shards = 1

  # Adaptive batch sizes and sequence lengths are not supported on TPU.
  # Instead, every batch has the same sequence length and the same batch size.
  # Longer sequences are dropped and shorter ones are padded.
  #
  # It is therefore suggested to use a problem where examples have been combined
  # to a longer length, e.g. the "_packed" problems.
  #
  # For problems with variable sequence lengths, this parameter controls the
  # maximum sequence length. Longer sequences are dropped and shorter ones
  # are padded.
  #
  # For problems with fixed sequence lengths - e.g. the "_packed" problems,
  # this hyperparameter is ignored.
  hparams.max_length = 64

  # TPUs have less memory than GPUs, so decrease the batch size
  hparams.batch_size = 2048

  # Using noise broadcast in the dropout layers saves memory during training.
  hparams.attention_dropout_broadcast_dims = "0,1"  # batch, heads
  hparams.relu_dropout_broadcast_dims = "1"  # length
  hparams.layer_prepostprocess_dropout_broadcast_dims = "1"  # length
  return hparams
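update_hparams_for_tpu is meant to be layered on top of an existing set, exactly as the *_tpu functions elsewhere in this file do. A minimal usage sketch, assuming the functions above are importable:

# Start from a GPU-oriented set, then adapt it for TPU in place.
hparams = transformer_base()
hparams = update_hparams_for_tpu(hparams)
assert hparams.optimizer == "Adafactor"
assert hparams.batch_size == 2048  # fixed per-core batch, not adaptive
assert hparams.max_length == 64    # ignored for "_packed" problems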
Small range of hyperparameters.
def transformer_tpu_range(rhp): """Small range of hyperparameters.""" # After starting from base, set intervals for some parameters. rhp.set_float("learning_rate", 0.3, 3.0, scale=rhp.LOG_SCALE) rhp.set_discrete("learning_rate_warmup_steps", [1000, 2000, 4000, 8000, 16000]) rhp.set_float("initializer_gain", 0.5, 2.0) rhp.set_float("optimizer_adam_beta1", 0.85, 0.95) rhp.set_float("optimizer_adam_beta2", 0.97, 0.99) rhp.set_float("weight_decay", 0.0, 2.0)
No dropout, label smoothing, max_length.
def transformer_clean(): """No dropout, label smoothing, max_length.""" hparams = transformer_base_v2() hparams.label_smoothing = 0.0 hparams.layer_prepostprocess_dropout = 0.0 hparams.attention_dropout = 0.0 hparams.relu_dropout = 0.0 hparams.max_length = 0 return hparams
HParams for training languagemodel_lm1b8k on tpu. 92M Params.
def transformer_lm_tpu_0(): """HParams for training languagemodel_lm1b8k on tpu. 92M Params.""" hparams = transformer_clean_big() update_hparams_for_tpu(hparams) hparams.num_heads = 4 # Heads are expensive on TPUs. hparams.batch_size = 4096 hparams.shared_embedding_and_softmax_weights = False hparams.layer_prepostprocess_dropout = 0.1 return hparams
HParams for training ASR model on LibriSpeech V1.
def transformer_librispeech_v1(): """HParams for training ASR model on LibriSpeech V1.""" hparams = transformer_base() hparams.num_heads = 4 hparams.filter_size = 1024 hparams.hidden_size = 256 hparams.num_encoder_layers = 5 hparams.num_decoder_layers = 3 hparams.learning_rate = 0.15 hparams.batch_size = 6000000 librispeech.set_librispeech_length_hparams(hparams) return hparams
HParams for training ASR model on Librispeech on TPU v1.
def transformer_librispeech_tpu_v1(): """HParams for training ASR model on Librispeech on TPU v1.""" hparams = transformer_librispeech_v1() update_hparams_for_tpu(hparams) hparams.batch_size = 16 librispeech.set_librispeech_length_hparams(hparams) return hparams
HParams for training ASR model on LibriSpeech V2.
def transformer_librispeech_v2(): """HParams for training ASR model on LibriSpeech V2.""" hparams = transformer_base() hparams.max_length = 1240000 hparams.max_input_seq_length = 1550 hparams.max_target_seq_length = 350 hparams.batch_size = 16 hparams.num_decoder_layers = 4 hparams.num_encoder_layers = 6 hparams.hidden_size = 384 hparams.learning_rate = 0.15 hparams.daisy_chain_variables = False hparams.filter_size = 1536 hparams.num_heads = 2 hparams.ffn_layer = "conv_relu_conv" hparams.conv_first_kernel = 9 hparams.weight_decay = 0 hparams.layer_prepostprocess_dropout = 0.2 hparams.relu_dropout = 0.2 return hparams
HParams for training ASR model on Librispeech on TPU v2.
def transformer_librispeech_tpu_v2(): """HParams for training ASR model on Librispeech on TPU v2.""" hparams = transformer_librispeech_v2() update_hparams_for_tpu(hparams) hparams.batch_size = 16 librispeech.set_librispeech_length_hparams(hparams) return hparams
Hparams for machine translation with ~1.1B parameters.
def transformer_tpu_1b(): """Hparams for machine translation with ~1.1B parameters.""" hparams = transformer_tpu() hparams.hidden_size = 2048 hparams.filter_size = 8192 hparams.num_hidden_layers = 8 # smaller batch size to avoid OOM hparams.batch_size = 1024 hparams.activation_dtype = "bfloat16" hparams.weight_dtype = "bfloat16" # maximize number of parameters relative to computation by not sharing. hparams.shared_embedding_and_softmax_weights = False return hparams
HParams for training languagemodel_wikitext103_l4k.
def transformer_wikitext103_l4k_v0(): """HParams for training languagemodel_wikitext103_l4k.""" hparams = transformer_big() # Adafactor uses less memory than Adam. # switch to Adafactor with its recommended learning rate scheme. hparams.optimizer = "Adafactor" hparams.learning_rate_schedule = "rsqrt_decay" hparams.learning_rate_warmup_steps = 10000 hparams.num_heads = 4 hparams.max_length = 4096 hparams.batch_size = 4096 hparams.shared_embedding_and_softmax_weights = False hparams.num_hidden_layers = 8 hparams.attention_dropout = 0.1 hparams.layer_prepostprocess_dropout = 0.2 hparams.relu_dropout = 0.1 hparams.label_smoothing = 0.0 # Using noise broadcast in the dropout layers saves memory during training. hparams.attention_dropout_broadcast_dims = "0,1" # batch, heads hparams.relu_dropout_broadcast_dims = "1" # length hparams.layer_prepostprocess_dropout_broadcast_dims = "1" # length # Avoid an expensive concat on TPU. # >1 shards helps with faster parameter distribution on multi-GPU machines hparams.symbol_modality_num_shards = 1 return hparams
HParams for training languagemodel_wikitext103_l4k with memory.
def transformer_wikitext103_l4k_memory_v0(): """HParams for training languagemodel_wikitext103_l4k with memory.""" hparams = transformer_wikitext103_l4k_v0() hparams.split_targets_chunk_length = 64 hparams.split_targets_max_chunks = 64 hparams.split_targets_strided_training = True hparams.add_hparam("memory_type", "transformer_xl") # The hparams specify batch size *before* chunking, but we want to have a # consistent 4K batch size *after* chunking to fully utilize the hardware. target_tokens_per_batch = 4096 hparams.batch_size = int(target_tokens_per_batch * ( hparams.max_length / hparams.split_targets_chunk_length)) # 262144 hparams.pos = None hparams.self_attention_type = "dot_product_relative" hparams.max_relative_position = 2 * hparams.split_targets_chunk_length hparams.add_hparam("unconditional", True) hparams.add_hparam("recurrent_memory_batch_size", 0) # 0 = try to guess # By default, cache one chunk only (like Transformer-XL) hparams.add_hparam("num_memory_items", hparams.split_targets_chunk_length) return hparams
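The chunked batch size is easy to sanity-check by hand: the pre-chunking batch must hold enough tokens that, after every example is split into split_targets_chunk_length pieces, roughly target_tokens_per_batch tokens remain per step. A quick check of the numbers used above:

# Pure-Python check of the batch-size arithmetic used above.
max_length = 4096
split_targets_chunk_length = 64
target_tokens_per_batch = 4096
batch_size = int(target_tokens_per_batch *
                 (max_length / split_targets_chunk_length))
assert batch_size == 262144  # matches the inline comment above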
HParams for training languagemodel_wikitext103_l16k with memory.
def transformer_wikitext103_l16k_memory_v0(): """HParams for training languagemodel_wikitext103_l16k with memory.""" hparams = transformer_wikitext103_l4k_memory_v0() hparams.max_length = 16384 hparams.split_targets_chunk_length = 64 hparams.split_targets_max_chunks = int( hparams.max_length / hparams.split_targets_chunk_length) # The hparams specify batch size *before* chunking, but we want to have a # consistent 4K batch size *after* chunking to fully utilize the hardware. target_tokens_per_batch = 4096 hparams.batch_size = int(target_tokens_per_batch * ( hparams.max_length / hparams.split_targets_chunk_length)) hparams.max_relative_position = 2 * hparams.split_targets_chunk_length return hparams
HParams for training image_cifar10_plain_gen_flat_rev with memory.
def transformer_cifar10_memory_v0(): """HParams for training image_cifar10_plain_gen_flat_rev with memory.""" hparams = transformer_wikitext103_l4k_memory_v0() hparams.num_hidden_layers = 6 hparams.max_length = 32 * 32 * 3 hparams.split_targets_chunk_length = 64 * 3 hparams.split_targets_max_chunks = int( hparams.max_length / hparams.split_targets_chunk_length) hparams.num_memory_items = 128 * 3 # Since this is an image problem, batch size refers to examples (not tokens) target_images_per_batch = 4 hparams.batch_size = int(target_images_per_batch * ( hparams.max_length / hparams.split_targets_chunk_length)) # The recurrent memory needs to know the actual batch size (in sequences) hparams.recurrent_memory_batch_size = hparams.batch_size hparams.max_relative_position = ( hparams.num_memory_items + hparams.split_targets_chunk_length) return hparams
HParams for training image_imagenet64_gen_flat_rev with memory.
def transformer_imagenet64_memory_v0(): """HParams for training image_imagenet64_gen_flat_rev with memory.""" hparams = transformer_cifar10_memory_v0() hparams.max_length = 64 * 64 * 3 hparams.split_targets_chunk_length = 64 * 3 hparams.split_targets_max_chunks = int( hparams.max_length / hparams.split_targets_chunk_length) hparams.num_memory_items = 128 * 3 # Since this is an image problem, batch size refers to examples (not tokens) target_images_per_batch = 2 hparams.batch_size = int(target_images_per_batch * ( hparams.max_length / hparams.split_targets_chunk_length)) # The recurrent memory needs to know the actual batch size (in sequences) hparams.recurrent_memory_batch_size = hparams.batch_size hparams.max_relative_position = 3072 return hparams
Reshape input from 4D to 3D if necessary.
def maybe_reshape_4d_to_3d(x): """Reshape input from 4D to 3D if necessary.""" x_shape = common_layers.shape_list(x) is_4d = False if len(x_shape) == 4: x = tf.reshape(x, [x_shape[0], x_shape[1]*x_shape[2], x_shape[3]]) is_4d = True return x, x_shape, is_4d
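The helper collapses the two spatial dimensions into one so that 1-D attention layers can treat an image as a sequence, and it returns the original shape so callers can undo the collapse. The same shape logic in plain NumPy, for illustration only:

import numpy as np

x = np.zeros((2, 8, 8, 16))                  # [batch, height, width, depth]
x_shape = list(x.shape)
x3d = x.reshape(x_shape[0], x_shape[1] * x_shape[2], x_shape[3])
assert x3d.shape == (2, 64, 16)              # [batch, height*width, depth]
# Callers reshape back with the saved shape, as the attention layers above do.
assert x3d.reshape(x_shape).shape == (2, 8, 8, 16)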
Local 2d, self attention layer.
def local_attention_2d(x, hparams, attention_type="local_attention_2d"): """Local 2d, self attention layer.""" # self-attention with tf.variable_scope("local_2d_self_att"): y = common_attention.multihead_attention_2d( x, None, hparams.attention_key_channels or hparams.hidden_size, hparams.attention_value_channels or hparams.hidden_size, hparams.hidden_size, hparams.num_heads, attention_type=attention_type, query_shape=hparams.query_shape, memory_flange=hparams.memory_flange, name="self_attention") return y
Local within block self attention.
def local_within_block_attention(x, self_attention_bias, hparams, attention_type="local_within_block_mask_right", q_padding="VALID", kv_padding="VALID"): """Local within block self attention.""" x_new, x_shape, is_4d = maybe_reshape_4d_to_3d(x) with tf.variable_scope("local_within_block"): y = common_attention.multihead_attention( common_layers.layer_preprocess(x_new, hparams), None, self_attention_bias, hparams.attention_key_channels or hparams.hidden_size, hparams.attention_value_channels or hparams.hidden_size, hparams.hidden_size, hparams.num_heads, hparams.attention_dropout, attention_type=attention_type, block_width=hparams.block_width, block_length=hparams.block_length, q_padding=q_padding, kv_padding=kv_padding, q_filter_width=hparams.q_filter_width, kv_filter_width=hparams.kv_filter_width, name="local_within_block") if is_4d: y = tf.reshape(y, x_shape) return y
Local 1d self attention.
def local_attention_1d(x, hparams, attention_type="local_unmasked", q_padding="VALID", kv_padding="VALID"): """Local 1d self attention.""" # self-attention x, x_shape, is_4d = maybe_reshape_4d_to_3d(x) with tf.variable_scope("local_1d_self_att"): y = common_attention.multihead_attention( x, None, None, hparams.attention_key_channels or hparams.hidden_size, hparams.attention_value_channels or hparams.hidden_size, hparams.hidden_size, hparams.num_heads, hparams.attention_dropout, attention_type=attention_type, shared_rel=hparams.shared_rel, block_width=hparams.block_width, block_length=hparams.block_length, q_padding=q_padding, kv_padding=kv_padding, q_filter_width=hparams.q_filter_width, kv_filter_width=hparams.kv_filter_width, make_image_summary=False, name="self_attention") if is_4d: y = tf.reshape(y, x_shape) return y
Dilated attention with a masking strategy.
def get_dilated_1d_attention_mask( num_heads, block_size, num_blocks, memory_size, gap_size, name="dilated_mask"): """Dilated attention with a masking strategy.""" mask = np.ones((num_heads, block_size, 2*block_size), np.bool) # now going over every row to do the right assignment of # memory blocks for i in range(block_size): visible = 2*block_size - (block_size-i) # You always attend to yourself, set the mask for that mask[:, i, -(block_size - i)] = 0 # Maybe num_blocks can be automatically calculated? for j in range(num_blocks): for k in range(memory_size): index = ((gap_size + memory_size)*j) + k if index >= visible: break mask[:, i, -(index + block_size - i + 1)] = 0 # Verify # adding a num blocks dimension mask = np.expand_dims(mask, axis=1) return tf.constant(mask, dtype=tf.int32, name=name)
Dilated 1d self attention.
def dilated_attention_1d(x, hparams, attention_type="masked_dilated_1d", q_padding="VALID", kv_padding="VALID", gap_size=2): """Dilated 1d self attention.""" # self-attention x, x_shape, is_4d = maybe_reshape_4d_to_3d(x) with tf.variable_scope("masked_dilated_1d"): y = common_attention.multihead_attention( x, None, None, hparams.attention_key_channels or hparams.hidden_size, hparams.attention_value_channels or hparams.hidden_size, hparams.hidden_size, hparams.num_heads, hparams.attention_dropout, attention_type=attention_type, block_width=hparams.block_width, block_length=hparams.block_length, q_padding=q_padding, kv_padding=kv_padding, q_filter_width=hparams.q_filter_width, kv_filter_width=hparams.kv_filter_width, gap_size=gap_size, num_memory_blocks=hparams.num_memory_blocks, name="self_attention") if is_4d: y = tf.reshape(y, x_shape) y.set_shape([None, None, None, hparams.hidden_size]) return y
Local and global 1d self attention.
def local_global_attention(x, self_attention_bias, hparams, q_padding="LEFT", kv_padding="LEFT"): """Local and global 1d self attention.""" with tf.variable_scope("self_local_global_att"): [x_global, x_local] = tf.split(x, 2, axis=-1) split_hidden_size = int(hparams.hidden_size / 2) split_heads = int(hparams.num_heads / 2) if self_attention_bias is not None: self_attention_bias = get_self_attention_bias(x) y_global = common_attention.multihead_attention( x_global, None, self_attention_bias, hparams.attention_key_channels or split_hidden_size, hparams.attention_value_channels or split_hidden_size, split_hidden_size, split_heads, hparams.attention_dropout, q_filter_width=hparams.q_filter_width, kv_filter_width=hparams.kv_filter_width, q_padding=q_padding, kv_padding=kv_padding, name="global_self_att") y_local = common_attention.multihead_attention( x_local, None, None, hparams.attention_key_channels or split_hidden_size, hparams.attention_value_channels or split_hidden_size, split_hidden_size, split_heads, hparams.attention_dropout, attention_type="local_masked", block_length=hparams.block_length, block_width=hparams.block_width, q_filter_width=hparams.q_filter_width, kv_filter_width=hparams.kv_filter_width, q_padding=q_padding, kv_padding=kv_padding, name="local_self_att") y = tf.concat([y_global, y_local], axis=-1) return y
Full self-attention layer.
def full_self_attention(x, self_attention_bias, hparams, q_padding="LEFT", kv_padding="LEFT"): """Full self-attention layer.""" x, x_shape, is_4d = maybe_reshape_4d_to_3d(x) if self_attention_bias is not None: self_attention_bias = get_self_attention_bias(x) with tf.variable_scope("self_att"): y = common_attention.multihead_attention( x, None, self_attention_bias, hparams.attention_key_channels or hparams.hidden_size, hparams.attention_value_channels or hparams.hidden_size, hparams.hidden_size, hparams.num_heads, hparams.attention_dropout, q_filter_width=hparams.q_filter_width, kv_filter_width=hparams.kv_filter_width, q_padding=q_padding, kv_padding=kv_padding, name="self_att") if is_4d: y = tf.reshape(y, [x_shape[0], x_shape[1], x_shape[2], x_shape[3]]) y.set_shape([None, None, None, hparams.hidden_size]) return y
Encoder-decoder attention (1d).
def encdec_attention_1d(x,
                        encoder_output,
                        encoder_decoder_attention_bias,
                        hparams):
  """Encoder-decoder attention (1d)."""
  x, x_shape, is_4d = maybe_reshape_4d_to_3d(x)
  encoder_output, _, _ = maybe_reshape_4d_to_3d(encoder_output)
  with tf.variable_scope("encdec_attention"):
    # Encoder Decoder attention
    y = common_attention.multihead_attention(
        x,
        encoder_output,
        encoder_decoder_attention_bias,
        hparams.attention_key_channels or hparams.hidden_size,
        hparams.attention_value_channels or hparams.hidden_size,
        hparams.hidden_size,
        hparams.num_heads,
        hparams.attention_dropout,
        name="encdec_attention")
  if is_4d:
    y = tf.reshape(y, x_shape)
    y.set_shape([None, None, None, hparams.hidden_size])
  return y
Multi layer transformer.
def transformer_decoder_layers(inputs, encoder_output, num_layers, hparams, self_attention_bias=None, encoder_decoder_attention_bias=None, attention_type=AttentionType.LOCAL_2D, losses=None, name="transformer"): """Multi layer transformer.""" x = inputs x = tf.nn.dropout(x, 1.0 - hparams.layer_prepostprocess_dropout) if attention_type == AttentionType.DILATED: assert len(hparams.gap_sizes) == num_layers for layer in range(num_layers): with tf.variable_scope("%s_layer_%d" % (name, layer)): # self-attention + skip connections if attention_type == AttentionType.LOCAL_2D: y = local_attention_2d(common_layers.layer_preprocess(x, hparams), hparams, attention_type="masked_local_attention_2d") elif attention_type == AttentionType.LOCAL_1D: y = local_attention_1d(common_layers.layer_preprocess(x, hparams), hparams, attention_type="local_mask_right", q_padding="LEFT", kv_padding="LEFT") elif attention_type == AttentionType.RELATIVE_LOCAL_1D: y = local_attention_1d( common_layers.layer_preprocess(x, hparams), hparams, attention_type="local_relative_mask_right", q_padding="LEFT", kv_padding="LEFT") elif attention_type == AttentionType.NON_CAUSAL_1D: y = local_attention_1d(common_layers.layer_preprocess(x, hparams), hparams, attention_type="local_unmasked", q_padding="VALID", kv_padding="VALID") elif attention_type == AttentionType.LOCAL_BLOCK: y = local_within_block_attention( common_layers.layer_preprocess(x, hparams), self_attention_bias, hparams, attention_type="local_within_block_mask_right", q_padding="LEFT", kv_padding="LEFT") elif attention_type == AttentionType.GLOCAL: y = local_global_attention(common_layers.layer_preprocess(x, hparams), self_attention_bias, hparams, q_padding="LEFT", kv_padding="LEFT") elif attention_type == AttentionType.DILATED: y = dilated_attention_1d(common_layers.layer_preprocess(x, hparams), hparams, q_padding="LEFT", kv_padding="LEFT", gap_size=hparams.gap_sizes[layer]) elif attention_type == AttentionType.GLOBAL: y = full_self_attention(common_layers.layer_preprocess(x, hparams), self_attention_bias, hparams, q_padding="LEFT", kv_padding="LEFT") x = common_layers.layer_postprocess(x, y, hparams) # enc-dec attention + skip connections if encoder_output is not None: y = encdec_attention_1d(common_layers.layer_preprocess(x, hparams), encoder_output, encoder_decoder_attention_bias, hparams) x = common_layers.layer_postprocess(x, y, hparams) # feed-fwd layers + skip connections y = ffn_layer(common_layers.layer_preprocess(x, hparams), hparams, losses=losses) x = common_layers.layer_postprocess(x, y, hparams) return common_layers.layer_preprocess(x, hparams)
Multi layer transformer encoder.
def transformer_encoder_layers(inputs, num_layers, hparams, attention_type=AttentionType.GLOBAL, self_attention_bias=None, q_padding="VALID", kv_padding="VALID", name="transformer"): """Multi layer transformer encoder.""" x = inputs x = tf.nn.dropout(x, 1.0 - hparams.layer_prepostprocess_dropout) for layer in range(num_layers): # attention layers + skip connections with tf.variable_scope("%s_layer_%d" % (name, layer)): if attention_type == AttentionType.LOCAL_2D: y = local_attention_2d(common_layers.layer_preprocess(x, hparams), hparams, attention_type="local_attention_2d") elif attention_type == AttentionType.LOCAL_1D: y = local_attention_1d(common_layers.layer_preprocess(x, hparams), hparams, attention_type="local_unmasked", q_padding=q_padding, kv_padding=kv_padding) elif attention_type == AttentionType.GLOBAL: y = full_self_attention(common_layers.layer_preprocess(x, hparams), self_attention_bias, hparams, q_padding=q_padding, kv_padding=kv_padding) x = common_layers.layer_postprocess(x, y, hparams) # feed-fwd layer + skip connections y = ffn_layer(common_layers.layer_preprocess(x, hparams), hparams) x = common_layers.layer_postprocess(x, y, hparams) return common_layers.layer_preprocess(x, hparams)
Creates masked self attention bias. Args: x: A tensor of shape [batch, length, depth] Returns: self_attention_bias: A tensor of shape [1, 1, length, length]
def get_self_attention_bias(x):
  """Creates masked self attention bias.

  Args:
    x: A tensor of shape [batch, length, depth]

  Returns:
    self_attention_bias: A tensor of shape [1, 1, length, length]
  """
  x_shape = common_layers.shape_list(x)
  self_attention_bias = common_attention.attention_bias_lower_triangle(
      x_shape[1])
  return self_attention_bias
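The returned bias implements causal masking: positions that would attend to the future receive a large negative value, which the softmax turns into near-zero attention weight. A small NumPy illustration of the idea (the exact tensor shape and constant are details of common_attention):

import numpy as np

length = 4
neg_inf = -1e9  # stand-in for the constant used inside common_attention
bias = np.triu(np.full((length, length), neg_inf), k=1)
# Row i leaves positions 0..i unbiased and masks every position after i.
assert bias[2, 1] == 0 and bias[2, 3] == neg_inf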
ffn layer transformer.
def ffn_layer(x, hparams, losses=None): """ffn layer transformer.""" with tf.variable_scope("ffn"): if hparams.ffn_layer == "none": return x if hparams.ffn_layer == "conv_hidden_relu": y = common_layers.dense_relu_dense( x, hparams.filter_size, hparams.hidden_size, dropout=hparams.relu_dropout) elif hparams.ffn_layer == "normed_conv_hidden_relu": y = common_layers.normed_conv_hidden_relu( x, hparams.norm_type, hparams.layer_norm_epsilon, hparams.filter_size, hparams.hidden_size, dropout=hparams.relu_dropout, norm_name="convnorm") elif hparams.ffn_layer == "self_attention_ffn": x_shape = tf.shape(x) x = tf.reshape(x, [x_shape[0], -1, hparams.hidden_size]) y = common_attention.ffn_self_attention_layer( x, hparams.filter_size, hparams.hidden_size, hparams.num_parts, hparams.attention_dropout, hparams.share_kv) y = tf.reshape(y, x_shape) elif hparams.ffn_layer == "local_moe_tpu": overhead = (hparams.moe_overhead_train if hparams.mode == tf.estimator.ModeKeys.TRAIN else hparams.moe_overhead_eval) x, x_shape, is_4d = maybe_reshape_4d_to_3d(x) y, loss = expert_utils.local_moe_tpu( x, hparams.filter_size // 2, hparams.hidden_size, hparams.moe_num_experts, overhead=overhead, loss_coef=hparams.moe_loss_coef) if is_4d: y = tf.reshape(y, x_shape) if losses is None: raise ValueError( "transformer_ffn_layer with type local_moe_tpu must pass in " "a losses list") losses.append(loss) else: assert hparams.ffn_layer == "glu_ffn" y = common_layers.gated_linear_unit_layer(x) return y
Postprocessing after decoding. Args: x: Tensor of shape [batch, ...], where ... can be any rank such that the number of elements in x is batch * rows * cols * hparams.hidden_size. rows: Integer representing number of rows in a 2-D data point. cols: Integer representing number of columns in a 2-D data point. hparams: HParams set. Returns: Tensor of shape [batch, rows, cols, depth], where depth is hparams.num_mixtures * 10 if hparams.likelihood is DMOL, otherwise 256. In the special case of inference and block raster scan order, it is a Tensor of shape [batch, num_blocks_rows, num_block_cols, block_length, block_width, depth].
def postprocess_image(x, rows, cols, hparams): """Postprocessing after decoding. Args: x: Tensor of shape [batch, ...], where ... can be any rank such that the number of elements in x is batch * rows * cols * hparams.hidden_size. rows: Integer representing number of rows in a 2-D data point. cols: Integer representing number of columns in a 2-D data point. hparams: HParams set. Returns: Tensor of shape [batch, rows, cols, depth], where depth is hparams.num_mixtures * 10 if hparams.likelihood is DMOL, otherwise 256. In the special case of inference and block raster scan order, it is a Tensor of shape [batch, num_blocks_rows, num_block_cols, block_length, block_width, depth]. """ batch = common_layers.shape_list(x)[0] x = tf.reshape(x, [batch, rows, cols, hparams.hidden_size]) likelihood = getattr(hparams, "likelihood", DistributionType.CAT) if likelihood == DistributionType.DMOL: depth = hparams.num_mixtures * 10 targets = tf.layers.dense(x, depth, use_bias=False, activation=None, name="output_conv") else: depth = 256 targets = tf.layers.dense(x, depth, use_bias=True, activation=None, name="output_conv") if (hparams.mode == tf.estimator.ModeKeys.PREDICT and hparams.block_raster_scan): y = targets yshape = common_layers.shape_list(y) block_length = hparams.query_shape[0] block_width = hparams.query_shape[1] # Break into block row wise. y = tf.reshape(y, [batch, yshape[1] // block_length, block_length, yshape[2], depth]) yshape = common_layers.shape_list(y) # Break into blocks width wise. y_blocks = tf.reshape(y, [batch, yshape[1], yshape[2], yshape[3] // block_width, block_width, depth]) # Reshape targets as [batch, num_blocks_rows, num_block_cols, block_length, # block_width, depth]. targets = tf.transpose(y_blocks, [0, 1, 3, 2, 4, 5]) return targets
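The block raster-scan branch only rearranges axes: it cuts the [rows, cols] grid into query_shape-sized blocks and moves the block indices ahead of the within-block indices. The equivalent NumPy manipulation on a toy tensor, assuming rows and cols divide evenly by the block shape:

import numpy as np

batch, rows, cols, depth = 1, 4, 6, 3
block_length, block_width = 2, 3             # stand-in for hparams.query_shape
y = np.zeros((batch, rows, cols, depth))

# Break into block rows, then block columns, as postprocess_image does.
y = y.reshape(batch, rows // block_length, block_length, cols, depth)
y_blocks = y.reshape(batch, rows // block_length, block_length,
                     cols // block_width, block_width, depth)
targets = y_blocks.transpose(0, 1, 3, 2, 4, 5)
# [batch, num_block_rows, num_block_cols, block_length, block_width, depth]
assert targets.shape == (1, 2, 2, 2, 3, 3)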
Prepare encoder for images.
def prepare_encoder(inputs, hparams, attention_type="local_1d"): """Prepare encoder for images.""" x = prepare_image(inputs, hparams, name="enc_channels") # Add position signals. x = add_pos_signals(x, hparams, "enc_pos") x_shape = common_layers.shape_list(x) if attention_type == "local_1d": x = tf.reshape(x, [x_shape[0], x_shape[1]*x_shape[2], hparams.hidden_size]) x.set_shape([None, None, hparams.hidden_size]) elif attention_type == "local_2d": x.set_shape([None, None, None, hparams.hidden_size]) return x
Prepare decoder for images.
def prepare_decoder(targets, hparams): """Prepare decoder for images.""" targets_shape = common_layers.shape_list(targets) channels = hparams.num_channels curr_infer_length = None # during training, images are [batch, IMG_LEN, IMG_LEN, 3]. # At inference, they are [batch, curr_infer_length, 1, 1] if hparams.mode == tf.estimator.ModeKeys.PREDICT: curr_infer_length = targets_shape[1] if hparams.block_raster_scan: assert hparams.img_len*channels % hparams.query_shape[1] == 0 assert hparams.img_len % hparams.query_shape[0] == 0 total_block_width = hparams.img_len*channels # Decoding is in block raster scan order. We divide the image into # hparams.query_shape blocks and then decode each block in raster scan. # To make that compatible with our inference pipeline, pad the target so # that rows is a multiple of query_shape and columns is a multiple of # hparams.img_len*channels curr_infer_length = targets_shape[1] block_padding_factor = total_block_width * hparams.query_shape[0] targets = tf.pad(targets, [ [0, 0], [0, -curr_infer_length % block_padding_factor], [0, 0], [0, 0]]) num_blocks = total_block_width // hparams.query_shape[1] # Reshape the image to represent blocks target_blocks = tf.reshape( targets, [targets_shape[0], -1, num_blocks, hparams.query_shape[0], hparams.query_shape[1]]) # Transpose to read the image in 2D fashion. targets = tf.transpose(target_blocks, [0, 1, 3, 2, 4]) else: # add padding to make sure the size of targets is a multiple of img_height # times number of channels. This is needed for positional encodings and # for doing the RGB lookup. padding_factor = channels * hparams.img_len targets = tf.pad(targets, [ [0, 0], [0, -curr_infer_length % padding_factor], [0, 0], [0, 0]]) targets = tf.reshape(targets, [targets_shape[0], -1, hparams.img_len, channels]) # Preprocess image x = prepare_image(targets, hparams, name="dec_channels") x_shape = common_layers.shape_list(x) if (hparams.dec_attention_type == AttentionType.LOCAL_2D or hparams.dec_attention_type == AttentionType.LOCAL_BLOCK): x = common_attention.right_shift_blockwise(x, hparams.query_shape) x = add_pos_signals(x, hparams, "dec_pos") else: # Add position signals x = tf.reshape(x, [targets_shape[0], x_shape[1]*x_shape[2], hparams.hidden_size]) x = common_layers.shift_right_3d(x) x = tf.reshape(x, [targets_shape[0], x_shape[1], x_shape[2], hparams.hidden_size]) x = add_pos_signals(x, hparams, "dec_pos") x = common_layers.cast_like(x, targets) return x, x_shape[1], x_shape[2]
Creates output from decoder output and vars. Args: decoder_output: Tensor of shape [batch, ...], where ... can be any rank such that the number of elements is batch * rows * cols * hparams.hidden_size. rows: Integer representing number of rows in a 2-D data point. cols: Integer representing number of columns in a 2-D data point. targets: Tensor of shape [batch, hparams.img_len, hparams.img_len, hparams.num_channels]. hparams: HParams set. Returns: Tensor of shape [batch, hparams.img_len, hparams.img_len, hparams.num_mixtures * 10] if hparams.likelihood is DMOL, otherwise [batch, hparams.img_len, hparams.img_len, hparams.num_channels, 256]. In the special case of predict mode, it is a Tensor of rank 5.
def create_output(decoder_output, rows, cols, targets, hparams): """Creates output from decoder output and vars. Args: decoder_output: Tensor of shape [batch, ...], where ... can be any rank such that the number of elements is batch * rows * cols * hparams.hidden_size. rows: Integer representing number of rows in a 2-D data point. cols: Integer representing number of columns in a 2-D data point. targets: Tensor of shape [batch, hparams.img_len, hparams.img_len, hparams.num_channels]. hparams: HParams set. Returns: Tensor of shape [batch, hparams.img_len, hparams.img_len, hparams.num_mixtures * 10] if hparams.likelihood is DMOL, otherwise [batch, hparams.img_len, hparams.img_len, hparams.num_channels, 256]. In the special case of predict mode, it is a Tensor of rank 5. """ del targets # unused arg decoded_image = postprocess_image(decoder_output, rows, cols, hparams) batch = common_layers.shape_list(decoded_image)[0] depth = common_layers.shape_list(decoded_image)[-1] likelihood = getattr(hparams, "likelihood", DistributionType.CAT) if hparams.mode == tf.estimator.ModeKeys.PREDICT: y = tf.reshape(decoded_image, [batch, -1, 1, 1, depth]) output = y[:, :rows, :, :, :] elif likelihood == DistributionType.CAT: # Unpack the cols dimension of the Categorical. channels = hparams.num_channels output = tf.reshape(decoded_image, [batch, rows, cols // channels, channels, depth]) else: output = decoded_image return output
Get separate embedding for each of the channels.
def get_channel_embeddings(io_depth, targets, hidden_size, name="channel"): """Get separate embedding for each of the channels.""" targets_split = tf.split(targets, io_depth, axis=3) rgb_embedding_var = tf.get_variable("rgb_target_emb_%s" % name, [256 * io_depth, hidden_size]) rgb_embedding_var = tf.identity(rgb_embedding_var) rgb_embedding_var *= float(hidden_size)**0.5 channel_target_embs = [] for i in range(io_depth): # Adding the channel offsets to get the right embedding since the # embedding tensor has shape 256 * io_depth, hidden_size target_ids = tf.squeeze(targets_split[i], axis=3) + i * 256 target_embs = common_layers.gather(rgb_embedding_var, target_ids) channel_target_embs.append(target_embs) return tf.concat(channel_target_embs, axis=-1)
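The single embedding table holds 256 rows per channel, so the lookup index for channel i is pixel_value + i * 256. A NumPy illustration of that offset logic (names and sizes are illustrative; the sqrt(hidden_size) scaling is omitted):

import numpy as np

io_depth, hidden_size = 3, 8
rgb_embedding = np.random.randn(256 * io_depth, hidden_size)

pixel = np.array([10, 200, 77])              # one RGB pixel, values in [0, 256)
per_channel = [rgb_embedding[pixel[i] + i * 256] for i in range(io_depth)]
embedded = np.concatenate(per_channel, axis=-1)
assert embedded.shape == (io_depth * hidden_size,)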
Step the batch of environments. The results of the step can be accessed from the variables defined below. Args: action: Tensor holding the batch of actions to apply. Returns: Operation.
def simulate(self, action): """Step the batch of environments. The results of the step can be accessed from the variables defined below. Args: action: Tensor holding the batch of actions to apply. Returns: Operation. """ with tf.name_scope("environment/simulate"): if action.dtype in (tf.float16, tf.float32, tf.float64): action = tf.check_numerics(action, "action") def step(action): step_response = self._batch_env.step(action) # Current env doesn't return `info`, but EnvProblem does. # TODO(afrozm): The proper way to do this is to make T2TGymEnv return # an empty info return value. if len(step_response) == 3: (observ, reward, done) = step_response else: (observ, reward, done, _) = step_response return (observ, reward.astype(np.float32), done) observ, reward, done = tf.py_func( step, [action], [self.observ_dtype, tf.float32, tf.bool], name="step") reward = tf.check_numerics(reward, "reward") reward.set_shape((len(self),)) done.set_shape((len(self),)) with tf.control_dependencies([self._observ.assign(observ)]): return tf.identity(reward), tf.identity(done)
Reset the batch of environments. Args: indices: The batch indices of the environments to reset; defaults to all. Returns: Batch tensor of the new observations.
def _reset_non_empty(self, indices): """Reset the batch of environments. Args: indices: The batch indices of the environments to reset; defaults to all. Returns: Batch tensor of the new observations. """ observ = tf.py_func( self._batch_env.reset, [indices], self.observ_dtype, name="reset") observ.set_shape(indices.get_shape().concatenate(self.observ_shape)) with tf.control_dependencies([ tf.scatter_update(self._observ, indices, observ)]): return tf.identity(observ)
Decide whether to include a revision. If the number of revisions is large, we exclude some revisions to avoid a quadratic blowup in runtime, since the article is likely also large. We make the ratio between consecutive included revision numbers approximately equal to "skip_factor". Args: revision_num: an integer skip_factor: a floating point number >= 1.0 Returns: a boolean
def include_revision(revision_num, skip_factor=1.1):
  """Decide whether to include a revision.

  If the number of revisions is large, we exclude some revisions to avoid
  a quadratic blowup in runtime, since the article is likely also large.

  We make the ratio between consecutive included revision numbers
  approximately equal to "skip_factor".

  Args:
    revision_num: an integer
    skip_factor: a floating point number >= 1.0

  Returns:
    a boolean
  """
  if skip_factor <= 1.0:
    return True
  return (int(math.log1p(revision_num) / math.log(skip_factor)) != int(
      math.log(revision_num + 2.0) / math.log(skip_factor)))
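Because the kept revision numbers are spaced roughly geometrically, the fraction retained shrinks as the revision count grows. A quick way to see which revision numbers survive for a given skip_factor, assuming include_revision above is importable:

kept = [n for n in range(12) if include_revision(n, skip_factor=1.5)]
print(kept)  # -> [0, 1, 2, 4, 6, 10]; later revisions get ever sparser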
Read wikipedia pages from a history dump. Since some pages can be terabytes in size (with all the revisions), we limit page size to max_page_size bytes. Args: my_file: an open file object. max_page_size: an integer Yields: dictionaries, one per page, as returned by parse_page
def file_page_generator(my_file, max_page_size=2**28):
  """Read wikipedia pages from a history dump.

  Since some pages can be terabytes in size (with all the revisions),
  we limit page size to max_page_size bytes.

  Args:
    my_file: an open file object.
    max_page_size: an integer

  Yields:
    dictionaries, one per page, as returned by parse_page
  """
  page_start = " <page>\n"
  page_end = " </page>\n"
  chunk_size = max_page_size
  leftovers = ""
  while True:
    chunk = my_file.read(chunk_size)
    if not chunk:
      break
    chunk = leftovers + chunk
    current_pos = 0
    while True:
      start_pos = chunk.find(page_start, current_pos)
      if start_pos == -1:
        break
      end_pos = chunk.find(page_end, start_pos)
      if end_pos == -1:
        if len(chunk) - start_pos > max_page_size:
          leftovers = ""
        else:
          leftovers = chunk[start_pos:]
        break
      raw_page = chunk[start_pos + len(page_start):end_pos]
      if len(raw_page) < max_page_size:
        ret = parse_page(raw_page)
        if ret:
          yield ret
      current_pos = end_pos + len(page_end)
Extract the title from a page. Args: page: a string Returns: a string
def get_title(page): """Extract the title from a page. Args: page: a string Returns: a string """ start_pos = page.find("<title>") end_pos = page.find("</title>") assert start_pos != -1 assert end_pos != -1 start_pos += len("<title>") return text_encoder.to_unicode_utf8(page[start_pos:end_pos])
Extract the id from a page. Args: page: a string Returns: an integer
def get_id(page): """Extract the id from a page. Args: page: a string Returns: an integer """ start_pos = page.find("<id>") end_pos = page.find("</id>") assert start_pos != -1 assert end_pos != -1 start_pos += len("<id>") return int(page[start_pos:end_pos])
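get_title and get_id work on raw substring searches rather than a real XML parser, so a synthetic page fragment is enough to see the behavior (the snippet below is made up for illustration, and assumes the functions above are importable):

page = "<title>Example article</title>\n  <id>12345</id>\n(rest of the page)"
assert get_title(page) == "Example article"
assert get_id(page) == 12345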
Extract the revisions of a page. Args: page: a string Returns: a list of strings
def get_revisions(page): """Extract the revisions of a page. Args: page: a string Returns: a list of strings """ start_string = " <revision>\n" end_string = " </revision>\n" ret = [] current_pos = 0 while True: start_pos = page.find(start_string, current_pos) if start_pos == -1: break end_pos = page.find(end_string, start_pos) assert end_pos != -1 ret.append(page[start_pos + len(start_string):end_pos]) current_pos = end_pos + len(end_string) return ret
Create a dictionary with title, id, and list of revisions. The dictionary contains: "title": a string "id": an integer "revisions": a list of strings Args: raw_page: a string Returns: a dictionary, or None in the case of an error.
def parse_page(raw_page): """Create a dictionary with title, id, and list of revisions. The dictionary contains: "title": a string "id": an integer "revisions": a list of strings Args: raw_page: a string Returns: a dictionary, or None in the case of an error. """ ret = {"title": get_title(raw_page), "id": get_id(raw_page)} if ":" in ret["title"]: return None ret["revisions"] = get_revisions(raw_page) return ret
Copy a file to a directory if it is not already there. Returns the target filepath. Args: source_filepath: a string target_directory: a string Returns: a string
def maybe_copy_file_to_directory(source_filepath, target_directory): """Copy a file to a directory if it is not already there. Returns the target filepath. Args: source_filepath: a string target_directory: a string Returns: a string """ if not tf.gfile.Exists(target_directory): tf.logging.info("Creating directory %s" % target_directory) os.mkdir(target_directory) target_filepath = os.path.join(target_directory, os.path.basename(source_filepath)) if not tf.gfile.Exists(target_filepath): tf.logging.info("Copying %s to %s" % (source_filepath, target_filepath)) tf.gfile.Copy(source_filepath, target_filepath) statinfo = os.stat(target_filepath) tf.logging.info("Successfully copied %s, %s bytes." % (target_filepath, statinfo.st_size)) else: tf.logging.info("Not copying, file already found: %s" % target_filepath) return target_filepath
Generate pages from a list of .7z encoded history dumps. Args: corpus_files: a list of strings tmp_dir: a string max_page_size_exp: an integer Yields: strings
def corpus_page_generator(corpus_files, tmp_dir, max_page_size_exp): """Generate pages from a list of .7z encoded history dumps. Args: corpus_files: a list of strings tmp_dir: a string max_page_size_exp: an integer Yields: strings """ for remote_filepath in corpus_files: filepath = maybe_copy_file_to_directory(remote_filepath, tmp_dir) tf.logging.info("Reading from " + filepath) command = ["7z", "x", "-so", filepath] tf.logging.info("Running command: %s", command) p = subprocess.Popen(command, stdout=subprocess.PIPE, bufsize=-1) for page in file_page_generator(p.stdout, 2**max_page_size_exp): yield page
Extract the text from a revision. Args: revision: a string strip: a boolean Returns: a string
def get_text(revision, strip=True): """Extract the text from a revision. Args: revision: a string strip: a boolean Returns: a string """ # text start tag looks like "<text ..otherstuff>" start_pos = revision.find("<text") assert start_pos != -1 end_tag_pos = revision.find(">", start_pos) assert end_tag_pos != -1 end_tag_pos += len(">") end_pos = revision.find("</text>") if end_pos == -1: ret = "" else: ret = revision[end_tag_pos:end_pos] if strip: ret = strip_text(ret) ret = text_encoder.to_unicode_utf8(ret) return ret
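A small illustration of get_text on a synthetic revision string, assuming the function above is importable; strip=False skips the wiki-markup stripping step (strip_text) defined elsewhere in the module:

revision = '<text xml:space="preserve">Plain revision body.</text>'
assert get_text(revision, strip=False) == "Plain revision body."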
Remove everything in curly braces. Curly braces may be nested, so we keep track of depth. Args: text: a string Returns: a string
def _remove_curly_braces(text): """Remove everything in curly braces. Curly braces may be nested, so we keep track of depth. Args: text: a string Returns: a string """ current_pos = 0 depth = 0 ret = "" for match in re.finditer("[{}]", text): if depth == 0: ret += text[current_pos:match.start()] depth += 1 if text[match.start()] == "{" else -1 current_pos = match.end() if depth != 0: # Many articles have mismatched braces, but it still seems better to remove # them than not. pass else: ret += text[current_pos:] return ret
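A quick illustration of the depth tracking, assuming _remove_curly_braces above is importable; everything inside the outermost braces is dropped, including nested templates:

wikitext = "Keep this {{Infobox | nested = {stuff}}} and this."
print(_remove_curly_braces(wikitext))  # -> "Keep this  and this."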
Remove double brackets, but leave the viewable text. Args: text: a string Returns: a string
def _remove_double_brackets(text): """Remove double brackets, but leave the viewable text. Args: text: a string Returns: a string """ def replacement_fn(s): if ":" in s: # this is probably a category or something like that. return "" # keep the part after the bar. bar_pos = s.find("|") if bar_pos == -1: return s return s[bar_pos + 1:] return _find_and_replace(text, "[[", "]]", replacement_fn)
Remove lines that do not start with a letter or a quote. From inspecting the data, this seems to leave in most prose and remove most weird stuff. Args: text: a string Returns: a string
def _remove_boring_lines(text):
  """Remove lines that do not start with a letter or a quote.

  From inspecting the data, this seems to leave in most prose and remove
  most weird stuff.

  Args:
    text: a string
  Returns:
    a string
  """
  lines = text.split("\n")
  filtered = [line for line in lines if re.match("[a-zA-Z\"\']", line)]
  return "\n".join(filtered)
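An illustration of the line filter on a few typical wiki-dump lines, assuming _remove_boring_lines above is importable:

sample = "A real sentence.\n| style=width:100%\n'Quoted prose survives.'\n== Header =="
print(_remove_boring_lines(sample))
# -> "A real sentence.\n'Quoted prose survives.'"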
Get or generate the vocabulary. Args: data_dir: a string tmp_dir: a string data_prefix: a string max_page_size_exp: an integer approx_vocab_size: an integer strip: a boolean Returns: a TextEncoder
def get_or_generate_vocabulary(data_dir, tmp_dir, data_prefix, max_page_size_exp, approx_vocab_size=32768, strip=True): """Get or generate the vocabulary. Args: data_dir: a string tmp_dir: a string data_prefix: a string max_page_size_exp: an integer approx_vocab_size: an integer strip: a boolean Returns: a TextEncoder """ num_pages_for_vocab_generation = approx_vocab_size // 3 vocab_file = vocab_filename(approx_vocab_size, strip) def my_generator(data_prefix): """Line generator for vocab.""" count = 0 for page in corpus_page_generator( all_corpus_files(data_prefix)[::-1], tmp_dir, max_page_size_exp): revisions = page["revisions"] if revisions: text = get_text(revisions[-1], strip=strip) yield text count += 1 if count % 100 == 0: tf.logging.info("reading pages for vocab %d" % count) if count > num_pages_for_vocab_generation: break return generator_utils.get_or_generate_vocab_inner(data_dir, vocab_file, approx_vocab_size, my_generator(data_prefix))
Get encoder from vocab file. If vocab is not found in output dir, it will be copied there by copy_vocab_to_output_dir to clarify the vocab used to generate the data. Args: vocab_filepath: path to vocab, either local or cns Returns: A SubwordTextEncoder vocabulary object. None if the output_parallel_text is set.
def get_encoder_from_vocab(vocab_filepath): """Get encoder from vocab file. If vocab is not found in output dir, it will be copied there by copy_vocab_to_output_dir to clarify the vocab used to generate the data. Args: vocab_filepath: path to vocab, either local or cns Returns: A SubwordTextEncoder vocabulary object. None if the output_parallel_text is set. """ if not tf.gfile.Exists(vocab_filepath): raise ValueError("Vocab file does not exist: {}.".format(vocab_filepath)) tf.logging.info("Found vocab file: %s", vocab_filepath) encoder = text_encoder.SubwordTextEncoder(vocab_filepath) return encoder
Filter out examples that exceed max_edit_ratio between source and target. Args: source_target_input: a list of [source, target] pairs max_equal_to_diff_ratio: cutoff for ratio of equal chars / diff chars between source and target Returns: source_target_output: filtered subset of [source, target] input pairs thrown_out_count: number of examples filtered out
def edit_distance_filter(source_target_input, max_equal_to_diff_ratio=0): """Filter out examples that exceed max_edit_ratio between source and target. Args: source_target_input: a list of [source, target] pairs max_equal_to_diff_ratio: cutoff for ratio of equal chars / diff chars between source and target Returns: source_target_output: filtered subset of [source, target] input pairs thrown_out_count: number of examples filtered out """ thrown_out_count = 0 source_target_output = [] if not max_equal_to_diff_ratio: return source_target_input, thrown_out_count for src_tgt in source_target_input: opcodes = fast_match_sequences(*src_tgt) diff_char_count = 0 equal_char_count = 0 for tag, i1, i2, j1, j2 in opcodes: if tag == "diff": # max() prevents double-counting substitutions. diff_char_count += max(i2 - i1, j2 - j1) else: equal_char_count += i2 - i1 if diff_char_count <= max_equal_to_diff_ratio * equal_char_count: source_target_output.append(src_tgt) else: thrown_out_count += 1 return source_target_output, thrown_out_count
Artificially add spelling errors and infill markers. This function should be applied to the inputs of a correction model. The artificial errors are particularly useful to train a network to correct spelling when the training data does not contain many natural errors. Also replaces some substrings with an "infill" marker. e.g. "the fat cat sat on the mat" -> "the fat ca??? the mat" This causes the trained model to learn infilling (predicting what text to insert at the current cursor position). Args: s: a string (the uncorrupted text) corruption_rate: a floating point value. Probability of introducing an error/infill at each character. infill_marker: a string max_infill_len: an optional integer - maximum number of characters to remove and replace by an infill marker. None means no infilling. Returns: a pair: the corrupted string and the number of errors introduced
def introduce_errors(s,
                     corruption_rate=3e-3,
                     infill_marker="|?|",
                     max_infill_len=8):
  """Artificially add spelling errors and infill markers.

  This function should be applied to the inputs of a correction model.
  The artificial errors are particularly useful to train a network to
  correct spelling when the training data does not contain many natural
  errors.

  Also replaces some substrings with an "infill" marker.  e.g.
  "the fat cat sat on the mat" -> "the fat ca??? the mat"

  This causes the trained model to learn infilling (predicting what text
  to insert at the current cursor position).

  Args:
    s: a string (the uncorrupted text)
    corruption_rate: a floating point value.  Probability of introducing an
      error/infill at each character.
    infill_marker: a string
    max_infill_len: an optional integer - maximum number of characters to
      remove and replace by an infill marker.  None means no infilling.

  Returns:
    a pair: the corrupted string and the number of errors introduced
  """
  num_errors = 0
  ret = []
  operations = [
      "delete",     # delete a character
      "insert",     # insert a random character from the input string
      "replace",    # replace a character with a random character from
                    # the input string
      "transpose",  # transpose two adjacent characters
  ]
  if max_infill_len:
    operations.append("infill")
  pos = 0
  while pos < len(s):
    if random.random() >= corruption_rate:
      ret.append(s[pos])
      pos += 1
      continue
    num_errors += 1
    operation = operations[random.randint(0, len(operations) - 1)]
    if operation == "delete":
      pos += 1
    elif operation == "insert":
      ret.append(s[random.randint(0, len(s) - 1)])
    elif operation == "replace":
      ret.append(s[random.randint(0, len(s) - 1)])
      pos += 1
    elif operation == "transpose":
      ret.append(s[pos + 1] if pos + 1 < len(s) else "")
      ret.append(s[pos])
      pos += 2
    else:
      assert operation == "infill"
      ret.append(infill_marker)
      pos += random.randint(0, max_infill_len)
  return "".join(ret), num_errors
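A usage sketch, assuming introduce_errors above is importable; an unrealistically high corruption_rate is used only to make the effect visible, and the output depends on the random seed:

import random

random.seed(0)
corrupted, num_errors = introduce_errors(
    "the fat cat sat on the mat", corruption_rate=0.1)
print(corrupted, num_errors)
# The corrupted string (possibly containing "|?|" infill markers) is the model
# input; the original clean string is the training target.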
Compute diffs between two sequences. This function is similar in functionality and spirit to difflib.SequenceMatcher.get_opcodes, but it seems to run faster. if a_start, a_end, b_start, b_end are specified, then we compute diffs of the segments a[a_start:a_end] and b[b_start:b_end]. Returned indices are relative to the full sequence. We try to match the longest matching segments first, but due to heuristics in finding the matches, this is not guaranteed. Matching segments shorter than min_match_length are counted as part of the surrounding differing segments, unless they are at the beginning or end of both sequences. This helps eliminate junk matches. Args: a: a sequence b: a sequence a_start: an optional integer a_end: an optional integer b_start: an optional integer b_end: an optional integer min_match_length: an integer max_recursion_depth: an integer - avoids crashes in weird corner cases involving pairs of long repetitive sequences. Returns: a list of 5-tuples (tag, i1, i2, j1, j2). Each tuple represents the alignment of segment a[i1:i2] with b[j1:j2]. tag is either "equal" or "diff". Note that the tags differ from those returned by difflib.SequenceMatcher.get_opcodes.
def fast_match_sequences(a, b, a_start=0, a_end=None, b_start=0, b_end=None, min_match_length=3, max_recursion_depth=128): """Compute diffs between two sequences. This function is similar in functionality and spirit to difflib.SequenceMatcher.get_opcodes, but it seems to run faster. if a_start, a_end, b_start, b_end are specified, then we compute diffs of the segments a[a_start:a_end] and b[b_start:b_end]. Returned indices are relative to the full sequence. We try to match the longest matching segments first, but due to heuristics in finding the matches, this is not guaranteed. Matching segments shorter than min_match_length are counted as part of the surrounding differing segments, unless they are at the beginning or end of both sequences. This helps eliminate junk matches. Args: a: a sequence b: a sequence a_start: an optional integer a_end: an optional integer b_start: an optional integer b_end: an optional integer min_match_length: an integer max_recursion_depth: an integer - avoids crashes in weird corner cases involving pairs of long repetitive sequences. Returns: a list of 5-tuples (tag, i1, i2, j1, j2). Each tuple represents the alignment of segment a[i1:i2] with b[j1:j2]. tag is either "equal" or "diff". Note that the tags differ from those returned by difflib.SequenceMatcher.get_opcodes. """ if a_end is None: a_end = len(a) if b_end is None: b_end = len(b) if a_start == a_end and b_start == b_end: return [] if a_start == a_end or b_start == b_end: return [("diff", a_start, a_end, b_start, b_end)] # Compute an index from value to first occurrence in the b segment. # Technically, we should index and explore all occurrences of a value, # but that might be much slower. b_index = {} for j in range(b_end - 1, b_start - 1, -1): b_index[b[j]] = j # we will look for the longest match we can find. max_match_length = 0 a_pos = a_start while a_pos < a_end: val = a[a_pos] b_pos = b_index.get(val) if b_pos is None: a_pos += 1 continue else: a_match_start = a_pos a_match_end = a_pos + 1 b_match_start = b_pos b_match_end = b_pos + 1 while (a_match_start > a_start and b_match_start > b_start and a[a_match_start - 1] == b[b_match_start - 1]): a_match_start -= 1 b_match_start -= 1 while (a_match_end < a_end and b_match_end < b_end and a[a_match_end] == b[b_match_end]): a_match_end += 1 b_match_end += 1 # Compute the length of the matching segment. We prefer the longest. match_length = a_match_end - a_match_start # Extra credit for matching at the beginning or end of the sequence. if a_match_start == 0 and b_match_start == 0: match_length += min_match_length if a_match_end == len(a) and b_match_end == len(b): match_length += min_match_length if match_length > max_match_length: max_match_length = match_length best_match = (a_match_start, a_match_end, b_match_start, b_match_end) # advance a_pos to the end of this match to avoid wasting time # rediscovering this match. a_pos = a_match_end if max_match_length < min_match_length or max_recursion_depth == 0: return [("diff", a_start, a_end, b_start, b_end)] a_match_start, a_match_end, b_match_start, b_match_end = best_match return (fast_match_sequences( a, b, a_start, a_match_start, b_start, b_match_start, min_match_length, max_recursion_depth - 1) + [ ("equal", a_match_start, a_match_end, b_match_start, b_match_end) ] + fast_match_sequences(a, b, a_match_end, a_end, b_match_end, b_end, min_match_length, max_recursion_depth - 1))
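A usage sketch of the diff output, assuming fast_match_sequences above is importable; each 5-tuple aligns a segment of the first sequence with a segment of the second:

old = "the quick brown fox jumped over the dog"
new = "the quick red fox jumps over the dog"
for tag, i1, i2, j1, j2 in fast_match_sequences(old, new):
  print(tag, repr(old[i1:i2]), "->", repr(new[j1:j2]))
# "equal" segments are shared text; "diff" segments are the edits that
# edit_distance_filter above counts when deciding whether to keep a pair.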
Load variables from checkpoint. New model variables have the following name format: new_model_scope/old_model_scope/xxx/xxx:0 To find the map of name to variable, need to strip the new_model_scope and then match the old_model_scope and remove the suffix :0.
def begin(self):
  """Load variables from checkpoint.

  New model variables have the following name format:
    new_model_scope/old_model_scope/xxx/xxx:0
  To find the map of name to variable, need to strip the new_model_scope
  and then match the old_model_scope and remove the suffix :0.
  """
  variables_to_restore = tf.contrib.framework.get_variables_to_restore(
      include=self._include, exclude=self._exclude)
  # remove new_model_scope from variable name prefix
  assignment_map = {variable.name[len(self._new_model_scope):]: variable
                    for variable in variables_to_restore
                    if variable.name.startswith(self._new_model_scope)}
  # remove :0 from variable name suffix
  assignment_map = {name.split(":")[0]: variable
                    for name, variable in six.iteritems(assignment_map)
                    if name.startswith(self._old_model_scope)}
  self._assignment_map = assignment_map
  tf.logging.info("restoring %d variables from checkpoint %s" % (
      len(assignment_map), self._checkpoint_path))
  tf.train.init_from_checkpoint(self._checkpoint_path, self._assignment_map)
Creates a TimeStep with both rewards and actions as optional.
def create_time_step(cls, observation=None, done=False, raw_reward=None, processed_reward=None, action=None): """Creates a TimeStep with both rewards and actions as optional.""" return cls(observation, done, raw_reward, processed_reward, action)
Complete attention layer with preprocessing.
def attention(targets_shifted, inputs_encoded, norm_fn, hparams, bias=None): """Complete attention layer with preprocessing.""" separabilities = [hparams.separability, hparams.separability] if hparams.separability < 0: separabilities = [hparams.separability - 1, hparams.separability] targets_timed = common_layers.subseparable_conv_block( common_layers.add_timing_signal(targets_shifted), hparams.hidden_size, [((1, 1), (5, 1)), ((4, 1), (5, 1))], normalizer_fn=norm_fn, padding="LEFT", separabilities=separabilities, name="targets_time") if hparams.attention_type == "transformer": targets_timed = tf.squeeze(targets_timed, 2) target_shape = tf.shape(targets_timed) targets_segment = tf.zeros([target_shape[0], target_shape[1]]) target_attention_bias = common_attention.attention_bias( targets_segment, targets_segment, lower_triangular=True) inputs_attention_bias = tf.zeros([ tf.shape(inputs_encoded)[0], hparams.num_heads, tf.shape(targets_segment)[1], tf.shape(inputs_encoded)[1] ]) qv = common_attention.multihead_attention( targets_timed, None, target_attention_bias, hparams.hidden_size, hparams.hidden_size, hparams.hidden_size, hparams.num_heads, hparams.attention_dropout, name="self_attention") qv = common_attention.multihead_attention( qv, inputs_encoded, inputs_attention_bias, hparams.hidden_size, hparams.hidden_size, hparams.hidden_size, hparams.num_heads, hparams.attention_dropout, name="encdec_attention") return tf.expand_dims(qv, 2) elif hparams.attention_type == "simple": targets_with_attention = common_layers.simple_attention( targets_timed, inputs_encoded, bias=bias) return norm_fn(targets_shifted + targets_with_attention, name="attn_norm")