Default output directory.
def _default_output_dir():
  """Default output directory."""
  try:
    dataset_name = gin.query_parameter("inputs.dataset_name")
  except ValueError:
    dataset_name = "random"
  dir_name = "{model_name}_{dataset_name}_{timestamp}".format(
      model_name=gin.query_parameter("train.model").configurable.name,
      dataset_name=dataset_name,
      timestamp=datetime.datetime.now().strftime("%Y%m%d_%H%M"),
  )
  dir_path = os.path.join("~", "trax", dir_name)
  print()
  trax.log("No --output_dir specified")
  return dir_path
Setup gin configuration.
def _setup_gin():
  """Setup gin configuration."""
  # Imports for configurables
  # pylint: disable=g-import-not-at-top,unused-import,g-bad-import-order,reimported,unused-variable
  from tensor2tensor.trax import models as _trax_models
  from tensor2tensor.trax import optimizers as _trax_opt
  # pylint: disable=g-import-not-at-top,unused-import,g-bad-import-order,reimported,unused-variable
  configs = FLAGS.config or []
  # Override with --dataset and --model
  if FLAGS.dataset:
    configs.append("inputs.dataset_name='%s'" % FLAGS.dataset)
  if FLAGS.data_dir:
    configs.append("inputs.data_dir='%s'" % FLAGS.data_dir)
  if FLAGS.model:
    configs.append("train.model=@trax.models.%s" % FLAGS.model)
  gin.parse_config_files_and_bindings(FLAGS.config_file, configs)
Return train and evaluation datasets, feature info and supervised keys. Args: dataset_name: a string, the name of the dataset; if it starts with "v1_" then we'll search T2T Problem registry for it, otherwise we assume it is a dataset from TFDS and load it from there. data_dir: directory where the data is located. Returns: a 4-tuple consisting of: * the train tf.data.Dataset * the eval tf.data.Dataset * information about features: a python dictionary with feature names as keys and an object as value that provides .shape and .num_classes. * supervised_keys: information what's the input and what's the target, ie., a pair of lists with input and target feature names.
def train_and_eval_dataset(dataset_name, data_dir):
  """Return train and evaluation datasets, feature info and supervised keys.

  Args:
    dataset_name: a string, the name of the dataset; if it starts with "v1_"
      then we'll search T2T Problem registry for it, otherwise we assume it
      is a dataset from TFDS and load it from there.
    data_dir: directory where the data is located.

  Returns:
    a 4-tuple consisting of:
     * the train tf.data.Dataset
     * the eval tf.data.Dataset
     * information about features: a python dictionary with feature names
       as keys and an object as value that provides .shape and .num_classes.
     * supervised_keys: information what's the input and what's the target,
       ie., a pair of lists with input and target feature names.
  """
  if dataset_name.startswith("v1_"):
    return _train_and_eval_dataset_v1(dataset_name[3:], data_dir)
  dataset_builder = tfds.builder(dataset_name, data_dir=data_dir)
  info = dataset_builder.info
  splits = dataset_builder.info.splits
  if tfds.Split.TRAIN not in splits:
    raise ValueError("To train we require a train split in the dataset.")
  if tfds.Split.VALIDATION not in splits and "test" not in splits:
    raise ValueError("We require a validation or test split in the dataset.")
  eval_split = tfds.Split.VALIDATION
  if tfds.Split.VALIDATION not in splits:
    eval_split = tfds.Split.TEST
  train, valid = tfds.load(
      name=dataset_name, split=[tfds.Split.TRAIN, eval_split])
  keys = None
  if info.supervised_keys:
    keys = ([info.supervised_keys[0]], [info.supervised_keys[1]])
  return train, valid, info.features, keys
Create an info-like tuple for feature given some shapes and vocab size.
def _make_info(shape_list, num_classes):
  """Create an info-like tuple for feature given some shapes and vocab size."""
  feature_info = collections.namedtuple("FeatureInfo", ["shape", "num_classes"])
  cur_shape = list(shape_list[0])
  # We need to merge the provided shapes, put None where they disagree.
  for shape in shape_list:
    if len(shape) != len(cur_shape):
      raise ValueError("Shapes need to have the same number of dimensions.")
    for i in range(len(shape)):
      if cur_shape[i] is not None:
        if shape[i] != cur_shape[i]:
          cur_shape[i] = None
  return feature_info(cur_shape, num_classes)
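A minimal sketch of how the shape merging behaves, assuming the `_make_info` helper above is in scope; the feature shapes below are made up for illustration.

# Hypothetical shapes observed for a feature across a few examples.
shapes = [[32, 32, 3], [64, 32, 3], [48, 32, 3]]

info = _make_info(shapes, num_classes=256)
print(info.shape)        # [None, 32, 3] -- dimensions that disagree become None
print(info.num_classes)  # 256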
Select a subset of features from the example dict.
def _select_features(example, feature_list=None):
  """Select a subset of features from the example dict."""
  feature_list = feature_list or ["inputs", "targets"]
  return {f: example[f] for f in feature_list}
Return train and evaluation datasets, feature info and supervised keys.
def _train_and_eval_dataset_v1(problem_name, data_dir):
  """Return train and evaluation datasets, feature info and supervised keys."""
  problem = problems.problem(problem_name)
  train_dataset = problem.dataset(tf.estimator.ModeKeys.TRAIN, data_dir)
  train_dataset = train_dataset.map(_select_features)
  eval_dataset = problem.dataset(tf.estimator.ModeKeys.EVAL, data_dir)
  eval_dataset = eval_dataset.map(_select_features)
  supervised_keys = (["inputs"], ["targets"])
  hparams = problem.get_hparams()
  # We take a few training examples to guess the shapes.
  input_shapes, target_shapes = [], []
  for example in train_dataset.take(3):
    input_shapes.append(example["inputs"].shape.as_list())
    target_shapes.append(example["targets"].shape.as_list())
  input_vocab_size = hparams.vocab_size["inputs"]
  target_vocab_size = hparams.vocab_size["targets"]
  input_info = _make_info(input_shapes, input_vocab_size)
  target_info = _make_info(target_shapes, target_vocab_size)
  info = {"inputs": input_info, "targets": target_info}
  return train_dataset, eval_dataset, info, supervised_keys
Batching function.
def batch_fn(dataset, training, shapes, target_names,
             batch_size=32, eval_batch_size=32, bucket_batch_length=32,
             bucket_max_length=256, bucket_min_length=8,
             bucket_length_step=1.1, buckets=None):
  """Batching function."""
  del target_names
  # If bucketing is not specified, check if target shapes are variable.
  cur_batch_size = batch_size if training else eval_batch_size
  if buckets is None:
    variable_target_shapes = False
    target_shape = shapes[1]
    for dim in target_shape:
      if dim is None:
        variable_target_shapes = True
    tf.logging.info("Heuristically setting bucketing to %s based on shapes "
                    "of target tensors." % variable_target_shapes)
    if variable_target_shapes:
      batch_size_per_token = cur_batch_size * bucket_batch_length
      scheme = data_reader.batching_scheme(batch_size_per_token,
                                           bucket_max_length,
                                           bucket_min_length,
                                           bucket_length_step,
                                           drop_long_sequences=training)
      buckets = (scheme["boundaries"], scheme["batch_sizes"])
  if buckets:
    tf.logging.info("Bucketing with buckets %s." % str(buckets))
    def example_length(_, target):
      return tf.shape(target)[0]
    boundaries, batch_sizes = buckets
    dataset = dataset.apply(tf.data.experimental.bucket_by_sequence_length(
        example_length, boundaries, batch_sizes))
  else:
    dataset = dataset.padded_batch(cur_batch_size, shapes)
  return dataset
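For readers unfamiliar with the bucketing transformation used above, here is a small self-contained sketch of tf.data.experimental.bucket_by_sequence_length on its own; the toy generator and bucket settings are made up for illustration.

import tensorflow as tf

# Toy (input, target) pairs with variable target lengths.
def gen():
  for n in [3, 7, 12, 5, 9]:
    yield [0] * n, list(range(n))

ds = tf.data.Dataset.from_generator(
    gen, output_types=(tf.int32, tf.int32), output_shapes=([None], [None]))

boundaries = [4, 8]      # bucket edges on target length
batch_sizes = [4, 2, 1]  # one batch size per bucket: len(boundaries) + 1 entries

# Group examples of similar target length and pad only within each bucket.
ds = ds.apply(tf.data.experimental.bucket_by_sequence_length(
    lambda _, target: tf.shape(target)[0], boundaries, batch_sizes))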
Shuffle and batch the given dataset.
def shuffle_and_batch_data(dataset, target_names, features_info, training):
  """Shuffle and batch the given dataset."""
  def append_targets(example):
    """Append targets to the example dictionary. Needed for Keras."""
    if len(target_names) == 1:
      return (example, example[target_names[0]])
    targets = {}
    for name in target_names:
      targets[name] = example[name]
    return (example, targets)
  dataset = dataset.map(append_targets)
  if training:
    dataset = dataset.repeat()
  shapes = {k: features_info[k].shape for k in features_info}
  shapes = (shapes, shapes[target_names[0]])
  dataset = dataset.shuffle(128)
  dataset = preprocess_fn(dataset, training)
  dataset = batch_fn(dataset, training, shapes, target_names)
  return dataset.prefetch(8)
Compile the model in Keras.
def optimize_fn(model,
                optimizer=None,
                learning_rate_schedule=None,
                loss=None,
                metrics=None):
  """Compile the model in Keras."""
  learning_rate_schedule = learning_rate_schedule or T2TLearningRateSchedule()
  if optimizer:
    optimizer = optimizer(learning_rate=learning_rate_schedule)
  else:
    # We use Adam by default with adjusted parameters.
    optimizer = tf.keras.optimizers.Adam(
        learning_rate=learning_rate_schedule,
        beta_1=0.9, beta_2=0.997, epsilon=1e-9)
  metrics = metrics or [tf.keras.metrics.sparse_categorical_accuracy]
  def xent_loss(y, x):
    return tf.keras.backend.sparse_categorical_crossentropy(
        y, x, from_logits=True)
  loss = loss or xent_loss
  return model.compile(optimizer=optimizer, loss=loss, metrics=metrics)
Train the given model on the given dataset. Args: data_dir: Directory where the data is located. output_dir: Directory where to put the logs and checkpoints. model_class: The model class to train. dataset: The name of the dataset to train on. input_names: List of strings with the names of the features on input. target_names: List of strings with the names of the target features. train_steps: for how many steps to train. eval_steps: for how many steps to do evaluation. eval_frequency: how often (every this many steps) to run evaluation.
def train_fn(data_dir=None, output_dir=None, model_class=gin.REQUIRED, dataset=gin.REQUIRED, input_names=None, target_names=None, train_steps=1000, eval_steps=1, eval_frequency=100): """Train the given model on the given dataset. Args: data_dir: Directory where the data is located. output_dir: Directory where to put the logs and checkpoints. model_class: The model class to train. dataset: The name of the dataset to train on. input_names: List of strings with the names of the features on input. target_names: List of strings with the names of the target features. train_steps: for how many steps to train. eval_steps: for how many steps to do evaluation. eval_frequency: how often (every this many steps) to run evaluation. """ train_data, eval_data, features_info, keys = train_and_eval_dataset( dataset, data_dir) if input_names is None: input_names = keys[0] if target_names is None: target_names = keys[1] # TODO(lukaszkaiser): The use of distribution strategy below fails like this: # .../keras/models.py", line 93, in _clone_functional_model # for layer in model._input_layers: # AttributeError: 'BasicFcRelu' object has no attribute '_input_layers' # strategy = tf.distribute.MirroredStrategy() # with strategy.scope(): model = model_class(features_info=features_info, input_names=input_names, target_names=target_names) optimize_fn(model) train_batches = shuffle_and_batch_data( train_data, target_names, features_info, training=True) eval_batches = shuffle_and_batch_data( eval_data, target_names, features_info, training=False) # Need to run one training step just to get optimizer variables to load. model.fit(train_batches, epochs=1, steps_per_epoch=1) # Training loop. callbacks = [] callbacks.append(tf.keras.callbacks.History()) callbacks.append(tf.keras.callbacks.BaseLogger()) last_epoch = 0 if output_dir is not None: callbacks.append(tf.keras.callbacks.TensorBoard(log_dir=output_dir)) output_format = os.path.join(output_dir, "model-{epoch:05d}") callbacks.append(tf.keras.callbacks.ModelCheckpoint( filepath=output_format, save_weights_only=True)) checkpoints = tf.gfile.Glob(os.path.join(output_dir, "model-*")) # Take basenames and strip the "model-" prefix. checkpoints = [os.path.basename(ckpt)[6:] for ckpt in checkpoints] # Get epoch numbers from the filenames and sort to obtain last epoch. epoch_numbers = [int(ckpt[:5]) for ckpt in checkpoints if len(ckpt) > 4] epoch_numbers.sort() if epoch_numbers: last_epoch = epoch_numbers[-1] saved_path = os.path.join(output_dir, "model-%05d" % last_epoch) model.load_weights(saved_path) model.fit(train_batches, epochs=train_steps // eval_frequency, steps_per_epoch=eval_frequency, validation_data=eval_batches, validation_steps=eval_steps, initial_epoch=last_epoch, callbacks=callbacks)
Main function to train the given model on the given dataset. Args: model_name: The name of the model to train. dataset_name: The name of the dataset to train on. data_dir: Directory where the data is located. output_dir: Directory where to put the logs and checkpoints. config_file: the gin configuration file to use. config: string (in gin format) to override gin parameters.
def t2t_train(model_name, dataset_name,
              data_dir=None, output_dir=None, config_file=None, config=None):
  """Main function to train the given model on the given dataset.

  Args:
    model_name: The name of the model to train.
    dataset_name: The name of the dataset to train on.
    data_dir: Directory where the data is located.
    output_dir: Directory where to put the logs and checkpoints.
    config_file: the gin configuration file to use.
    config: string (in gin format) to override gin parameters.
  """
  if model_name not in _MODEL_REGISTRY:
    raise ValueError("Model %s not in registry. Available models:\n * %s." %
                     (model_name, "\n * ".join(_MODEL_REGISTRY.keys())))
  model_class = _MODEL_REGISTRY[model_name]()
  gin.bind_parameter("train_fn.model_class", model_class)
  gin.bind_parameter("train_fn.dataset", dataset_name)
  gin.parse_config_files_and_bindings(config_file, config)
  # TODO(lukaszkaiser): save gin config in output_dir if provided?
  train_fn(data_dir, output_dir=output_dir)
Decode from estimator. Interactive, from file, or from dataset.
def decode(estimator, hparams, decode_hp):
  """Decode from estimator. Interactive, from file, or from dataset."""
  if FLAGS.decode_interactive:
    if estimator.config.use_tpu:
      raise ValueError("TPU can only decode from dataset.")
    decoding.decode_interactively(estimator, hparams, decode_hp,
                                  checkpoint_path=FLAGS.checkpoint_path)
  elif FLAGS.decode_from_file:
    decoding.decode_from_file(estimator, FLAGS.decode_from_file, hparams,
                              decode_hp, FLAGS.decode_to_file,
                              checkpoint_path=FLAGS.checkpoint_path)
    if FLAGS.checkpoint_path and FLAGS.keep_timestamp:
      ckpt_time = os.path.getmtime(FLAGS.checkpoint_path + ".index")
      os.utime(FLAGS.decode_to_file, (ckpt_time, ckpt_time))
  else:
    decoding.decode_from_dataset(
        estimator,
        FLAGS.problem,
        hparams,
        decode_hp,
        decode_to_file=FLAGS.decode_to_file,
        dataset_split="test" if FLAGS.eval_use_test_set else None,
        checkpoint_path=FLAGS.checkpoint_path)
Score each line in a file and return the scores.
def score_file(filename): """Score each line in a file and return the scores.""" # Prepare model. hparams = create_hparams() encoders = registry.problem(FLAGS.problem).feature_encoders(FLAGS.data_dir) has_inputs = "inputs" in encoders # Prepare features for feeding into the model. if has_inputs: inputs_ph = tf.placeholder(dtype=tf.int32) # Just length dimension. batch_inputs = tf.reshape(inputs_ph, [1, -1, 1, 1]) # Make it 4D. targets_ph = tf.placeholder(dtype=tf.int32) # Just length dimension. batch_targets = tf.reshape(targets_ph, [1, -1, 1, 1]) # Make it 4D. if has_inputs: features = {"inputs": batch_inputs, "targets": batch_targets} else: features = {"targets": batch_targets} # Prepare the model and the graph when model runs on features. model = registry.model(FLAGS.model)(hparams, tf.estimator.ModeKeys.EVAL) _, losses = model(features) saver = tf.train.Saver() with tf.Session() as sess: # Load weights from checkpoint. if FLAGS.checkpoint_path is None: ckpts = tf.train.get_checkpoint_state(FLAGS.output_dir) ckpt = ckpts.model_checkpoint_path else: ckpt = FLAGS.checkpoint_path saver.restore(sess, ckpt) # Run on each line. with tf.gfile.Open(filename) as f: lines = f.readlines() results = [] for line in lines: tab_split = line.split("\t") if len(tab_split) > 2: raise ValueError("Each line must have at most one tab separator.") if len(tab_split) == 1: targets = tab_split[0].strip() else: targets = tab_split[1].strip() inputs = tab_split[0].strip() # Run encoders and append EOS symbol. targets_numpy = encoders["targets"].encode( targets) + [text_encoder.EOS_ID] if has_inputs: inputs_numpy = encoders["inputs"].encode(inputs) + [text_encoder.EOS_ID] # Prepare the feed. if has_inputs: feed = {inputs_ph: inputs_numpy, targets_ph: targets_numpy} else: feed = {targets_ph: targets_numpy} # Get the score. np_loss = sess.run(losses["training"], feed) results.append(np_loss) return results
Put time dimension on channels in an embedded video.
def time_to_channels(embedded_video):
  """Put time dimension on channels in an embedded video."""
  video_shape = common_layers.shape_list(embedded_video)
  if len(video_shape) != 5:
    raise ValueError("Assuming videos given as tensors in the format "
                     "[batch, time, height, width, channels] but got one "
                     "of shape: %s" % str(video_shape))
  transposed = tf.transpose(embedded_video, [0, 2, 3, 1, 4])
  return tf.reshape(transposed, [
      video_shape[0], video_shape[2], video_shape[3],
      video_shape[1] * video_shape[4]
  ])
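To make the layout change concrete, here is a NumPy sketch of the same transpose-and-reshape; the shapes are made up for illustration.

import numpy as np

# Toy "embedded video": [batch, time, height, width, channels].
video = np.arange(2 * 3 * 4 * 4 * 8).reshape(2, 3, 4, 4, 8)

# Same permutation as time_to_channels: move time next to channels, then fold.
folded = np.transpose(video, [0, 2, 3, 1, 4]).reshape(2, 4, 4, 3 * 8)
print(folded.shape)  # (2, 4, 4, 24)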
Basic autoencoder model.
def autoencoder_basic():
  """Basic autoencoder model."""
  hparams = common_hparams.basic_params1()
  hparams.optimizer = "adam"
  hparams.learning_rate_constant = 0.0002
  hparams.learning_rate_warmup_steps = 500
  hparams.learning_rate_schedule = "constant * linear_warmup"
  hparams.label_smoothing = 0.0
  hparams.batch_size = 128
  hparams.hidden_size = 64
  hparams.num_hidden_layers = 5
  hparams.initializer = "uniform_unit_scaling"
  hparams.initializer_gain = 1.0
  hparams.weight_decay = 0.0
  hparams.kernel_height = 4
  hparams.kernel_width = 4
  hparams.dropout = 0.05
  hparams.add_hparam("max_hidden_size", 1024)
  hparams.add_hparam("bottleneck_bits", 128)
  hparams.add_hparam("bottleneck_shared_bits", 0)
  hparams.add_hparam("bottleneck_shared_bits_start_warmup", 0)
  hparams.add_hparam("bottleneck_shared_bits_stop_warmup", 0)
  hparams.add_hparam("bottleneck_noise", 0.1)
  hparams.add_hparam("bottleneck_warmup_steps", 2000)
  hparams.add_hparam("sample_height", 32)
  hparams.add_hparam("sample_width", 32)
  hparams.add_hparam("discriminator_batchnorm", True)
  hparams.add_hparam("num_sliced_vecs", 20000)
  hparams.add_hparam("sliced_do_tanh", int(True))
  hparams.add_hparam("discriminator_size", 256)
  hparams.add_hparam("discriminator_kernel_size", 6)
  hparams.add_hparam("discriminator_strides", 4)
  hparams.add_hparam("discriminator_pure_mean", int(False))
  hparams.add_hparam("code_loss_factor", 1.0)
  hparams.add_hparam("gan_codes_warmup_steps", 16000)
  hparams.add_hparam("gan_loss_factor", 0.0)
  hparams.add_hparam("bottleneck_l2_factor", 0.05)
  hparams.add_hparam("gumbel_temperature", 0.5)
  hparams.add_hparam("gumbel_noise_factor", 0.5)
  hparams.add_hparam("vq_temperature", 0.001)
  hparams.add_hparam("use_vq_loss", int(False))
  hparams.add_hparam("discriminator", "double")
  return hparams
Autoregressive autoencoder model.
def autoencoder_autoregressive():
  """Autoregressive autoencoder model."""
  hparams = autoencoder_basic()
  hparams.add_hparam("autoregressive_forget_base", False)
  hparams.add_hparam("autoregressive_mode", "none")
  hparams.add_hparam("autoregressive_decode_steps", 0)
  hparams.add_hparam("autoregressive_eval_pure_autoencoder", False)
  hparams.add_hparam("autoregressive_gumbel_sample", False)
  return hparams
Residual autoencoder model.
def autoencoder_residual():
  """Residual autoencoder model."""
  hparams = autoencoder_autoregressive()
  hparams.optimizer = "Adafactor"
  hparams.clip_grad_norm = 1.0
  hparams.learning_rate_constant = 0.5
  hparams.learning_rate_warmup_steps = 500
  hparams.learning_rate_schedule = "constant * linear_warmup * rsqrt_decay"
  hparams.num_hidden_layers = 5
  hparams.hidden_size = 64
  hparams.max_hidden_size = 1024
  hparams.add_hparam("num_residual_layers", 2)
  hparams.add_hparam("residual_kernel_height", 3)
  hparams.add_hparam("residual_kernel_width", 3)
  hparams.add_hparam("residual_filter_multiplier", 2.0)
  hparams.add_hparam("residual_dropout", 0.2)
  hparams.add_hparam("residual_use_separable_conv", int(True))
  hparams.add_hparam("kl_beta", 1.0)
  return hparams
Residual autoencoder model for text.
def autoencoder_residual_text():
  """Residual autoencoder model for text."""
  hparams = autoencoder_residual()
  hparams.bottleneck_bits = 32
  hparams.batch_size = 1024
  hparams.hidden_size = 64
  hparams.max_hidden_size = 512
  hparams.bottleneck_noise = 0.0
  hparams.bottom = {
      "inputs": modalities.identity_bottom,
      "targets": modalities.identity_bottom,
  }
  hparams.top = {
      "targets": modalities.identity_top,
  }
  hparams.autoregressive_mode = "none"
  hparams.sample_width = 1
  return hparams
Basic autoencoder model.
def autoencoder_basic_discrete():
  """Basic autoencoder model."""
  hparams = autoencoder_autoregressive()
  hparams.num_hidden_layers = 5
  hparams.hidden_size = 64
  hparams.bottleneck_bits = 1024
  hparams.bottleneck_noise = 0.1
  hparams.add_hparam("discretize_warmup_steps", 16000)
  return hparams
Residual discrete autoencoder model.
def autoencoder_residual_discrete():
  """Residual discrete autoencoder model."""
  hparams = autoencoder_residual()
  hparams.bottleneck_bits = 1024
  hparams.bottleneck_noise = 0.05
  hparams.add_hparam("discretize_warmup_steps", 16000)
  hparams.add_hparam("bottleneck_kind", "tanh_discrete")
  hparams.add_hparam("isemhash_noise_dev", 0.5)
  hparams.add_hparam("isemhash_mix_prob", 0.5)
  hparams.add_hparam("isemhash_filter_size_multiplier", 2.0)
  hparams.add_hparam("vq_beta", 0.25)
  hparams.add_hparam("vq_decay", 0.999)
  hparams.add_hparam("vq_epsilon", 1e-5)
  return hparams
Residual discrete autoencoder model, big version.
def autoencoder_residual_discrete_big():
  """Residual discrete autoencoder model, big version."""
  hparams = autoencoder_residual_discrete()
  hparams.hidden_size = 128
  hparams.max_hidden_size = 4096
  hparams.bottleneck_noise = 0.1
  hparams.residual_dropout = 0.4
  return hparams
Ordered discrete autoencoder model.
def autoencoder_ordered_discrete():
  """Ordered discrete autoencoder model."""
  hparams = autoencoder_residual_discrete()
  hparams.bottleneck_noise = 0.05  # Use 0.8 for ordered.
  hparams.gan_loss_factor = 0.05
  hparams.add_hparam("unordered", True)
  return hparams
Ordered discrete autoencoder model.
def autoencoder_ordered_discrete_image64():
  """Ordered discrete autoencoder model."""
  hparams = autoencoder_ordered_discrete()
  hparams.batch_size = 32
  hparams.num_hidden_layers = 6
  hparams.bottleneck_warmup_steps *= 2
  hparams.gan_codes_warmup_steps *= 2
  return hparams
Ordered discrete autoencoder model for text.
def autoencoder_ordered_text():
  """Ordered discrete autoencoder model for text."""
  hparams = autoencoder_ordered_discrete()
  hparams.bottleneck_bits = 1024
  hparams.bottleneck_shared_bits = 1024 - 64
  hparams.bottleneck_shared_bits_start_warmup = 75000
  hparams.bottleneck_shared_bits_stop_warmup = 275000
  hparams.num_hidden_layers = 7
  hparams.batch_size = 1024
  hparams.autoregressive_mode = "conv5"
  hparams.max_hidden_size = 1024
  hparams.bottom = {
      "inputs": modalities.identity_bottom,
      "targets": modalities.identity_bottom,
  }
  hparams.top = {
      "targets": modalities.identity_top,
  }
  hparams.sample_height = 128
  hparams.sample_width = 1
  return hparams
Ordered discrete autoencoder model for text, small version.
def autoencoder_ordered_text_small():
  """Ordered discrete autoencoder model for text, small version."""
  hparams = autoencoder_ordered_text()
  hparams.bottleneck_bits = 32
  hparams.num_hidden_layers = 3
  hparams.hidden_size = 64
  hparams.max_hidden_size = 512
  hparams.bottleneck_noise = 0.0
  hparams.autoregressive_mode = "conv5"
  hparams.sample_height = 4
  return hparams
Discrete autoencoder model for compressing pong frames.
def autoencoder_discrete_pong():
  """Discrete autoencoder model for compressing pong frames."""
  hparams = autoencoder_ordered_discrete()
  hparams.num_hidden_layers = 3
  hparams.bottleneck_bits = 24
  hparams.batch_size = 2
  hparams.gan_loss_factor = 0.01
  hparams.bottleneck_l2_factor = 0.001
  hparams.add_hparam("video_modality_loss_cutoff", 0.02)
  return hparams
Discrete autoencoder model for compressing pong frames for testing.
def autoencoder_discrete_tiny():
  """Discrete autoencoder model for compressing pong frames for testing."""
  hparams = autoencoder_ordered_discrete()
  hparams.num_hidden_layers = 2
  hparams.bottleneck_bits = 24
  hparams.batch_size = 2
  hparams.gan_loss_factor = 0.
  hparams.bottleneck_l2_factor = 0.001
  hparams.add_hparam("video_modality_loss_cutoff", 0.02)
  hparams.num_residual_layers = 1
  hparams.hidden_size = 32
  hparams.max_hidden_size = 64
  return hparams
Discrete autoencoder model for compressing cifar.
def autoencoder_discrete_cifar():
  """Discrete autoencoder model for compressing cifar."""
  hparams = autoencoder_ordered_discrete()
  hparams.bottleneck_noise = 0.0
  hparams.bottleneck_bits = 90
  hparams.num_hidden_layers = 2
  hparams.hidden_size = 256
  hparams.num_residual_layers = 4
  hparams.batch_size = 32
  hparams.learning_rate_constant = 1.0
  return hparams
Tuning grid of the main autoencoder params.
def autoencoder_range(rhp):
  """Tuning grid of the main autoencoder params."""
  rhp.set_float("dropout", 0.01, 0.3)
  rhp.set_float("gan_loss_factor", 0.01, 0.1)
  rhp.set_float("bottleneck_l2_factor", 0.001, 0.1, scale=rhp.LOG_SCALE)
  rhp.set_discrete("bottleneck_warmup_steps", [200, 2000])
  rhp.set_float("gumbel_temperature", 0, 1)
  rhp.set_float("gumbel_noise_factor", 0, 0.5)
A stack of self attention layers.
def image_encoder(image_feat,
                  hparams,
                  name="image_encoder",
                  save_weights_to=None,
                  make_image_summary=True):
  """A stack of self attention layers."""
  x = image_feat
  with tf.variable_scope(name):
    for layer in range(hparams.num_encoder_layers or hparams.num_hidden_layers):
      with tf.variable_scope("layer_%d" % layer):
        with tf.variable_scope("self_attention"):
          y = vqa_layers.multihead_attention(
              common_layers.layer_preprocess(x, hparams),
              None,
              None,
              hparams.attention_key_channels or hparams.image_hidden_size,
              hparams.attention_value_channels or hparams.image_hidden_size,
              hparams.image_hidden_size,
              hparams.num_heads,
              hparams.attention_dropout,
              attention_type=hparams.self_attention_type,
              save_weights_to=save_weights_to,
              max_relative_position=None,
              make_image_summary=make_image_summary,
              dropout_broadcast_dims=None,
              max_length=None,
              vars_3d=False,
              scale_dotproduct=hparams.scale_dotproduct)
          utils.collect_named_outputs("norms", "image_feat_self_attention",
                                      tf.norm(y, axis=-1))
          x = common_layers.layer_postprocess(x, y, hparams)
          utils.collect_named_outputs(
              "norms", "image_feat_self_attention_zero_add",
              tf.norm(x, axis=-1))
        with tf.variable_scope("ffn"):
          y = common_layers.dense_relu_dense(
              common_layers.layer_preprocess(x, hparams),
              hparams.image_filter_size,
              hparams.image_hidden_size,
              dropout=hparams.relu_dropout,
              dropout_broadcast_dims=None)
          utils.collect_named_outputs("norms", "image_feat_ffn",
                                      tf.norm(y, axis=-1))
          x = common_layers.layer_postprocess(x, y, hparams)
          utils.collect_named_outputs("norms", "image_feat_ffn_zero_add",
                                      tf.norm(x, axis=-1))
    # if normalization is done in layer_preprocess, then it should also be done
    # on the output, since the output can grow very large, being the sum of
    # a whole stack of unnormalized layer outputs.
    return common_layers.layer_preprocess(x, hparams)
Question encoder, run LSTM encoder and get the last output as encoding.
def question_encoder(question, hparams, name="encoder"): """Question encoder, run LSTM encoder and get the last output as encoding.""" with tf.variable_scope(name, "encoder", values=[question]): question = common_layers.flatten4d3d(question) padding = common_attention.embedding_to_padding(question) length = common_attention.padding_to_length(padding) max_question_length = hparams.max_question_length question = question[:, :max_question_length, :] actual_question_length = common_layers.shape_list(question)[1] length = tf.minimum(length, max_question_length) padding = [[0, 0], [0, max_question_length-actual_question_length], [0, 0]] question = tf.pad(question, padding) question_shape = question.get_shape().as_list() question_shape[1] = max_question_length question.set_shape(question_shape) # apply tanh dropout on question embedding question = tf.tanh(question) question = tf.nn.dropout(question, keep_prob=1.-hparams.dropout) question = [question[:, i, :] for i in range(max_question_length)] # rnn_layers = [_get_rnn_cell(hparams) # for _ in range(hparams.num_rnn_layers)] # rnn_multi_cell = tf.nn.rnn_cell.MultiRNNCell(rnn_layers) rnn_cell = _get_rnn_cell(hparams) # outputs, _ = tf.nn.dynamic_rnn( # rnn_cell, question, length, dtype=tf.float32) _, state = tf.nn.static_rnn(rnn_cell, question, sequence_length=length, dtype=tf.float32) # outputs = [tf.expand_dims(output, axis=1) for output in outputs] # outputs = tf.concat(outputs, axis=1) # utils.collect_named_outputs("vqa_attention_debug", "question_output", # outputs) # utils.collect_named_outputs("vqa_attention_debug", "question_state", # state.h) # batch_size = common_layers.shape_list(outputs)[0] # row_indices = tf.range(batch_size) # # length - 1 as index # indices = tf.transpose([row_indices, tf.maximum(length-1, 0)]) # last_output = tf.gather_nd(outputs, indices) # utils.collect_named_outputs("vqa_attention_debug", # "question_final_output", last_output) return state.h
Attention on image feature with question as query.
def attn(image_feat, query, hparams, name="attn"):
  """Attention on image feature with question as query."""
  with tf.variable_scope(name, "attn", values=[image_feat, query]):
    attn_dim = hparams.attn_dim
    num_glimps = hparams.num_glimps
    num_channels = common_layers.shape_list(image_feat)[-1]
    if len(common_layers.shape_list(image_feat)) == 4:
      image_feat = common_layers.flatten4d3d(image_feat)
    query = tf.expand_dims(query, 1)
    image_proj = common_attention.compute_attention_component(
        image_feat, attn_dim, name="image_proj")
    query_proj = common_attention.compute_attention_component(
        query, attn_dim, name="query_proj")
    h = tf.nn.relu(image_proj + query_proj)
    h_proj = common_attention.compute_attention_component(
        h, num_glimps, name="h_proj")
    p = tf.nn.softmax(h_proj, axis=1)
    image_ave = tf.matmul(image_feat, p, transpose_a=True)
    image_ave = tf.reshape(image_ave, [-1, num_channels * num_glimps])
    return image_ave
Multi layer perceptron with dropout and relu activation.
def mlp(feature, hparams, name="mlp"):
  """Multi layer perceptron with dropout and relu activation."""
  with tf.variable_scope(name, "mlp", values=[feature]):
    num_mlp_layers = hparams.num_mlp_layers
    mlp_dim = hparams.mlp_dim
    for _ in range(num_mlp_layers):
      feature = common_layers.dense(feature, mlp_dim, activation=tf.nn.relu)
      feature = tf.nn.dropout(feature, keep_prob=1. - hparams.dropout)
    return feature
VQA attention baseline hparams.
def vqa_attention_base():
  """VQA attention baseline hparams."""
  hparams = common_hparams.basic_params1()
  hparams.batch_size = 128
  hparams.use_fixed_batch_size = True
  hparams.optimizer = "adam"
  hparams.optimizer_adam_beta1 = 0.9
  hparams.optimizer_adam_beta2 = 0.999
  hparams.optimizer_adam_epsilon = 1e-8
  hparams.weight_decay = 0.
  hparams.clip_grad_norm = 0.
  hparams.initializer = "xavier"
  hparams.learning_rate = 0.5
  hparams.learning_rate_schedule = "legacy"
  hparams.learning_rate_warmup_steps = 0
  hparams.learning_rate_decay_scheme = "exp"
  hparams.learning_rate_decay_rate = 0.5
  hparams.learning_rate_decay_steps = 50000
  hparams.dropout = 0.5
  hparams.summarize_grads = True
  hparams.summarize_vars = True
  # not used hparams
  hparams.label_smoothing = 0.
  hparams.multiply_embedding_mode = ""
  # add new hparams
  # preprocess
  hparams.add_hparam("resize_side", 512)
  hparams.add_hparam("height", 448)
  hparams.add_hparam("width", 448)
  hparams.add_hparam("distort", True)
  hparams.add_hparam("train_resnet", False)
  hparams.add_hparam("rnn_type", "lstm")
  hparams.add_hparam("num_rnn_layers", 1)
  hparams.add_hparam("max_question_length", 15)
  # lstm hidden size
  hparams.hidden_size = 512
  hparams.add_hparam("attn_dim", 512)
  hparams.add_hparam("num_glimps", 2)
  hparams.add_hparam("num_mlp_layers", 1)
  hparams.add_hparam("mlp_dim", 1024)
  hparams.add_hparam("image_input_type", "image")
  hparams.add_hparam("image_model_fn", "resnet_v1_152")
  hparams.add_hparam("image_feat_size", 0)
  # self attention parts
  hparams.norm_type = "layer"
  hparams.layer_preprocess_sequence = "n"
  hparams.layer_postprocess_sequence = "da"
  hparams.layer_prepostprocess_dropout = 0.3
  hparams.attention_dropout = 0.1
  hparams.relu_dropout = 0.1
  hparams.image_hidden_size = 2048
  hparams.add_hparam("num_encoder_layers", 1)
  # Attention-related flags.
  hparams.add_hparam("num_heads", 8)
  hparams.add_hparam("attention_key_channels", 0)
  hparams.add_hparam("attention_value_channels", 0)
  hparams.add_hparam("image_filter_size", 1024)
  hparams.add_hparam("self_attention_type", "dot_product")
  hparams.add_hparam("scale_dotproduct", True)
  return hparams
Small range of hyperparameters.
def vqa_attention_base_range(rhp):
  """Small range of hyperparameters."""
  # After starting from base, set intervals for some parameters.
  rhp.set_float("learning_rate", 0.1, 1.0, scale=rhp.LOG_SCALE)
  rhp.set_float("clip_grad_norm", 0.1, 10, scale=rhp.LOG_SCALE)
  rhp.set_discrete("batch_size", [128, 256, 512, 1024])
  rhp.set_float("weight_decay", 0.0, 1e-4)
  rhp.set_categorical("rnn_type", ["lstm", "lstm_layernorm"])
Append (step, value) pair to history for the given mode and metric.
def append(self, mode, metric, step, value):
  """Append (step, value) pair to history for the given mode and metric."""
  if mode not in self._values:
    self._values[mode] = collections.defaultdict(list)
  self._values[mode][metric].append((step, value))
Get the history for the given metric and mode.
def get(self, mode, metric):
  """Get the history for the given metric and mode."""
  if mode not in self._values:
    logging.info("Metric %s not found for mode %s", metric, mode)
    return []
  return list(self._values[mode][metric])
Metrics available for a given mode.
def metrics_for_mode(self, mode):
  """Metrics available for a given mode."""
  if mode not in self._values:
    logging.info("Mode %s not found", mode)
    return []
  return sorted(list(self._values[mode].keys()))
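A hypothetical usage sketch, assuming the three methods above live on a History class whose constructor sets self._values = {}.

history = History()
history.append("train", "loss", step=100, value=2.31)
history.append("train", "loss", step=200, value=1.87)
history.append("eval", "accuracy", step=200, value=0.41)

history.get("train", "loss")       # [(100, 2.31), (200, 1.87)]
history.metrics_for_mode("eval")   # ["accuracy"]
history.metrics_for_mode("test")   # [] (and logs "Mode test not found")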
Performs a batch normalization followed by a ReLU. Args: inputs: `Tensor` of shape `[batch, channels, ...]`. is_training: `bool` for whether the model is training. relu: `bool` if False, omits the ReLU operation. init_zero: `bool` if True, initializes scale parameter of batch normalization with 0 instead of 1 (default). data_format: `str` either "channels_first" for `[batch, channels, height, width]` or "channels_last for `[batch, height, width, channels]`. Returns: A normalized `Tensor` with the same `data_format`.
def batch_norm_relu(inputs, is_training, relu=True, init_zero=False, data_format="channels_first"): """Performs a batch normalization followed by a ReLU. Args: inputs: `Tensor` of shape `[batch, channels, ...]`. is_training: `bool` for whether the model is training. relu: `bool` if False, omits the ReLU operation. init_zero: `bool` if True, initializes scale parameter of batch normalization with 0 instead of 1 (default). data_format: `str` either "channels_first" for `[batch, channels, height, width]` or "channels_last for `[batch, height, width, channels]`. Returns: A normalized `Tensor` with the same `data_format`. """ if init_zero: gamma_initializer = tf.zeros_initializer() else: gamma_initializer = tf.ones_initializer() if data_format == "channels_first": axis = 1 else: axis = 3 inputs = layers().BatchNormalization( axis=axis, momentum=BATCH_NORM_DECAY, epsilon=BATCH_NORM_EPSILON, center=True, scale=True, fused=True, gamma_initializer=gamma_initializer)(inputs, training=is_training) if relu: inputs = tf.nn.relu(inputs) return inputs
Strided 2-D convolution with explicit padding. The padding is consistent and is based only on `kernel_size`, not on the dimensions of `inputs` (as opposed to using `tf.layers.conv2d` alone). Args: inputs: `Tensor` of size `[batch, channels, height_in, width_in]`. filters: `int` number of filters in the convolution. kernel_size: `int` size of the kernel to be used in the convolution. strides: `int` strides of the convolution. data_format: `str` either "channels_first" for `[batch, channels, height, width]` or "channels_last for `[batch, height, width, channels]`. use_td: `str` one of "weight" or "unit". Set to False or "" to disable targeted dropout. targeting_rate: `float` proportion of weights to target with targeted dropout. keep_prob: `float` keep probability for targeted dropout. is_training: `bool` for whether the model is in training. Returns: A `Tensor` of shape `[batch, filters, height_out, width_out]`. Raises: Exception: if use_td is not valid.
def conv2d_fixed_padding(inputs, filters, kernel_size, strides, data_format="channels_first", use_td=False, targeting_rate=None, keep_prob=None, is_training=None): """Strided 2-D convolution with explicit padding. The padding is consistent and is based only on `kernel_size`, not on the dimensions of `inputs` (as opposed to using `tf.layers.conv2d` alone). Args: inputs: `Tensor` of size `[batch, channels, height_in, width_in]`. filters: `int` number of filters in the convolution. kernel_size: `int` size of the kernel to be used in the convolution. strides: `int` strides of the convolution. data_format: `str` either "channels_first" for `[batch, channels, height, width]` or "channels_last for `[batch, height, width, channels]`. use_td: `str` one of "weight" or "unit". Set to False or "" to disable targeted dropout. targeting_rate: `float` proportion of weights to target with targeted dropout. keep_prob: `float` keep probability for targeted dropout. is_training: `bool` for whether the model is in training. Returns: A `Tensor` of shape `[batch, filters, height_out, width_out]`. Raises: Exception: if use_td is not valid. """ if strides > 1: inputs = fixed_padding(inputs, kernel_size, data_format=data_format) if use_td: inputs_shape = common_layers.shape_list(inputs) if use_td == "weight": if data_format == "channels_last": size = kernel_size * kernel_size * inputs_shape[-1] else: size = kernel_size * kernel_size * inputs_shape[1] targeting_count = targeting_rate * tf.to_float(size) targeting_fn = common_layers.weight_targeting elif use_td == "unit": targeting_count = targeting_rate * filters targeting_fn = common_layers.unit_targeting else: raise Exception("Unrecognized targeted dropout type: %s" % use_td) y = common_layers.td_conv( inputs, filters, kernel_size, targeting_count, targeting_fn, keep_prob, is_training, do_prune=True, strides=strides, padding=("SAME" if strides == 1 else "VALID"), data_format=data_format, use_bias=False, kernel_initializer=tf.variance_scaling_initializer()) else: y = layers().Conv2D( filters=filters, kernel_size=kernel_size, strides=strides, padding=("SAME" if strides == 1 else "VALID"), use_bias=False, kernel_initializer=tf.variance_scaling_initializer(), data_format=data_format)(inputs) return y
Standard building block for residual networks with BN before convolutions. Args: inputs: `Tensor` of size `[batch, channels, height, width]`. filters: `int` number of filters for the first two convolutions. Note that the third and final convolution will use 4 times as many filters. is_training: `bool` for whether the model is in training. projection_shortcut: `function` to use for projection shortcuts (typically a 1x1 convolution to match the filter dimensions). If None, no projection is used and the input is passed as unchanged through the shortcut connection. strides: `int` block stride. If greater than 1, this block will ultimately downsample the input. final_block: unused parameter to keep the same function signature as `bottleneck_block`. data_format: `str` either "channels_first" for `[batch, channels, height, width]` or "channels_last for `[batch, height, width, channels]`. use_td: `str` one of "weight" or "unit". Set to False or "" to disable targeted dropout. targeting_rate: `float` proportion of weights to target with targeted dropout. keep_prob: `float` keep probability for targeted dropout. Returns: The output `Tensor` of the block.
def residual_block(inputs, filters, is_training, projection_shortcut, strides, final_block, data_format="channels_first", use_td=False, targeting_rate=None, keep_prob=None): """Standard building block for residual networks with BN before convolutions. Args: inputs: `Tensor` of size `[batch, channels, height, width]`. filters: `int` number of filters for the first two convolutions. Note that the third and final convolution will use 4 times as many filters. is_training: `bool` for whether the model is in training. projection_shortcut: `function` to use for projection shortcuts (typically a 1x1 convolution to match the filter dimensions). If None, no projection is used and the input is passed as unchanged through the shortcut connection. strides: `int` block stride. If greater than 1, this block will ultimately downsample the input. final_block: unused parameter to keep the same function signature as `bottleneck_block`. data_format: `str` either "channels_first" for `[batch, channels, height, width]` or "channels_last for `[batch, height, width, channels]`. use_td: `str` one of "weight" or "unit". Set to False or "" to disable targeted dropout. targeting_rate: `float` proportion of weights to target with targeted dropout. keep_prob: `float` keep probability for targeted dropout. Returns: The output `Tensor` of the block. """ del final_block shortcut = inputs inputs = batch_norm_relu(inputs, is_training, data_format=data_format) if projection_shortcut is not None: shortcut = projection_shortcut(inputs) inputs = conv2d_fixed_padding( inputs=inputs, filters=filters, kernel_size=3, strides=strides, data_format=data_format, use_td=use_td, targeting_rate=targeting_rate, keep_prob=keep_prob, is_training=is_training) inputs = batch_norm_relu(inputs, is_training, data_format=data_format) inputs = conv2d_fixed_padding( inputs=inputs, filters=filters, kernel_size=3, strides=1, data_format=data_format, use_td=use_td, targeting_rate=targeting_rate, keep_prob=keep_prob, is_training=is_training) return inputs + shortcut
Bottleneck block variant for residual networks with BN after convolutions. Args: inputs: `Tensor` of size `[batch, channels, height, width]`. filters: `int` number of filters for the first two convolutions. Note that the third and final convolution will use 4 times as many filters. is_training: `bool` for whether the model is in training. projection_shortcut: `function` to use for projection shortcuts (typically a 1x1 convolution to match the filter dimensions). If None, no projection is used and the input is passed as unchanged through the shortcut connection. strides: `int` block stride. If greater than 1, this block will ultimately downsample the input. final_block: `bool` set to True if it is this the final block in the group. This is changes the behavior of batch normalization initialization for the final batch norm in a block. data_format: `str` either "channels_first" for `[batch, channels, height, width]` or "channels_last for `[batch, height, width, channels]`. use_td: `str` one of "weight" or "unit". Set to False or "" to disable targeted dropout. targeting_rate: `float` proportion of weights to target with targeted dropout. keep_prob: `float` keep probability for targeted dropout. Returns: The output `Tensor` of the block.
def bottleneck_block(inputs, filters, is_training, projection_shortcut, strides, final_block, data_format="channels_first", use_td=False, targeting_rate=None, keep_prob=None): """Bottleneck block variant for residual networks with BN after convolutions. Args: inputs: `Tensor` of size `[batch, channels, height, width]`. filters: `int` number of filters for the first two convolutions. Note that the third and final convolution will use 4 times as many filters. is_training: `bool` for whether the model is in training. projection_shortcut: `function` to use for projection shortcuts (typically a 1x1 convolution to match the filter dimensions). If None, no projection is used and the input is passed as unchanged through the shortcut connection. strides: `int` block stride. If greater than 1, this block will ultimately downsample the input. final_block: `bool` set to True if it is this the final block in the group. This is changes the behavior of batch normalization initialization for the final batch norm in a block. data_format: `str` either "channels_first" for `[batch, channels, height, width]` or "channels_last for `[batch, height, width, channels]`. use_td: `str` one of "weight" or "unit". Set to False or "" to disable targeted dropout. targeting_rate: `float` proportion of weights to target with targeted dropout. keep_prob: `float` keep probability for targeted dropout. Returns: The output `Tensor` of the block. """ # TODO(chrisying): this block is technically the post-activation resnet-v1 # bottleneck unit. Test with v2 (pre-activation) and replace if there is no # difference for consistency. shortcut = inputs if projection_shortcut is not None: shortcut = projection_shortcut(inputs) inputs = conv2d_fixed_padding( inputs=inputs, filters=filters, kernel_size=1, strides=1, data_format=data_format, use_td=use_td, targeting_rate=targeting_rate, keep_prob=keep_prob, is_training=is_training) inputs = batch_norm_relu(inputs, is_training, data_format=data_format) inputs = conv2d_fixed_padding( inputs=inputs, filters=filters, kernel_size=3, strides=strides, data_format=data_format, use_td=use_td, targeting_rate=targeting_rate, keep_prob=keep_prob, is_training=is_training) inputs = batch_norm_relu(inputs, is_training, data_format=data_format) inputs = conv2d_fixed_padding( inputs=inputs, filters=4 * filters, kernel_size=1, strides=1, data_format=data_format, use_td=use_td, targeting_rate=targeting_rate, keep_prob=keep_prob, is_training=is_training) inputs = batch_norm_relu( inputs, is_training, relu=False, init_zero=final_block, data_format=data_format) return tf.nn.relu(inputs + shortcut)
Creates one layer of blocks for the ResNet model. Args: inputs: `Tensor` of size `[batch, channels, height, width]`. filters: `int` number of filters for the first convolution of the layer. block_fn: `function` for the block to use within the model blocks: `int` number of blocks contained in the layer. strides: `int` stride to use for the first convolution of the layer. If greater than 1, this layer will downsample the input. is_training: `bool` for whether the model is training. name: `str`name for the Tensor output of the block layer. data_format: `str` either "channels_first" for `[batch, channels, height, width]` or "channels_last for `[batch, height, width, channels]`. use_td: `str` one of "weight" or "unit". Set to False or "" to disable targeted dropout. targeting_rate: `float` proportion of weights to target with targeted dropout. keep_prob: `float` keep probability for targeted dropout. Returns: The output `Tensor` of the block layer.
def block_layer(inputs, filters, block_fn, blocks, strides, is_training, name, data_format="channels_first", use_td=False, targeting_rate=None, keep_prob=None): """Creates one layer of blocks for the ResNet model. Args: inputs: `Tensor` of size `[batch, channels, height, width]`. filters: `int` number of filters for the first convolution of the layer. block_fn: `function` for the block to use within the model blocks: `int` number of blocks contained in the layer. strides: `int` stride to use for the first convolution of the layer. If greater than 1, this layer will downsample the input. is_training: `bool` for whether the model is training. name: `str`name for the Tensor output of the block layer. data_format: `str` either "channels_first" for `[batch, channels, height, width]` or "channels_last for `[batch, height, width, channels]`. use_td: `str` one of "weight" or "unit". Set to False or "" to disable targeted dropout. targeting_rate: `float` proportion of weights to target with targeted dropout. keep_prob: `float` keep probability for targeted dropout. Returns: The output `Tensor` of the block layer. """ # Bottleneck blocks end with 4x the number of filters as they start with filters_out = 4 * filters if block_fn is bottleneck_block else filters def projection_shortcut(inputs): """Project identity branch.""" inputs = conv2d_fixed_padding( inputs=inputs, filters=filters_out, kernel_size=1, strides=strides, data_format=data_format, use_td=use_td, targeting_rate=targeting_rate, keep_prob=keep_prob, is_training=is_training) return batch_norm_relu( inputs, is_training, relu=False, data_format=data_format) # Only the first block per block_layer uses projection_shortcut and strides inputs = block_fn( inputs, filters, is_training, projection_shortcut, strides, False, data_format, use_td=use_td, targeting_rate=targeting_rate, keep_prob=keep_prob) for i in range(1, blocks): inputs = block_fn( inputs, filters, is_training, None, 1, (i + 1 == blocks), data_format, use_td=use_td, targeting_rate=targeting_rate, keep_prob=keep_prob) return tf.identity(inputs, name)
Resnet model. Args: inputs: `Tensor` images. block_fn: `function` for the block to use within the model. Either `residual_block` or `bottleneck_block`. layer_blocks: list of 3 or 4 `int`s denoting the number of blocks to include in each of the 3 or 4 block groups. Each group consists of blocks that take inputs of the same resolution. filters: list of 4 or 5 `int`s denoting the number of filter to include in block. data_format: `str`, "channels_first" `[batch, channels, height, width]` or "channels_last" `[batch, height, width, channels]`. is_training: bool, build in training mode or not. is_cifar: bool, whether the data is CIFAR or not. use_td: `str` one of "weight" or "unit". Set to False or "" to disable targeted dropout. targeting_rate: `float` proportion of weights to target with targeted dropout. keep_prob: `float` keep probability for targeted dropout. Returns: Pre-logit activations.
def resnet_v2(inputs, block_fn, layer_blocks, filters, data_format="channels_first", is_training=False, is_cifar=False, use_td=False, targeting_rate=None, keep_prob=None): """Resnet model. Args: inputs: `Tensor` images. block_fn: `function` for the block to use within the model. Either `residual_block` or `bottleneck_block`. layer_blocks: list of 3 or 4 `int`s denoting the number of blocks to include in each of the 3 or 4 block groups. Each group consists of blocks that take inputs of the same resolution. filters: list of 4 or 5 `int`s denoting the number of filter to include in block. data_format: `str`, "channels_first" `[batch, channels, height, width]` or "channels_last" `[batch, height, width, channels]`. is_training: bool, build in training mode or not. is_cifar: bool, whether the data is CIFAR or not. use_td: `str` one of "weight" or "unit". Set to False or "" to disable targeted dropout. targeting_rate: `float` proportion of weights to target with targeted dropout. keep_prob: `float` keep probability for targeted dropout. Returns: Pre-logit activations. """ inputs = block_layer( inputs=inputs, filters=filters[1], block_fn=block_fn, blocks=layer_blocks[0], strides=1, is_training=is_training, name="block_layer1", data_format=data_format, use_td=use_td, targeting_rate=targeting_rate, keep_prob=keep_prob) inputs = block_layer( inputs=inputs, filters=filters[2], block_fn=block_fn, blocks=layer_blocks[1], strides=2, is_training=is_training, name="block_layer2", data_format=data_format, use_td=use_td, targeting_rate=targeting_rate, keep_prob=keep_prob) inputs = block_layer( inputs=inputs, filters=filters[3], block_fn=block_fn, blocks=layer_blocks[2], strides=2, is_training=is_training, name="block_layer3", data_format=data_format, use_td=use_td, targeting_rate=targeting_rate, keep_prob=keep_prob) if not is_cifar: inputs = block_layer( inputs=inputs, filters=filters[4], block_fn=block_fn, blocks=layer_blocks[3], strides=2, is_training=is_training, name="block_layer4", data_format=data_format, use_td=use_td, targeting_rate=targeting_rate, keep_prob=keep_prob) return inputs
Set of hyperparameters.
def resnet_imagenet_34_td_weight_05_05():
  """Set of hyperparameters."""
  hp = resnet_imagenet_34()
  hp.use_td = "weight"
  hp.targeting_rate = 0.5
  hp.keep_prob = 0.5
  return hp
Set of hyperparameters.
def resnet_imagenet_34_td_unit_05_05():
  """Set of hyperparameters."""
  hp = resnet_imagenet_34()
  hp.use_td = "unit"
  hp.targeting_rate = 0.5
  hp.keep_prob = 0.5
  return hp
Set of hyperparameters.
def resnet_imagenet_34_td_unit_no_drop():
  """Set of hyperparameters."""
  hp = resnet_imagenet_34()
  hp.use_td = "unit"
  hp.targeting_rate = 0.0
  hp.keep_prob = 1.0
  return hp
Set of hyperparameters.
def resnet_cifar_15():
  """Set of hyperparameters."""
  hp = resnet_base()
  hp.block_fn = "residual"
  hp.is_cifar = True
  hp.layer_sizes = [2, 2, 2]
  hp.filter_sizes = [16, 32, 64, 128]
  return hp
Returns the length of the Longest Common Subsequence between two seqs. Source: http://www.algorithmist.com/index.php/Longest_Common_Subsequence Args: x: sequence of words y: sequence of words Returns integer: Length of LCS between x and y
def _len_lcs(x, y):
  """Returns the length of the Longest Common Subsequence between two seqs.

  Source: http://www.algorithmist.com/index.php/Longest_Common_Subsequence

  Args:
    x: sequence of words
    y: sequence of words

  Returns:
    integer: Length of LCS between x and y
  """
  table = _lcs(x, y)
  n, m = len(x), len(y)
  return table[n, m]
Computes the length of the LCS between two seqs. The implementation below uses a DP programming algorithm and runs in O(nm) time where n = len(x) and m = len(y). Source: http://www.algorithmist.com/index.php/Longest_Common_Subsequence Args: x: collection of words y: collection of words Returns: Table of dictionary of coord and len lcs
def _lcs(x, y):
  """Computes the length of the LCS between two seqs.

  The implementation below uses a DP programming algorithm and runs
  in O(nm) time where n = len(x) and m = len(y).

  Source: http://www.algorithmist.com/index.php/Longest_Common_Subsequence

  Args:
    x: collection of words
    y: collection of words

  Returns:
    Table of dictionary of coord and len lcs
  """
  n, m = len(x), len(y)
  table = {}
  for i in range(n + 1):
    for j in range(m + 1):
      if i == 0 or j == 0:
        table[i, j] = 0
      elif x[i - 1] == y[j - 1]:
        table[i, j] = table[i - 1, j - 1] + 1
      else:
        table[i, j] = max(table[i - 1, j], table[i, j - 1])
  return table
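A small usage sketch of the two LCS helpers above on tokenized sentences; the example sentences are made up for illustration.

x = "the cat sat on the mat".split()
y = "the cat lay on a mat".split()

table = _lcs(x, y)
print(_len_lcs(x, y))          # 4, i.e. "the cat on mat"
print(table[len(x), len(y)])   # same value, read straight from the DP table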
Computes ROUGE-L (sentence level) of two collections of sentences. Source: https://www.microsoft.com/en-us/research/publication/ rouge-a-package-for-automatic-evaluation-of-summaries/ Calculated according to: R_lcs = LCS(X,Y)/m P_lcs = LCS(X,Y)/n F_lcs = ((1 + beta^2)*R_lcs*P_lcs) / (R_lcs + (beta^2) * P_lcs) where: X = reference summary Y = Candidate summary m = length of reference summary n = length of candidate summary Args: eval_sentences: The sentences that have been picked by the summarizer ref_sentences: The sentences from the reference set Returns: A float: F_lcs
def rouge_l_sentence_level(eval_sentences, ref_sentences):
  """Computes ROUGE-L (sentence level) of two collections of sentences.

  Source: https://www.microsoft.com/en-us/research/publication/
  rouge-a-package-for-automatic-evaluation-of-summaries/

  Calculated according to:
  R_lcs = LCS(X,Y)/m
  P_lcs = LCS(X,Y)/n
  F_lcs = ((1 + beta^2)*R_lcs*P_lcs) / (R_lcs + (beta^2) * P_lcs)

  where:
  X = reference summary
  Y = Candidate summary
  m = length of reference summary
  n = length of candidate summary

  Args:
    eval_sentences: The sentences that have been picked by the summarizer
    ref_sentences: The sentences from the reference set

  Returns:
    A float: F_lcs
  """
  f1_scores = []
  for eval_sentence, ref_sentence in zip(eval_sentences, ref_sentences):
    m = len(ref_sentence)
    n = len(eval_sentence)
    lcs = _len_lcs(eval_sentence, ref_sentence)
    f1_scores.append(_f_lcs(lcs, m, n))
  return np.mean(f1_scores, dtype=np.float32)
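A hypothetical call with already-tokenized sentences; the _f_lcs helper (not shown here) turns the LCS length into the F_lcs value from the docstring.

hyp = ["the", "cat", "lay", "on", "a", "mat"]
ref = ["the", "cat", "sat", "on", "the", "mat"]

# One score per (hypothesis, reference) pair, averaged over the batch.
score = rouge_l_sentence_level([hyp], [ref])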
ROUGE scores computation between labels and predictions. This is an approximate ROUGE scoring method since we do not glue word pieces or decode the ids and tokenize the output. Args: predictions: tensor, model predictions labels: tensor, gold output. Returns: rouge_l_fscore: approx rouge-l f1 score.
def rouge_l_fscore(predictions, labels, **unused_kwargs):
  """ROUGE scores computation between labels and predictions.

  This is an approximate ROUGE scoring method since we do not glue word pieces
  or decode the ids and tokenize the output.

  Args:
    predictions: tensor, model predictions
    labels: tensor, gold output.

  Returns:
    rouge_l_fscore: approx rouge-l f1 score.
  """
  outputs = tf.to_int32(tf.argmax(predictions, axis=-1))
  # Convert the outputs and labels to a [batch_size, input_length] tensor.
  outputs = tf.squeeze(outputs, axis=[-1, -2])
  labels = tf.squeeze(labels, axis=[-1, -2])
  rouge_l_f_score = tf.py_func(rouge_l_sentence_level, (outputs, labels),
                               tf.float32)
  return rouge_l_f_score, tf.constant(1.0)
Calculates n-grams. Args: n: which n-grams to calculate text: An array of tokens Returns: A set of n-grams
def _get_ngrams(n, text):
  """Calculates n-grams.

  Args:
    n: which n-grams to calculate
    text: An array of tokens

  Returns:
    A set of n-grams
  """
  ngram_set = set()
  text_length = len(text)
  max_index_ngram_start = text_length - n
  for i in range(max_index_ngram_start + 1):
    ngram_set.add(tuple(text[i:i + n]))
  return ngram_set
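For example, the bigram set of a short token list (duplicates collapse because the result is a set); the token list is made up for illustration.

tokens = ["the", "cat", "sat", "on", "the", "mat"]
print(_get_ngrams(2, tokens))
# {('the', 'cat'), ('cat', 'sat'), ('sat', 'on'), ('on', 'the'), ('the', 'mat')}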
ROUGE-2 F1 score computation between labels and predictions. This is an approximate ROUGE scoring method since we do not glue word pieces or decode the ids and tokenize the output. Args: predictions: tensor, model predictions labels: tensor, gold output. Returns: rouge2_fscore: approx rouge-2 f1 score.
def rouge_2_fscore(predictions, labels, **unused_kwargs): """ROUGE-2 F1 score computation between labels and predictions. This is an approximate ROUGE scoring method since we do not glue word pieces or decode the ids and tokenize the output. Args: predictions: tensor, model predictions labels: tensor, gold output. Returns: rouge2_fscore: approx rouge-2 f1 score. """ outputs = tf.to_int32(tf.argmax(predictions, axis=-1)) # Convert the outputs and labels to a [batch_size, input_length] tensor. outputs = tf.squeeze(outputs, axis=[-1, -2]) labels = tf.squeeze(labels, axis=[-1, -2]) rouge_2_f_score = tf.py_func(rouge_n, (outputs, labels), tf.float32) return rouge_2_f_score, tf.constant(1.0)
Normalize the examples from different tasks so they can be merged. This function is specific to NLP tasks and normalizes them so that in the end the example only has "targets" and "task_id". For tasks that originally have inputs, this is done by appending task_id to the inputs and prepending targets, so normalized_targets = inputs task_id targets. For classification tasks, targets are constructed by spelling out the class. Args: task: the Problem class of the task we are normalizing. example: a dictionary of tensors, the example to normalize. is_infer: bool, whether we are performing inference or not. vocab_type: the type of vocabulary in use. vocab_offset: integer, offset index for subword vocabularies. max_input_length: maximum length to cut inputs to. max_target_length: maximum length to cut targets to. fixed_train_length: set length to this size if > 0. Returns: a dictionary of tensors, like example, after normalizing, which in this case means that it only has "targets" and "task_id" as feature.
def normalize_example_nlp(task, example, is_infer, vocab_type, vocab_offset, max_input_length, max_target_length, fixed_train_length): """Normalize the examples from different tasks so they can be merged. This function is specific to NLP tasks and normalizes them so that in the end the example only has "targets" and "task_id". For tasks that originally have inputs, this is done by appending task_id to the inputs and prepending targets, so normalized_targets = inputs task_id targets. For classification tasks, targets are constructed by spelling out the class. Args: task: the Problem class of the task we are normalizing. example: a dictionary of tensors, the example to normalize. is_infer: bool, whether we are performing inference or not. vocab_type: the type of vocabulary in use. vocab_offset: integer, offset index for subword vocabularies. max_input_length: maximum length to cut inputs to. max_target_length: maximum length to cut targets to. fixed_train_length: set length to this size if > 0. Returns: a dictionary of tensors, like example, after normalizing, which in this case means that it only has "targets" and "task_id" as feature. """ if task.has_inputs: example["inputs"] = example["inputs"][:-1] # remove EOS token if hasattr(task, "class_labels"): if vocab_type == text_problems.VocabType.CHARACTER: # TODO(urvashik): handle the case where num_labels > 9 example["targets"] = tf.cast(discretization.int_to_bit( example["targets"], 1, base=10) + 50, tf.int64) example["targets"] = tf.squeeze(example["targets"], axis=[-1]) elif vocab_type == text_problems.VocabType.SUBWORD: example["targets"] = vocab_offset + example["targets"] else: # sequence with inputs and targets eg: summarization if task.has_inputs: if max_input_length > 0: example["inputs"] = example["inputs"][:max_input_length] # Do not truncate targets during inference with beam decoding. if max_target_length > 0 and not is_infer: example["targets"] = example["targets"][:max_target_length] def make_constant_shape(x, size): x = x[:size] xlen = tf.shape(x)[0] x = tf.pad(x, [[0, size - xlen]]) return tf.reshape(x, [size]) if task.has_inputs: if is_infer: concat_list = [example["inputs"], [task.task_id]] example["inputs"] = tf.concat(concat_list, axis=0) else: inputs = example.pop("inputs") concat_list = [inputs, [task.task_id], example["targets"]] example["targets"] = tf.concat(concat_list, axis=0) if fixed_train_length > 0: example["targets"] = make_constant_shape( example["targets"], fixed_train_length) else: concat_list = [[task.task_id], example["targets"]] example["targets"] = tf.concat(concat_list, axis=0) if not is_infer and fixed_train_length > 0: example["targets"] = make_constant_shape( example["targets"], fixed_train_length) example["task_id"] = tf.constant([task.task_id], dtype=tf.int64) return example
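A toy sketch of the training-time normalization for a task that has inputs (all ids below are made up, with 1 standing for EOS and 32000 for a hypothetical task id):

inputs = [5, 6, 7, 1]      # hypothetical token ids, 1 = EOS
targets = [8, 9, 1]
task_id = 32000            # hypothetical task id
normalized_targets = inputs[:-1] + [task_id] + targets
# -> [5, 6, 7, 32000, 8, 9, 1]: the EOS is stripped from the inputs and the
# task id is spliced in between inputs and targets; "task_id" becomes [32000].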
A list of examples to a dataset containing mixed examples. Given a list of `n` dataset examples, flatten them by converting each element into a dataset and concatenating them to convert into a single dataset. Args: *args: A list containing one example each from `n` different datasets. Returns: flattened: A new dataset containing the examples from the list as part of a single dataset.
def flatten_zip_dataset(*args): """A list of examples to a dataset containing mixed examples. Given a list of `n` dataset examples, flatten them by converting each element into a dataset and concatenating them to convert into a single dataset. Args: *args: A list containing one example each from `n` different datasets. Returns: flattened: A new dataset containing the examples from the list as part of a single dataset. """ flattened = tf.data.Dataset.from_tensors(args[0]) for ex in args[1:]: flattened = flattened.concatenate(tf.data.Dataset.from_tensors(ex)) return flattened
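A small sketch of how such a helper is typically wired up with tf.data.Dataset.zip (the toy datasets below are placeholders):

import tensorflow as tf

ds_a = tf.data.Dataset.from_tensor_slices([1, 2, 3])
ds_b = tf.data.Dataset.from_tensor_slices([10, 20, 30])
# zip yields one example per dataset; flat_map flattens each zipped tuple back
# into a single interleaved dataset: 1, 10, 2, 20, 3, 30.
mixed = tf.data.Dataset.zip((ds_a, ds_b)).flat_map(flatten_zip_dataset)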
Multiproblem loss function.
def aggregate_task_losses(hparams, problem_hparams, logits, feature_name, feature): """Multiproblem loss function.""" # If no reweighting, we want the default loss to mimic the LM loss. if not hparams.multiproblem_reweight_label_loss: return aggregate_task_lm_losses(hparams=hparams, problem_hparams=problem_hparams, logits=logits, feature_name=feature_name, feature=feature) summaries = [] main_task_id = hparams.problem.task_list[0].task_id vocab_size = problem_hparams.vocab_size[feature_name] if vocab_size is not None and hasattr(hparams, "vocab_divisor"): vocab_size += (-vocab_size) % hparams.vocab_divisor modality = problem_hparams.modality[feature_name] loss = hparams.loss.get(feature_name, modalities.get_loss(modality)) weights_fn = hparams.weights_fn.get( feature_name, modalities.get_weights_fn(modality)) # Primary task loss loss_num, loss_den = loss( logits, feature, lambda x: common_layers.weights_multi_problem_all(x, main_task_id), hparams, vocab_size, weights_fn) loss_val = loss_num / tf.maximum(1.0, loss_den) summaries.append([hparams.problem.task_list[0].name+"_loss", loss_val]) # Since the losses may undergo rescaling, they cannot exist as separate # numerators and denominators. Set the denominators to 1 in order to faciliate # loss averaging. loss_num = loss_val loss_den = tf.minimum(tf.convert_to_tensor(1, dtype=tf.float32), loss_den) for task in hparams.problem.task_list[1:]: # Loss only from the input sequence -- the auxiliary LM loss. seq_loss_num, seq_loss_den = loss( logits, feature, lambda x: common_layers.weights_multi_problem_input(x, task.task_id), # pylint: disable=cell-var-from-loop hparams, vocab_size) seq_loss_num *= problem_hparams.loss_multiplier # Unscaled sequence loss. seq_loss = seq_loss_num / tf.maximum(1.0, seq_loss_den) summaries.append([task.name+"_seq_loss", seq_loss]) if hasattr(task, "num_classes"): # Loss only from the classification label. label_loss_num, label_loss_den = loss( logits, feature, lambda x: common_layers.weights_multi_problem(x, task.task_id), # pylint: disable=cell-var-from-loop hparams, vocab_size) label_loss_num *= problem_hparams.loss_multiplier # Unscaled classification label loss. label_loss = label_loss_num / tf.maximum(1.0, label_loss_den) summaries.append([task.name+"_label_loss", label_loss]) # Scaling. if hparams.multiproblem_reweight_label_loss: label_loss *= hparams.multiproblem_label_weight seq_loss *= (1 - hparams.multiproblem_label_weight) # This is the training loss for the optimizer after scaling. task_loss_val = seq_loss + label_loss loss_den_ = label_loss_den else: # Loss only from the target sequence. target_loss_num, target_loss_den = loss( logits, feature, lambda x: common_layers.weights_multi_problem(x, task.task_id), # pylint: disable=cell-var-from-loop hparams, vocab_size) target_loss_num *= problem_hparams.loss_multiplier # Unscaled target sequence loss. target_loss = target_loss_num / tf.maximum(1.0, target_loss_den) summaries.append([task.name+"_target_loss", target_loss]) # Scaling. if hparams.multiproblem_reweight_label_loss: target_loss *= hparams.multiproblem_label_weight seq_loss *= (1 - hparams.multiproblem_label_weight) # This is the training loss for the optimizer after all the scaling. task_loss_val = seq_loss + target_loss loss_den_ = target_loss_den summaries.append([task.name+"_loss", task_loss_val]) # Adding 1 to the loss den for each task leads to averaging task losses. # TODO(urvashik): Fix combination with other task losses - weighted # average based on the number of examples from that task. 
loss_num += task_loss_val loss_den += tf.minimum(tf.convert_to_tensor(1, dtype=tf.float32), loss_den_) return loss_num, loss_den, summaries
LM loss for multiproblems.
def aggregate_task_lm_losses(hparams, problem_hparams, logits, feature_name, feature): """LM loss for multiproblems.""" summaries = [] vocab_size = problem_hparams.vocab_size[feature_name] if vocab_size is not None and hasattr(hparams, "vocab_divisor"): vocab_size += (-vocab_size) % hparams.vocab_divisor modality = problem_hparams.modality[feature_name] loss = hparams.loss.get(feature_name, modalities.get_loss(modality)) weights_fn = hparams.weights_fn.get( feature_name, modalities.get_weights_fn(modality)) loss_num = 0. loss_den = 0. for task in hparams.problem.task_list: loss_num_, loss_den_ = loss( logits, feature, lambda x: common_layers.weights_multi_problem_all(x, task.task_id), # pylint: disable=cell-var-from-loop hparams, vocab_size, weights_fn) loss_num += loss_num_ loss_den += loss_den_ loss_val = loss_num_ / tf.maximum(1.0, loss_den_) summaries.append([task.name+"_loss", loss_val]) return loss_num, loss_den, summaries
Generate task_ids for each problem. These ids correspond to the index of the task in the task_list. Args: encoder_vocab_size: the size of the vocab which is used to compute the index offset.
def update_task_ids(self, encoder_vocab_size): """Generate task_ids for each problem. These ids correspond to the index of the task in the task_list. Args: encoder_vocab_size: the size of the vocab which is used to compute the index offset. """ for idx, task in enumerate(self.task_list): task.set_task_id(idx + encoder_vocab_size) tf.logging.info("Task %d (%s) has id %d." % (idx, task.name, task.task_id))
Compute the maximum number of classes any subtask has. This is useful for modifying the size of the softmax to include the output labels for the classification tasks. Currently, labels from different tasks are overloaded. Returns: num: Highest number of output classes in any text classification sub-task within this MultiProblem.
def get_max_num_classes(self): """Compute the maximum number of classes any subtask has. This is useful for modifying the size of the softmax to include the output labels for the classification tasks. Currently, labels from different tasks are overloaded. Returns: num: Highest number of output classes in any text classification sub-task within this MultiProblem. """ num = 0 for task in self.task_list: if hasattr(task, "num_classes"): if num < task.num_classes: num = task.num_classes return num
Called prior to self-attention, to incorporate memory items. Args: segment: an integer Tensor with shape [batch] query_antecedent: a Tensor with shape [batch, length_q, channels] memory_antecedent: must be None. Attention normally allows this to be a Tensor with shape [batch, length_m, channels], but we currently only support memory for decoder-side self-attention. bias: bias Tensor (see attention_bias()) Returns: (data, new_query_antecedent, new_memory_antecedent, new_bias)
def pre_attention(self, segment, query_antecedent, memory_antecedent, bias): """Called prior to self-attention, to incorporate memory items. Args: segment: an integer Tensor with shape [batch] query_antecedent: a Tensor with shape [batch, length_q, channels] memory_antecedent: must be None. Attention normally allows this to be a Tensor with shape [batch, length_m, channels], but we currently only support memory for decoder-side self-attention. bias: bias Tensor (see attention_bias()) Returns: (data, new_query_antecedent, new_memory_antecedent, new_bias) """ del segment return None, query_antecedent, memory_antecedent, bias
Called prior to self-attention, to incorporate memory items. Args: segment: an integer Tensor with shape [batch] query_antecedent: a Tensor with shape [batch, length_q, channels] memory_antecedent: must be None. Attention normally allows this to be a Tensor with shape [batch, length_m, channels], but we currently only support memory for decoder-side self-attention. bias: bias Tensor (see attention_bias()) Returns: (data, new_query_antecedent, new_memory_antecedent, new_bias)
def pre_attention(self, segment, query_antecedent, memory_antecedent, bias): """Called prior to self-attention, to incorporate memory items. Args: segment: an integer Tensor with shape [batch] query_antecedent: a Tensor with shape [batch, length_q, channels] memory_antecedent: must be None. Attention normally allows this to be a Tensor with shape [batch, length_m, channels], but we currently only support memory for decoder-side self-attention. bias: bias Tensor (see attention_bias()) Returns: (data, new_query_antecedent, new_memory_antecedent, new_bias) """ assert memory_antecedent is None, "We only support language modeling" # In eval mode, batch size may be variable memory_batch_size = tf.shape(self.previous_vals)[0] current_batch_size = tf.shape(query_antecedent)[0] amount_to_pad = memory_batch_size - current_batch_size # If segment id is zero, don't attend back to the memory previous_bias = self.previous_bias[:current_batch_size, :, :, :] + tf.cast( tf.equal(segment[:, None, None, None], 0), tf.float32) * -1e9 sliced_previous_vals = self.previous_vals[:current_batch_size, :, :] new_memory_antecedent = tf.concat( [tf.stop_gradient(sliced_previous_vals), query_antecedent], 1) new_bias = tf.concat([ tf.tile(tf.stop_gradient(previous_bias), [1, 1, self.chunk_length, 1]), tf.tile(bias, [current_batch_size, 1, 1, 1]), ], -1) remember_segment = tf.pad(segment, [[0, amount_to_pad]]) # TODO(kitaev): The code assumes that we always either increment the chunk # number or reset it to zero. This assumption will not hold if we re-run the # model for each token, e.g. for autoregressive greedy/beam/sampling decode. remember_vals = tf.pad(query_antecedent, [[0, amount_to_pad], [0, 0], [0, 0]]) # Query position is on axis -2 for bias: as long as a token can be attended # to from at least one query position (i.e. it's not padding), memorize it. remember_bias = tf.tile( tf.reduce_max(bias, -2, keepdims=True), [memory_batch_size, 1, 1, 1]) # Assume that query_antecedent is always a full chunk (i.e. not truncated) if self.chunk_length < self.tokens_to_cache: remember_vals = tf.concat([self.previous_vals, remember_vals], 1) remember_bias = tf.concat([ self.previous_bias - 1e9 * tf.cast( tf.equal( tf.pad(segment, [[0, amount_to_pad]])[:, None, None, None], 0), tf.float32), remember_bias ], -1) if self.chunk_length != self.tokens_to_cache: remember_vals = remember_vals[:, -self.tokens_to_cache:, :] remember_bias = remember_bias[:, :, :, -self.tokens_to_cache:] token = (remember_segment, remember_vals, remember_bias) return token, query_antecedent, new_memory_antecedent, new_bias
Called after self-attention. The memory can be updated here. Args: token: Data returned by pre_attention, which can be used to carry over state related to the current memory operation. x: a Tensor of data after self-attention and feed-forward Returns: a (possibly modified) version of the input x
def post_attention(self, token, x): """Called after self-attention. The memory can be updated here. Args: token: Data returned by pre_attention, which can be used to carry over state related to the current memory operation. x: a Tensor of data after self-attention and feed-forward Returns: a (possibly modified) version of the input x """ with tf.control_dependencies([ self.previous_segment.assign(token[0]), self.previous_vals.assign(token[1]), self.previous_bias.assign(token[2]), ]): return tf.identity(x)
Compute the safe norm.
def _norm(self, x): """Compute the safe norm.""" return tf.sqrt(tf.reduce_sum(tf.square(x), keepdims=True, axis=-1) + 1e-7)
Address the memory based on content similarity. Args: x: a tensor in the shape of [batch_size, length, depth]. Returns: the logits for each memory entry [batch_size, length, memory_size].
def _address_content(self, x): """Address the memory based on content similarity. Args: x: a tensor in the shape of [batch_size, length, depth]. Returns: the logits for each memory entry [batch_size, length, memory_size]. """ mem_keys = tf.layers.dense(self.mem_vals, self.key_depth, bias_initializer=tf.constant_initializer(1.0), name="mem_key") mem_query = tf.layers.dense(x, self.key_depth, bias_initializer=tf.constant_initializer(1.0), name="mem_query") norm = tf.matmul(self._norm(mem_query), self._norm(mem_keys), transpose_b=True) dot_product = tf.matmul(mem_query, mem_keys, transpose_b=True) cos_dist = tf.div(dot_product, norm + 1e-7, name="cos_dist") access_logits = self.sharpen_factor * cos_dist return access_logits
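A minimal numpy sketch of the content-addressing idea outside the class, ignoring the learned key/query projections (shapes and the sharpening factor are illustrative, not the class defaults):

import numpy as np

def cosine_access_logits(query, keys, sharpen_factor=2.0):
  # query: [length, depth], keys: [memory_size, depth]
  q_norm = np.linalg.norm(query, axis=-1, keepdims=True) + 1e-7
  k_norm = np.linalg.norm(keys, axis=-1, keepdims=True) + 1e-7
  cos_dist = (query @ keys.T) / (q_norm * k_norm.T)
  return sharpen_factor * cos_dist  # [length, memory_size]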
Read from the memory. An external component can use the results via a simple MLP, e.g., fn(x W_x + retrieved_mem W_m). Args: x: a tensor in the shape of [batch_size, length, depth]. Returns: access_logits: the logits for accessing the memory in shape of [batch_size, length, memory_size]. retrieved_mem: the retrieved results in the shape of [batch_size, length, val_depth].
def read(self, x): """Read from the memory. An external component can use the results via a simple MLP, e.g., fn(x W_x + retrieved_mem W_m). Args: x: a tensor in the shape of [batch_size, length, depth]. Returns: access_logits: the logits for accessing the memory in shape of [batch_size, length, memory_size]. retrieved_mem: the retrieved results in the shape of [batch_size, length, val_depth]. """ access_logits = self._address_content(x) weights = tf.nn.softmax(access_logits) retrieved_mem = tf.reduce_sum( tf.multiply(tf.expand_dims(weights, 3), tf.expand_dims(self.mem_vals, axis=1)), axis=2) return access_logits, retrieved_mem
Write to the memory based on a combination of similarity and least used. Based on arXiv:1607.00036v2 [cs.LG]. Args: x: a tensor in the shape of [batch_size, length, depth]. access_logits: the logits for accessing the memory. Returns: the update op.
def write(self, x, access_logits): """Write to the memory based on a combination of similarity and least used. Based on arXiv:1607.00036v2 [cs.LG]. Args: x: a tensor in the shape of [batch_size, length, depth]. access_logits: the logits for accessing the memory. Returns: the update op. """ gamma = tf.layers.dense(x, 1, activation=tf.sigmoid, name="gamma") write_logits = access_logits - gamma * tf.expand_dims(self.mean_logits, 1) candidate_value = tf.layers.dense(x, self.val_depth, activation=tf.nn.relu, name="candidate_value") erase_gates = tf.layers.dense(x, self.memory_size, activation=tf.nn.sigmoid, name="erase") write_weights = tf.nn.softmax(write_logits) erase_weights = tf.expand_dims(1 - erase_gates * write_weights, 3) erase = tf.multiply(erase_weights, tf.expand_dims(self.mem_vals, 1)) addition = tf.multiply( tf.expand_dims(write_weights, 3), tf.expand_dims(candidate_value, 2)) update_value_op = self.mem_vals.assign( tf.reduce_mean(erase + addition, axis=1)) with tf.control_dependencies([update_value_op]): write_op = self.mean_logits.assign( self.mean_logits * 0.1 + tf.reduce_mean(write_logits * 0.9, axis=1)) return write_op
Reset the entries in the memory. Args: entries_to_reset: a 1D tensor. Returns: the reset op.
def reset(self, entries_to_reset): """Reset the entries in the memory. Args: entries_to_reset: a 1D tensor. Returns: the reset op. """ num_updates = tf.size(entries_to_reset) update_vals = tf.scatter_update( self.mem_vals, entries_to_reset, tf.tile(tf.expand_dims( tf.fill([self.memory_size, self.val_depth], .0), 0), [num_updates, 1, 1])) update_logits = tf.scatter_update( self.mean_logits, entries_to_reset, tf.tile(tf.expand_dims( tf.fill([self.memory_size], .0), 0), [num_updates, 1])) reset_op = tf.group([update_vals, update_logits]) return reset_op
Called prior to self-attention, to incorporate memory items. Args: segment_number: an integer Tensor with shape [batch] query_antecedent: a Tensor with shape [batch, length_q, channels] memory_antecedent: must be None. Attention normally allows this to be a Tensor with shape [batch, length_m, channels], but we currently only support memory for decoder-side self-attention. bias: bias Tensor (see attention_bias()) Returns: (data, new_query_antecedent, new_memory_antecedent, new_bias)
def pre_attention(self, segment_number, query_antecedent, memory_antecedent, bias): """Called prior to self-attention, to incorporate memory items. Args: segment_number: an integer Tensor with shape [batch] query_antecedent: a Tensor with shape [batch, length_q, channels] memory_antecedent: must be None. Attention normally allows this to be a Tensor with shape [batch, length_m, channels], but we currently only support memory for decoder-side self-attention. bias: bias Tensor (see attention_bias()) Returns: (data, new_query_antecedent, new_memory_antecedent, new_bias) """ with tf.variable_scope(self.name + "/pre_attention", reuse=tf.AUTO_REUSE): assert memory_antecedent is None, "We only support language modeling" with tf.control_dependencies([ tf.assert_greater_equal(self.batch_size, tf.size(segment_number))]): difference = self.batch_size - tf.size(segment_number) segment_number = tf.pad(segment_number, [[0, difference]]) reset_op = self.reset(tf.reshape(tf.where( tf.less(segment_number, self.segment_number)), [-1])) memory_results = {} with tf.control_dependencies([reset_op]): with tf.control_dependencies([ self.update_segment_number(segment_number)]): x = tf.pad(query_antecedent, [ [0, difference], [0, 0], [0, 0]]) access_logits, retrieved_mem = self.read(x) memory_results["x"] = x memory_results["access_logits"] = access_logits memory_results["retrieved_mem"] = retrieved_mem return memory_results, query_antecedent, memory_antecedent, bias
Called after self-attention. The memory can be updated here. Args: token: Data returned by pre_attention, which can be used to carry over state related to the current memory operation. x: a Tensor of data after self-attention and feed-forward Returns: a (possibly modified) version of the input x
def post_attention(self, token, x): """Called after self-attention. The memory can be updated here. Args: token: Data returned by pre_attention, which can be used to carry over state related to the current memory operation. x: a Tensor of data after self-attention and feed-forward Returns: a (possibly modified) version of the input x """ with tf.variable_scope(self.name + "/post_attention", reuse=tf.AUTO_REUSE): depth = common_layers.shape_list(x)[-1] actual_batch_size = common_layers.shape_list(x)[0] memory_output = tf.gather(token["retrieved_mem"], tf.range(actual_batch_size)) output = tf.add(tf.layers.dense(x, depth, use_bias=False), tf.layers.dense(memory_output, depth)) with tf.control_dependencies([output]): with tf.control_dependencies([ self.write(token["x"], token["access_logits"])]): return tf.identity(output)
Define the training setup.
def _define_train( train_env, ppo_hparams, eval_env_fn=None, sampling_temp=1.0, **collect_kwargs ): """Define the training setup.""" memory, collect_summary, train_initialization = ( _define_collect( train_env, ppo_hparams, "ppo_train", eval_phase=False, sampling_temp=sampling_temp, **collect_kwargs)) ppo_summary = ppo.define_ppo_epoch( memory, ppo_hparams, train_env.action_space, train_env.batch_size) train_summary = tf.summary.merge([collect_summary, ppo_summary]) if ppo_hparams.eval_every_epochs: # TODO(koz4k): Do we need this at all? assert eval_env_fn is not None eval_env = eval_env_fn(in_graph=True) (_, eval_collect_summary, eval_initialization) = ( _define_collect( eval_env, ppo_hparams, "ppo_eval", eval_phase=True, sampling_temp=0.0, **collect_kwargs)) return (train_summary, eval_collect_summary, (train_initialization, eval_initialization)) else: return (train_summary, None, (train_initialization,))
Train.
def _run_train(ppo_hparams, event_dir, model_dir, restarter, train_summary_op, eval_summary_op, initializers, report_fn=None, model_save_fn=None): """Train.""" summary_writer = tf.summary.FileWriter( event_dir, graph=tf.get_default_graph(), flush_secs=60) model_saver = tf.train.Saver( tf.global_variables(ppo_hparams.policy_network + "/.*") + tf.global_variables("training/" + ppo_hparams.policy_network + "/.*") + # tf.global_variables("clean_scope.*") + # Needed for sharing params. tf.global_variables("global_step") + tf.global_variables("losses_avg.*") + tf.global_variables("train_stats.*") ) global_step = tf.train.get_or_create_global_step() with tf.control_dependencies([tf.assign_add(global_step, 1)]): train_summary_op = tf.identity(train_summary_op) with tf.Session() as sess: sess.run(tf.global_variables_initializer()) for initializer in initializers: initializer(sess) trainer_lib.restore_checkpoint(model_dir, model_saver, sess) num_target_iterations = restarter.target_local_step num_completed_iterations = num_target_iterations - restarter.steps_to_go with restarter.training_loop(): for epoch_index in range(num_completed_iterations, num_target_iterations): summary = sess.run(train_summary_op) if summary_writer: summary_writer.add_summary(summary, epoch_index) if (ppo_hparams.eval_every_epochs and epoch_index % ppo_hparams.eval_every_epochs == 0): eval_summary = sess.run(eval_summary_op) if summary_writer: summary_writer.add_summary(eval_summary, epoch_index) if report_fn: summary_proto = tf.Summary() summary_proto.ParseFromString(eval_summary) for elem in summary_proto.value: if "mean_score" in elem.tag: report_fn(elem.simple_value, epoch_index) break if (model_saver and ppo_hparams.save_models_every_epochs and (epoch_index % ppo_hparams.save_models_every_epochs == 0 or (epoch_index + 1) == num_target_iterations)): ckpt_path = os.path.join( model_dir, "model.ckpt-{}".format(tf.train.global_step(sess, global_step)) ) model_saver.save(sess, ckpt_path) if model_save_fn: model_save_fn(model_dir)
Metadata for rollouts.
def _rollout_metadata(batch_env): """Metadata for rollouts.""" batch_env_shape = batch_env.observ.get_shape().as_list() batch_size = [batch_env_shape[0]] shapes_types_names = [ # TODO(piotrmilos): possibly retrieve the observation type for batch_env (batch_size + batch_env_shape[1:], batch_env.observ_dtype, "observation"), (batch_size, tf.float32, "reward"), (batch_size, tf.bool, "done"), (batch_size + list(batch_env.action_shape), batch_env.action_dtype, "action"), (batch_size, tf.float32, "pdf"), (batch_size, tf.float32, "value_function"), ] return shapes_types_names
Collect trajectories. Args: batch_env: Batch environment. ppo_hparams: PPO hparams, defined in tensor2tensor.models.research.rl. scope: var scope. frame_stack_size: Number of last observations to feed into the policy. eval_phase: TODO(koz4k): Write docstring. sampling_temp: Sampling temperature for the policy. force_beginning_resets: Whether to reset at the beginning of each episode. Returns: Returns memory (observations, rewards, dones, actions, pdfs, values_functions) containing a rollout of environment from nested wrapped structure.
def _define_collect(batch_env, ppo_hparams, scope, frame_stack_size, eval_phase, sampling_temp, force_beginning_resets): """Collect trajectories. Args: batch_env: Batch environment. ppo_hparams: PPO hparams, defined in tensor2tensor.models.research.rl. scope: var scope. frame_stack_size: Number of last observations to feed into the policy. eval_phase: TODO(koz4k): Write docstring. sampling_temp: Sampling temperature for the policy. force_beginning_resets: Whether to reset at the beginning of each episode. Returns: Returns memory (observations, rewards, dones, actions, pdfs, values_functions) containing a rollout of environment from nested wrapped structure. """ epoch_length = ppo_hparams.epoch_length to_initialize = [] with tf.variable_scope(scope, reuse=tf.AUTO_REUSE): num_agents = batch_env.batch_size to_initialize.append(batch_env) wrappers = [(StackWrapper, { "history": frame_stack_size }), (_MemoryWrapper, {})] rollout_metadata = None speculum = None for w in wrappers: tf.logging.info("Applying wrapper %s(%s) to env %s." % (str( w[0]), str(w[1]), str(batch_env))) batch_env = w[0](batch_env, **w[1]) to_initialize.append(batch_env) rollout_metadata = _rollout_metadata(batch_env) speculum = batch_env.speculum def initialization_lambda(sess): for batch_env in to_initialize: batch_env.initialize(sess) memory = [ tf.get_variable( # pylint: disable=g-complex-comprehension "collect_memory_%d_%s" % (epoch_length, name), shape=[epoch_length] + shape, dtype=dtype, initializer=tf.zeros_initializer(), trainable=False) for (shape, dtype, name) in rollout_metadata ] cumulative_rewards = tf.get_variable( "cumulative_rewards", len(batch_env), trainable=False) eval_phase_t = tf.convert_to_tensor(eval_phase) should_reset_var = tf.Variable(True, trainable=False) zeros_tensor = tf.zeros(len(batch_env)) force_beginning_resets = tf.convert_to_tensor(force_beginning_resets) def reset_ops_group(): return tf.group( batch_env.reset(tf.range(len(batch_env))), tf.assign(cumulative_rewards, zeros_tensor)) reset_op = tf.cond( tf.logical_or(should_reset_var.read_value(), force_beginning_resets), reset_ops_group, tf.no_op) with tf.control_dependencies([reset_op]): reset_once_op = tf.assign(should_reset_var, False) with tf.control_dependencies([reset_once_op]): def step(index, scores_sum, scores_num): """Single step.""" index %= epoch_length # Only needed in eval runs. # Note - the only way to ensure making a copy of tensor is to run simple # operation. 
We are waiting for tf.copy: # https://github.com/tensorflow/tensorflow/issues/11186 obs_copy = batch_env.observ + 0 def env_step(arg1, arg2, arg3): # pylint: disable=unused-argument """Step of the environment.""" (logits, value_function) = get_policy( obs_copy, ppo_hparams, batch_env.action_space ) action = common_layers.sample_with_temperature(logits, sampling_temp) action = tf.cast(action, tf.int32) action = tf.reshape(action, shape=(num_agents,)) reward, done = batch_env.simulate(action) pdf = tfp.distributions.Categorical(logits=logits).prob(action) pdf = tf.reshape(pdf, shape=(num_agents,)) value_function = tf.reshape(value_function, shape=(num_agents,)) done = tf.reshape(done, shape=(num_agents,)) with tf.control_dependencies([reward, done]): return tf.identity(pdf), tf.identity(value_function), \ tf.identity(done) # TODO(piotrmilos): while_body is executed at most once, # thus should be replaced with tf.cond pdf, value_function, top_level_done = tf.while_loop( lambda _1, _2, _3: tf.equal(speculum.size(), 0), env_step, [ tf.constant(0.0, shape=(num_agents,)), tf.constant(0.0, shape=(num_agents,)), tf.constant(False, shape=(num_agents,)) ], parallel_iterations=1, back_prop=False, ) with tf.control_dependencies([pdf, value_function]): obs, reward, done, action = speculum.dequeue() to_save = [obs, reward, done, action, pdf, value_function] save_ops = [ tf.scatter_update(memory_slot, index, value) for memory_slot, value in zip(memory, to_save) ] cumulate_rewards_op = cumulative_rewards.assign_add(reward) agent_indices_to_reset = tf.where(top_level_done)[:, 0] with tf.control_dependencies([cumulate_rewards_op]): # TODO(piotrmilos): possibly we need cumulative_rewards.read_value() scores_sum_delta = tf.reduce_sum( tf.gather(cumulative_rewards.read_value(), agent_indices_to_reset)) scores_num_delta = tf.count_nonzero(done, dtype=tf.int32) with tf.control_dependencies(save_ops + [scores_sum_delta, scores_num_delta]): reset_env_op = batch_env.reset(agent_indices_to_reset) reset_cumulative_rewards_op = tf.scatter_update( cumulative_rewards, agent_indices_to_reset, tf.gather(zeros_tensor, agent_indices_to_reset)) with tf.control_dependencies([reset_env_op, reset_cumulative_rewards_op]): return [ index + 1, scores_sum + scores_sum_delta, scores_num + scores_num_delta ] def stop_condition(i, _, resets): return tf.cond(eval_phase_t, lambda: resets < num_agents, lambda: i < epoch_length) init = [tf.constant(0), tf.constant(0.0), tf.constant(0)] index, scores_sum, scores_num = tf.while_loop( stop_condition, step, init, parallel_iterations=1, back_prop=False) # We handle force_beginning_resets differently. We assume that all envs are # reseted at the end of episod (though it happens at the beginning of the # next one scores_num = tf.cond(force_beginning_resets, lambda: scores_num + len(batch_env), lambda: scores_num) with tf.control_dependencies([scores_sum]): scores_sum = tf.cond( force_beginning_resets, lambda: scores_sum + tf.reduce_sum(cumulative_rewards.read_value()), lambda: scores_sum) mean_score = tf.cond( tf.greater(scores_num, 0), lambda: scores_sum / tf.cast(scores_num, tf.float32), lambda: 0.) printing = tf.Print(0, [mean_score, scores_sum, scores_num], "mean_score: ") with tf.control_dependencies([index, printing]): memory = [mem.read_value() for mem in memory] # When generating real data together with PPO training we must use single # agent. For PPO to work we reshape the history, as if it was generated # by real_ppo_effective_num_agents. 
if ppo_hparams.effective_num_agents is not None and not eval_phase: new_memory = [] effective_num_agents = ppo_hparams.effective_num_agents assert epoch_length % ppo_hparams.effective_num_agents == 0, ( "The rollout of ppo_hparams.epoch_length will be distributed amongst" "effective_num_agents of agents") new_epoch_length = int(epoch_length / effective_num_agents) for mem, info in zip(memory, rollout_metadata): shape, _, name = info new_shape = [effective_num_agents, new_epoch_length] + shape[1:] perm = list(range(len(shape) + 1)) perm[0] = 1 perm[1] = 0 mem = tf.transpose(mem, perm=perm) mem = tf.reshape(mem, shape=new_shape) mem = tf.transpose( mem, perm=perm, name="collect_memory_%d_%s" % (new_epoch_length, name)) new_memory.append(mem) memory = new_memory with tf.variable_scope(scope, reuse=tf.AUTO_REUSE): mean_score_summary = tf.cond( tf.greater(scores_num, 0), lambda: tf.summary.scalar("mean_score_this_iter", mean_score), str) summaries = tf.summary.merge([ mean_score_summary, tf.summary.scalar("episodes_finished_this_iter", scores_num) ]) return memory, summaries, initialization_lambda
Deconvolution layer.
def deconv2d( input_, output_shape, k_h, k_w, d_h, d_w, stddev=0.02, name="deconv2d"): """Deconvolution layer.""" with tf.variable_scope(name): w = tf.get_variable( "w", [k_h, k_w, output_shape[-1], input_.get_shape()[-1]], initializer=tf.random_normal_initializer(stddev=stddev)) deconv = tf.nn.conv2d_transpose( input_, w, output_shape=output_shape, strides=[1, d_h, d_w, 1]) biases = tf.get_variable( "biases", [output_shape[-1]], initializer=tf.constant_initializer(0.0)) return tf.reshape(tf.nn.bias_add(deconv, biases), deconv.get_shape())
Basic parameters for a vanilla_gan.
def sliced_gan(): """Basic parameters for a vanilla_gan.""" hparams = common_hparams.basic_params1() hparams.optimizer = "adam" hparams.learning_rate_constant = 0.0002 hparams.learning_rate_warmup_steps = 500 hparams.learning_rate_schedule = "constant * linear_warmup" hparams.label_smoothing = 0.0 hparams.batch_size = 128 hparams.hidden_size = 128 hparams.initializer = "uniform_unit_scaling" hparams.initializer_gain = 1.0 hparams.weight_decay = 1e-6 hparams.kernel_height = 4 hparams.kernel_width = 4 hparams.bottleneck_bits = 128 hparams.add_hparam("discriminator_batchnorm", True) hparams.add_hparam("num_sliced_vecs", 4096) return hparams
Discriminator architecture based on InfoGAN.

  Args:
    x: input images, shape [bs, h, w, channels]
    is_training: boolean, are we in train or eval mode.
    reuse: boolean, should params be re-used.

  Returns:
    out_logit: the output logits (before sigmoid).
def discriminator(self, x, is_training, reuse=False):
  """Discriminator architecture based on InfoGAN.

  Args:
    x: input images, shape [bs, h, w, channels]
    is_training: boolean, are we in train or eval mode.
    reuse: boolean, should params be re-used.

  Returns:
    out_logit: the output logits (before sigmoid).
  """
  hparams = self.hparams
  with tf.variable_scope(
      "discriminator", reuse=reuse,
      initializer=tf.random_normal_initializer(stddev=0.02)):
    batch_size, height, width = common_layers.shape_list(x)[:3]
    # Map x from [bs, h, w, c] to a feature vector of shape [bs, 1024].
    net = tf.layers.conv2d(x, 64, (4, 4), strides=(2, 2),
                           padding="SAME", name="d_conv1")
    # [bs, h/2, w/2, 64]
    net = lrelu(net)
    net = tf.layers.conv2d(net, 128, (4, 4), strides=(2, 2),
                           padding="SAME", name="d_conv2")
    # [bs, h/4, w/4, 128]
    if hparams.discriminator_batchnorm:
      net = tf.layers.batch_normalization(net, training=is_training,
                                          momentum=0.999, name="d_bn2")
    net = lrelu(net)
    size = height * width
    net = tf.reshape(net, [batch_size, size * 8])  # [bs, h * w * 8]
    net = tf.layers.dense(net, 1024, name="d_fc3")  # [bs, 1024]
    if hparams.discriminator_batchnorm:
      net = tf.layers.batch_normalization(net, training=is_training,
                                          momentum=0.999, name="d_bn3")
    net = lrelu(net)
    return net
Generator outputting image in [0, 1].
def generator(self, z, is_training, out_shape): """Generator outputting image in [0, 1].""" hparams = self.hparams height, width, c_dim = out_shape batch_size = hparams.batch_size with tf.variable_scope( "generator", initializer=tf.random_normal_initializer(stddev=0.02)): net = tf.layers.dense(z, 1024, name="g_fc1") net = tf.layers.batch_normalization(net, training=is_training, momentum=0.999, name="g_bn1") net = lrelu(net) net = tf.layers.dense(net, 128 * (height // 4) * (width // 4), name="g_fc2") net = tf.layers.batch_normalization(net, training=is_training, momentum=0.999, name="g_bn2") net = lrelu(net) net = tf.reshape(net, [batch_size, height // 4, width // 4, 128]) net = deconv2d(net, [batch_size, height // 2, width // 2, 64], 4, 4, 2, 2, name="g_dc3") net = tf.layers.batch_normalization(net, training=is_training, momentum=0.999, name="g_bn3") net = lrelu(net) net = deconv2d(net, [batch_size, height, width, c_dim], 4, 4, 2, 2, name="g_dc4") out = tf.nn.sigmoid(net) return common_layers.convert_real_to_rgb(out)
Body of the model. Args: features: a dictionary with the tensors. Returns: A pair (predictions, losses) where predictions is the generated image and losses is a dictionary of losses (that get added for the final loss).
def body(self, features):
  """Body of the model.

  Args:
    features: a dictionary with the tensors.

  Returns:
    A pair (predictions, losses) where predictions is the generated image
    and losses is a dictionary of losses (that get added for the final loss).
  """
  features["targets"] = features["inputs"]
  is_training = self.hparams.mode == tf.estimator.ModeKeys.TRAIN

  # Input images.
  inputs = tf.to_float(features["targets_raw"])

  # Noise vector.
  z = tf.random_uniform([self.hparams.batch_size,
                         self.hparams.bottleneck_bits],
                        minval=-1, maxval=1, name="z")

  # Generator output: fake images.
  out_shape = common_layers.shape_list(inputs)[1:4]
  g = self.generator(z, is_training, out_shape)

  losses = self.losses(inputs, g)  # pylint: disable=not-callable

  summary_g_image = tf.reshape(
      g[0, :], [1] + common_layers.shape_list(inputs)[1:])
  tf.summary.image("generated", summary_g_image, max_outputs=1)

  if is_training:  # Return a dummy output and the losses dictionary.
    return tf.zeros_like(inputs), losses
  return tf.reshape(g, tf.shape(inputs)), losses
Make Inputs for built-in datasets. Args: num_devices: how many devices to build the inputs for. dataset_name: a TFDS or T2T dataset name. If it's a T2T dataset name, prefix with "t2t_". data_dir: data directory. input_name: optional, name of the inputs from the dictionary. num_chunks: optional, into how many pieces should we chunk (large inputs). append_targets: optional, instead of inputs return a pair (inputs, targets) which is useful for autoregressive models. Returns: trax.inputs.Inputs
def inputs(num_devices, dataset_name, data_dir=None, input_name=None, num_chunks=0, append_targets=False): """Make Inputs for built-in datasets. Args: num_devices: how many devices to build the inputs for. dataset_name: a TFDS or T2T dataset name. If it's a T2T dataset name, prefix with "t2t_". data_dir: data directory. input_name: optional, name of the inputs from the dictionary. num_chunks: optional, into how many pieces should we chunk (large inputs). append_targets: optional, instead of inputs return a pair (inputs, targets) which is useful for autoregressive models. Returns: trax.inputs.Inputs """ assert data_dir, "Must provide a data directory" data_dir = os.path.expanduser(data_dir) (train_batches, train_eval_batches, eval_batches, input_name, input_shape) = _train_and_eval_batches( dataset_name, data_dir, input_name, num_devices) def numpy_stream(dataset): return dataset_to_stream( dataset, input_name, num_chunks=num_chunks, append_targets=append_targets) if num_chunks > 0: length = input_shape[0] input_shape = tuple( [tuple([length // num_chunks] + list(input_shape)[1:])] * num_chunks) return Inputs(train_stream=lambda: numpy_stream(train_batches), train_eval_stream=lambda: numpy_stream(train_eval_batches), eval_stream=lambda: numpy_stream(eval_batches), input_shape=input_shape)
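A hypothetical direct call (in trax these arguments are normally supplied through gin bindings; the dataset name and data directory below are placeholders):

trax_inputs = inputs(
    num_devices=1,
    dataset_name="mnist",
    data_dir="~/tensorflow_datasets")
batch_stream = trax_inputs.train_stream()
first_batch = next(batch_stream)  # a (inputs, targets) pair of numpy arrays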
Make random Inputs for debugging. Args: num_devices: how many devices to build the inputs for. input_shape: the shape of inputs (including batch dimension). input_dtype: the type of the inputs (int32 by default). input_range: the range of inputs (defaults to (0, 255)). output_shape: the shape of outputs (including batch dimension). output_dtype: the type of the outputs (int32 by default). output_range: the range of outputs (defaults to (0, 9)). Returns: trax.inputs.Inputs
def random_inputs( num_devices, input_shape=gin.REQUIRED, input_dtype=np.int32, input_range=(0, 255), output_shape=gin.REQUIRED, output_dtype=np.int32, output_range=(0, 9)): """Make random Inputs for debugging. Args: num_devices: how many devices to build the inputs for. input_shape: the shape of inputs (including batch dimension). input_dtype: the type of the inputs (int32 by default). input_range: the range of inputs (defaults to (0, 255)). output_shape: the shape of outputs (including batch dimension). output_dtype: the type of the outputs (int32 by default). output_range: the range of outputs (defaults to (0, 9)). Returns: trax.inputs.Inputs """ if input_shape[0] % num_devices != 0: tf.logging.fatal( "num_devices[%d] should divide the first dimension of input_shape[%s]", num_devices, input_shape) if output_shape[0] % num_devices != 0: tf.logging.fatal( "num_devices[%d] should divide the first dimension of output_shape[%s]", num_devices, output_shape) def random_minibatches(): """Generate a stream of random mini-batches.""" if input_dtype in [np.float16, np.float32, np.float64]: rand = np.random.uniform else: rand = np.random.random_integers while True: inp = rand(input_range[0], input_range[1], input_shape) inp = inp.astype(input_dtype) out = rand(output_range[0], output_range[1], output_shape) out = out.astype(output_dtype) yield inp, out input_shape_without_batch = list(input_shape)[1:] return Inputs(train_stream=random_minibatches, train_eval_stream=random_minibatches, eval_stream=random_minibatches, input_shape=input_shape_without_batch)
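A debugging sketch with random data (the shapes are arbitrary; their first dimension must be divisible by num_devices):

random_in = random_inputs(
    num_devices=1,
    input_shape=(32, 28, 28, 1),
    output_shape=(32, 1))
batch_inputs, batch_outputs = next(random_in.train_stream())
# batch_inputs.shape == (32, 28, 28, 1) with int32 values in [0, 255].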
Takes a tf.Dataset and creates a numpy stream of ready batches.
def dataset_to_stream(dataset, input_name, num_chunks=0, append_targets=False): """Takes a tf.Dataset and creates a numpy stream of ready batches.""" for example in tfds.as_numpy(dataset): inp, out = example[0][input_name], example[1] if len(out.shape) > 1 and out.shape[-1] == 1: out = np.squeeze(out, axis=-1) if num_chunks > 0: inp = np.split(inp, num_chunks, axis=1) out = np.split(out, num_chunks, axis=1) if append_targets: inp = (inp, out) yield inp, out
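A consumption sketch, assuming train_batches is one of the batched datasets produced by _train_and_eval_batches below and "inputs" is the feature name to read (both names are placeholders here):

stream = dataset_to_stream(train_batches, "inputs")
batch_inputs, batch_targets = next(stream)  # numpy arrays, one batch each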
Return train and evaluation datasets, feature info and supervised keys.
def _train_and_eval_dataset_v1(problem_name, data_dir): """Return train and evaluation datasets, feature info and supervised keys.""" assert not tf.executing_eagerly(), "tf.eager mode must be turned off." problem = t2t_problems.problem(problem_name) train_dataset = problem.dataset(tf.estimator.ModeKeys.TRAIN, data_dir) train_dataset = train_dataset.map(_select_features) eval_dataset = problem.dataset(tf.estimator.ModeKeys.EVAL, data_dir) eval_dataset = eval_dataset.map(_select_features) hparams = problem.get_hparams() # We take a few training examples to guess the shapes. input_shapes, target_shapes = [], [] example_tensor = train_dataset.make_one_shot_iterator().get_next() sess = tf.Session() example1 = sess.run(example_tensor) example2 = sess.run(example_tensor) example3 = sess.run(example_tensor) # We use "inputs" as input except for purely auto-regressive tasks like # language models where "targets" are used as input_key. input_key = "inputs" if "inputs" in example1 else "targets" supervised_keys = ([input_key], ["targets"]) for example in [example1, example2, example3]: input_shapes.append(list(example[input_key].shape)) target_shapes.append(list(example["targets"].shape)) input_vocab_size = hparams.vocab_size[input_key] target_vocab_size = hparams.vocab_size["targets"] input_info = _make_info(input_shapes, input_vocab_size) target_info = _make_info(target_shapes, target_vocab_size) info = {input_key: input_info, "targets": target_info} return train_dataset, eval_dataset, info, supervised_keys
Batching function.
def batch_fun(dataset, training, shapes, target_names, num_devices,
              batch_size_per_device=32, batch_size=None, eval_batch_size=32,
              bucket_length=32, buckets=None,
              batch_shuffle_size=128, max_eval_length=None):
  """Batching function."""
  del target_names
  # Batch size is batch_size_per_device * num_devices unless given directly.
  batch_size = batch_size or batch_size_per_device * num_devices
  # If bucketing is not specified, check if target shapes are variable.
  cur_batch_size = batch_size if training else eval_batch_size
  # Make cur_batch_size divisible by num_devices.
  cur_batch_size = max(cur_batch_size // num_devices, 1) * num_devices
  # Create heuristic buckets if none are specified.
  if buckets is None:
    variable_target_shapes = False
    target_shape = shapes[1]
    for dim in target_shape:
      if dim is None:
        variable_target_shapes = True
    tf.logging.info("Heuristically setting bucketing to %s based on shapes "
                    "of target tensors." % variable_target_shapes)
    if variable_target_shapes:
      bucket_boundaries = [bucket_length // 4, bucket_length // 2,
                           bucket_length, bucket_length * 2,
                           bucket_length * 4, bucket_length * 8,
                           bucket_length * 16]
      # We will pad to boundaries which pads to bucket_boundary - 1: add 1 here.
      bucket_boundaries = [b + 1 for b in bucket_boundaries]
      if not training:
        max_eval_length = max_eval_length or bucket_length * 32
        bucket_boundaries[-1] = max_eval_length
      bucket_batch_sizes = [cur_batch_size * 4, cur_batch_size * 2,
                            cur_batch_size, cur_batch_size // 2,
                            cur_batch_size // 4, cur_batch_size // 8,
                            cur_batch_size // 16, 1]
      if not training:
        bucket_batch_sizes[-2] = cur_batch_size // max_eval_length
      # Make batch sizes divisible by num_devices.
      bucket_batch_sizes = [max(b // num_devices, 1) * num_devices
                            for b in bucket_batch_sizes]
      buckets = (bucket_boundaries, bucket_batch_sizes)

  if buckets:
    tf.logging.info("Bucketing with buckets %s." % str(buckets))
    def example_length(_, target):
      return tf.shape(target)[0]
    boundaries, batch_sizes = buckets
    dataset = dataset.apply(tf.data.experimental.bucket_by_sequence_length(
        example_length, boundaries, batch_sizes,
        pad_to_bucket_boundary=True))
  else:
    dataset = dataset.padded_batch(cur_batch_size, shapes)
  if training:
    return dataset.shuffle(batch_shuffle_size)
  return dataset
Preprocessing for LM1B: filter out targets exceeding maximum length.
def lm1b_preprocess(dataset, training, max_target_length=-1, max_eval_target_length=-1): """Preprocessing for LM1B: filter out targets exceeding maximum length.""" def target_right_length(_, target): return tf.less(tf.shape(target)[0], max_target_length + 1) def eval_target_right_length(_, target): return tf.less(tf.shape(target)[0], max_eval_target_length + 1) if max_target_length > 0 and training: dataset = dataset.filter(target_right_length) if max_eval_target_length > 0 and not training: dataset = dataset.filter(eval_target_right_length) return dataset
Shuffle and batch the given dataset.
def shuffle_and_batch_data(dataset, target_names, features_info, training, num_devices, shuffle_buffer_size=1024, preprocess_fun=no_preprocess): """Shuffle and batch the given dataset.""" def append_targets(example): """Append targets to the example dictionary. Needed for Keras.""" if len(target_names) == 1: return (example, example[target_names[0]]) targets = {} for name in target_names: targets[name] = example[name] return (example, targets) dataset = dataset.map(append_targets) if training: dataset = dataset.repeat() # Skip a random fraction at the beginning of the stream. The skip is # essential for synchronous highly-parallel training to avoid multiple # replicas reading the same data in lock-step. dataset = dataset.skip(random.randint(0, _MAX_SKIP_EXAMPLES)) dataset = preprocess_fun(dataset, training) shapes = {k: features_info[k].shape for k in features_info} shapes = (shapes, shapes[target_names[0]]) dataset = dataset.shuffle(shuffle_buffer_size) dataset = batch_fun(dataset, training, shapes, target_names, num_devices) return dataset.prefetch(2)
Return train and eval batches with input name and shape.
def _train_and_eval_batches(dataset, data_dir, input_name, num_devices): """Return train and eval batches with input name and shape.""" (train_data, eval_data, features_info, keys) = train_and_eval_dataset( dataset, data_dir) input_names, target_names = keys[0], keys[1] train_batches = shuffle_and_batch_data( train_data, target_names, features_info, training=True, num_devices=num_devices) train_eval_batches = shuffle_and_batch_data( # Data for eval-on-train. train_data, target_names, features_info, training=False, num_devices=num_devices) eval_batches = shuffle_and_batch_data( eval_data, target_names, features_info, training=False, num_devices=num_devices) input_name = input_name or input_names[0] input_shape = features_info[input_name].shape return (train_batches, train_eval_batches, eval_batches, input_name, list(input_shape))
Returns a Dataset that samples records from one or more Datasets. Args: datasets: A list of one or more Dataset objects to sample from. pmf: A tensor of shape [len(datasets)], the probabilities to sample each dataset with. This tensor is often constructed with the global_step. If this is None, we sample from the datasets uniformly at random. Returns: A Dataset object containing records from multiple datasets. Note that because this dataset iterates through other datasets it is stateful, thus you will need to call make_initializable_iterator instead of make_one_shot_iterator.
def get_multi_dataset(datasets, pmf=None): """Returns a Dataset that samples records from one or more Datasets. Args: datasets: A list of one or more Dataset objects to sample from. pmf: A tensor of shape [len(datasets)], the probabilities to sample each dataset with. This tensor is often constructed with the global_step. If this is None, we sample from the datasets uniformly at random. Returns: A Dataset object containing records from multiple datasets. Note that because this dataset iterates through other datasets it is stateful, thus you will need to call make_initializable_iterator instead of make_one_shot_iterator. """ pmf = tf.fill([len(datasets)], 1.0 / len(datasets)) if pmf is None else pmf samplers = [d.repeat().make_one_shot_iterator().get_next for d in datasets] sample = lambda _: categorical_case(pmf, samplers) return tf.data.Dataset.from_tensors([]).repeat().map(sample)
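A small sketch mixing two toy datasets with a fixed sampling distribution (the 80/20 split is arbitrary):

import tensorflow as tf

datasets = [
    tf.data.Dataset.from_tensor_slices(tf.zeros([100], tf.int32)),
    tf.data.Dataset.from_tensor_slices(tf.ones([100], tf.int32)),
]
mixed = get_multi_dataset(datasets, pmf=tf.constant([0.8, 0.2]))
# Stateful because of the inner iterators, so use an initializable iterator.
iterator = mixed.make_initializable_iterator()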
Computes the pmf of a schedule given the global_step. Args: schedule: A schedule tuple, see encode_schedule for details. global_step: A scalar tensor, the step to query the schedule. Returns: A 1-D tensor of probs, the sampling distribution of the global_step.
def get_schedule_distribution(schedule, global_step=None): """Computes the pmf of a schedule given the global_step. Args: schedule: A schedule tuple, see encode_schedule for details. global_step: A scalar tensor, the step to query the schedule. Returns: A 1-D tensor of probs, the sampling distribution of the global_step. """ interpolation, steps, pmfs = schedule if len(pmfs) == 1: # py_func doesn't seem to work on TPU - at least get the constant case to # run. # TODO(noam): get the general case working. return pmfs[0] if global_step is None: global_step = tf.train.get_or_create_global_step() if interpolation == 'step': interpolation_fn = step_interpolation elif interpolation == 'linear': interpolation_fn = linear_interpolation else: raise ValueError('Invalid interpolation strategy: %s' % interpolation) return tf.reshape( tf.py_func( func=lambda x: interpolation_fn(x, np.array(steps), np.array(pmfs)), inp=[global_step], Tout=tf.float32), [len(pmfs[0])])
Returns the outputs of fns[i] with probability pmf[i]. Args: pmf: A 1-D tensor of probabilities, the probability mass function. fns: A list of callables that return tensors, same length as pmf. rand: An optional scalar between 0.0 and 1.0, the output of an RNG. Returns: A tensor, the output of fns[i] with probability pmf[i].
def categorical_case(pmf, fns, rand=None): """Returns the outputs of fns[i] with probability pmf[i]. Args: pmf: A 1-D tensor of probabilities, the probability mass function. fns: A list of callables that return tensors, same length as pmf. rand: An optional scalar between 0.0 and 1.0, the output of an RNG. Returns: A tensor, the output of fns[i] with probability pmf[i]. """ rand = tf.random_uniform([]) if rand is None else rand cmf = tf.pad(tf.cumsum(pmf), [(1, 0)]) cmf = [cmf[i] for i in range(len(fns) + 1)] preds = [(rand >= a) & (rand < b) for a, b in zip(cmf[:-1], cmf[1:])] return tf.case(list(zip(preds, fns)), exclusive=True)
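For example, drawing one of two constants with probabilities 0.3 and 0.7 (the branch values are arbitrary):

import tensorflow as tf

pmf = tf.constant([0.3, 0.7])
value = categorical_case(pmf, [lambda: tf.constant(1), lambda: tf.constant(2)])
# `value` evaluates to 1 with probability 0.3 and to 2 with probability 0.7.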
Multi-dimensional linear interpolation. Returns the multi-dimensional piecewise linear interpolant to a function with given discrete data points (xp, fp), evaluated at x. Note that *N and *M indicate zero or more dimensions. Args: x: An array of shape [*N], the x-coordinates of the interpolated values. xp: An np.array of shape [D], the x-coordinates of the data points, must be increasing. fp: An np.array of shape [D, *M], the y-coordinates of the data points. **kwargs: Keywords for np.interp. Returns: An array of shape [*N, *M], the interpolated values.
def linear_interpolation(x, xp, fp, **kwargs): """Multi-dimensional linear interpolation. Returns the multi-dimensional piecewise linear interpolant to a function with given discrete data points (xp, fp), evaluated at x. Note that *N and *M indicate zero or more dimensions. Args: x: An array of shape [*N], the x-coordinates of the interpolated values. xp: An np.array of shape [D], the x-coordinates of the data points, must be increasing. fp: An np.array of shape [D, *M], the y-coordinates of the data points. **kwargs: Keywords for np.interp. Returns: An array of shape [*N, *M], the interpolated values. """ yp = fp.reshape([fp.shape[0], -1]).transpose() y = np.stack([np.interp(x, xp, zp, **kwargs) for zp in yp]).transpose() return y.reshape(x.shape[:1] + fp.shape[1:]).astype(np.float32)
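A worked example interpolating a two-way sampling distribution halfway between steps 0 and 100 (values chosen for illustration):

import numpy as np

xp = np.array([0, 100])
fp = np.array([[1.0, 0.0],   # pmf at step 0
               [0.5, 0.5]])  # pmf at step 100
linear_interpolation(np.array([50]), xp, fp)
# -> array([[0.75, 0.25]], dtype=float32)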
Multi-dimensional step interpolation. Returns the multi-dimensional step interpolant to a function with given discrete data points (xp, fp), evaluated at x. Note that *N and *M indicate zero or more dimensions. Args: x: An array of shape [*N], the x-coordinates of the interpolated values. xp: An np.array of shape [D], the x-coordinates of the data points, must be increasing. fp: An np.array of shape [D, *M], the y-coordinates of the data points. **kwargs: Unused. Returns: An array of shape [*N, *M], the interpolated values.
def step_interpolation(x, xp, fp, **kwargs): """Multi-dimensional step interpolation. Returns the multi-dimensional step interpolant to a function with given discrete data points (xp, fp), evaluated at x. Note that *N and *M indicate zero or more dimensions. Args: x: An array of shape [*N], the x-coordinates of the interpolated values. xp: An np.array of shape [D], the x-coordinates of the data points, must be increasing. fp: An np.array of shape [D, *M], the y-coordinates of the data points. **kwargs: Unused. Returns: An array of shape [*N, *M], the interpolated values. """ del kwargs # Unused. xp = np.expand_dims(xp, -1) lower, upper = xp[:-1], xp[1:] conditions = (x >= lower) & (x < upper) # Underflow and overflow conditions and values. Values default to fp[0] and # fp[-1] respectively. conditions = np.concatenate([[x < xp[0]], conditions, [x >= xp[-1]]]) values = np.concatenate([[fp[0]], fp]) assert np.all(np.sum(conditions, 0) == 1), 'xp must be increasing.' indices = np.argmax(conditions, 0) return values[indices].astype(np.float32)
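With the same data, step interpolation holds the previous pmf until the next boundary and the last pmf after it:

import numpy as np

xp = np.array([0, 100])
fp = np.array([[1.0, 0.0], [0.5, 0.5]])
step_interpolation(np.array([50, 150]), xp, fp)
# -> array([[1.0, 0.0],
#           [0.5, 0.5]], dtype=float32)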
Create a probability mass function based on relative epoch rates. If epoch_rates is None, we use uniform epoch rates [1.0] * len(problems), i.e., it takes each problem the same amount of time to go through one epoch. If epoch_rates is given, these are the relative numbers of epochs of each problem to go through in a given amount of time. Each problem must have num_training_examples implemented. Args: problems: a list of Problem instances. epoch_rates: an optional list of floats. Returns: a list of floating point values.
def epoch_rates_to_pmf(problems, epoch_rates=None):
  """Create a probability mass function based on relative epoch rates.

  If epoch_rates is None, we use uniform epoch rates [1.0] * len(problems),
  i.e., it takes each problem the same amount of time to go through one epoch.
  If epoch_rates is given, these are the relative numbers of epochs of each
  problem to go through in a given amount of time.

  Each problem must have num_training_examples implemented.

  Args:
    problems: a list of Problem instances.
    epoch_rates: an optional list of floats.

  Returns:
    a list of floating point values.
  """
  if epoch_rates is None:
    epoch_rates = [1.0] * len(problems)
  example_rates = [epoch_rate * p.num_training_examples
                   for p, epoch_rate in zip(problems, epoch_rates)]
  return example_rates_to_pmf(example_rates)
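A hedged sketch of the resulting distribution, assuming example_rates_to_pmf (defined elsewhere in this module) normalizes the example rates to sum to 1; the two problems below are hypothetical stand-ins that only expose num_training_examples.

class _FakeProblem(object):
  """Hypothetical stand-in exposing only what epoch_rates_to_pmf needs."""

  def __init__(self, num_training_examples):
    self.num_training_examples = num_training_examples

problems = [_FakeProblem(1000), _FakeProblem(3000)]
epoch_rates_to_pmf(problems)              # -> [0.25, 0.75], proportional to size.
epoch_rates_to_pmf(problems, [2.0, 1.0])  # -> [0.4, 0.6], from rates 2000:3000.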
Encodes a schedule tuple into a string. Args: schedule: A tuple containing (interpolation, steps, pmfs), where interpolation is a string specifying the interpolation strategy, steps is an int array_like of shape [N] specifying the global steps, and pmfs is an array_like of shape [N, M] where pmf[i] is the sampling distribution at global step steps[i]. N is the number of schedule requirements to interpolate and M is the size of the probability space. Returns: The string encoding of the schedule tuple.
def encode_schedule(schedule): """Encodes a schedule tuple into a string. Args: schedule: A tuple containing (interpolation, steps, pmfs), where interpolation is a string specifying the interpolation strategy, steps is an int array_like of shape [N] specifying the global steps, and pmfs is an array_like of shape [N, M] where pmf[i] is the sampling distribution at global step steps[i]. N is the number of schedule requirements to interpolate and M is the size of the probability space. Returns: The string encoding of the schedule tuple. """ interpolation, steps, pmfs = schedule return interpolation + ' ' + ' '.join( '@' + str(s) + ' ' + ' '.join(map(str, p)) for s, p in zip(steps, pmfs))
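An example encoding, checked against the format above: a 'step' schedule that moves from a 50/50 mixture at step 0 to a 90/10 mixture at step 100.

schedule = ('step', (0, 100), ((0.5, 0.5), (0.9, 0.1)))
encode_schedule(schedule)  # -> 'step @0 0.5 0.5 @100 0.9 0.1'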
Decodes a string into a schedule tuple. Args: string: The string encoding of a schedule tuple. Returns: A schedule tuple, see encode_schedule for details.
def decode_schedule(string): """Decodes a string into a schedule tuple. Args: string: The string encoding of a schedule tuple. Returns: A schedule tuple, see encode_schedule for details. """ splits = string.split() steps = [int(x[1:]) for x in splits[1:] if x[0] == '@'] pmfs = np.reshape( [float(x) for x in splits[1:] if x[0] != '@'], [len(steps), -1]) return splits[0], tuplize(steps), tuplize(pmfs)
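Decoding the same string round-trips back to the tuple form; tuplize (below) turns the steps and pmfs into hashable tuples rather than lists or arrays.

decode_schedule('step @0 0.5 0.5 @100 0.9 0.1')
# -> ('step', (0, 100), ((0.5, 0.5), (0.9, 0.1)))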
Recursively converts iterables into tuples. Args: nested: A nested structure of items and iterables. Returns: A nested structure of items and tuples.
def tuplize(nested): """Recursively converts iterables into tuples. Args: nested: A nested structure of items and iterables. Returns: A nested structure of items and tuples. """ if isinstance(nested, str): return nested try: return tuple(map(tuplize, nested)) except TypeError: return nested
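A couple of illustrative calls: strings are iterable but deliberately left intact, other iterables become tuples, and non-iterable leaves pass through unchanged.

tuplize([1, [2, 3], 'abc'])   # -> (1, (2, 3), 'abc')
tuplize(np.array([4, 5]))     # -> (4, 5)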
Returns a list of filepatterns, one for each problem.
def filepattern(self, *args, **kwargs): """Returns a list of filepatterns, one for each problem.""" return [p.filepattern(*args, **kwargs) for p in self.problems]
Generates data for each problem.
def generate_data(self, *args, **kwargs): """Generates data for each problem.""" for p in self.problems: p.generate_data(*args, **kwargs)
Returns a dataset containing examples from multiple problems. Args: mode: A member of problem.DatasetSplit. hparams: A tf.HParams object, the model hparams. global_step: A scalar tensor used to compute the sampling distribution. If global_step is None, we call tf.train.get_or_create_global_step by default. **kwargs: Keywords for problem.Problem.Dataset. Returns: A dataset containing examples from multiple problems.
def dataset(self, mode, hparams=None, global_step=None, **kwargs): """Returns a dataset containing examples from multiple problems. Args: mode: A member of problem.DatasetSplit. hparams: A tf.HParams object, the model hparams. global_step: A scalar tensor used to compute the sampling distribution. If global_step is None, we call tf.train.get_or_create_global_step by default. **kwargs: Keywords for problem.Problem.Dataset. Returns: A dataset containing examples from multiple problems. """ datasets = [p.dataset(mode, **kwargs) for p in self.problems] datasets = [ d.map(lambda x, i=j: self.normalize_example( # pylint: disable=g-long-lambda dict(x, problem_id=tf.constant([i])), hparams)) for j, d in enumerate(datasets) # Tag examples with a problem_id. ] if mode is problem.DatasetSplit.TRAIN: if global_step is None: global_step = tf.train.get_or_create_global_step() pmf = get_schedule_distribution(self.schedule, global_step) return get_multi_dataset(datasets, pmf) elif self.only_eval_first_problem: return datasets[0] else: datasets = [d.repeat() for d in datasets] return tf.data.Dataset.zip(tuple(datasets)).flat_map( lambda *x: functools.reduce( # pylint: disable=g-long-lambda tf.data.Dataset.concatenate, map(tf.data.Dataset.from_tensors, x)))
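A usage sketch; the instance and hparams names here are hypothetical. In training mode each draw picks problem i with probability pmf[i], where pmf is interpolated from the schedule at the current global step; in eval mode, unless only_eval_first_problem is set, examples from all problems are interleaved instead of sampled.

# `multi_problem` is assumed to be an instance of this class with its
# `problems` and `schedule` attributes already configured.
train_ds = multi_problem.dataset(problem.DatasetSplit.TRAIN, hparams=my_hparams)
eval_ds = multi_problem.dataset(problem.DatasetSplit.EVAL, hparams=my_hparams)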
Assumes that example contains both inputs and targets.
def normalize_example(self, example, hparams): """Assumes that example contains both inputs and targets.""" length = self.max_length(hparams) def _to_constant_shape(tensor): tensor = tensor[:length] tensor = tf.pad(tensor, [(0, length - tf.shape(tensor)[0])]) return tf.reshape(tensor, [length]) if self.has_inputs: example['inputs'] = _to_constant_shape(example['inputs']) example['targets'] = _to_constant_shape(example['targets']) elif 'inputs' in example: if self.packed_length: raise ValueError('cannot concatenate packed examples on the fly.') inputs = example.pop('inputs')[:-1] # Remove EOS token. targets = tf.concat([inputs, example['targets']], 0) example['targets'] = _to_constant_shape(targets) else: example['targets'] = _to_constant_shape(example['targets']) if self.packed_length: if self.has_inputs: if 'inputs_segmentation' in example: example['inputs_segmentation'] = _to_constant_shape( example['inputs_segmentation']) example['inputs_position'] = _to_constant_shape( example['inputs_position']) else: example['inputs_segmentation'] = tf.to_int64( tf.not_equal(example['inputs'], 0)) example['inputs_position'] = ( example['inputs_segmentation'] * tf.range(length, dtype=tf.int64)) if 'targets_segmentation' in example: example['targets_segmentation'] = _to_constant_shape( example['targets_segmentation']) example['targets_position'] = _to_constant_shape( example['targets_position']) else: example['targets_segmentation'] = tf.to_int64( tf.not_equal(example['targets'], 0)) example['targets_position'] = ( example['targets_segmentation'] * tf.range(length, dtype=tf.int64)) return example
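A small, self-contained illustration of what the inner _to_constant_shape helper does, with a hard-coded length of 5 (TF1-style graph code, matching the module): short sequences are right-padded with zeros and long ones truncated.

length = 5
t = tf.constant([7, 8, 9], dtype=tf.int64)
t = t[:length]                                 # Truncate if longer than length.
t = tf.pad(t, [(0, length - tf.shape(t)[0])])  # Right-pad with zeros if shorter.
t = tf.reshape(t, [length])                    # Evaluates to [7, 8, 9, 0, 0].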