text_prompt: string (lengths 100 to 17.7k)
code_prompt: string (lengths 7 to 9.86k)
<SYSTEM_TASK:> Get the min mu which minimizes the surrogate. <END_TASK> <USER_TASK:> Description: def _get_mu_tensor(self): """Get the min mu which minimizes the surrogate. Returns: The mu_t. """
root = self._get_cubic_root() dr = self._h_max / self._h_min mu = tf.maximum( root**2, ((tf.sqrt(dr) - 1) / (tf.sqrt(dr) + 1))**2) return mu
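A minimal NumPy restatement of the momentum bound computed above, added as an illustrative sketch (not part of the original row); the cubic root and the curvature values h_max, h_min are made-up inputs.

import numpy as np

def yellowfin_mu(root, h_max, h_min):
    """max(root**2, ((sqrt(DR) - 1) / (sqrt(DR) + 1))**2), with DR = h_max / h_min."""
    dr = h_max / h_min  # dynamic range of the curvature estimates
    dr_bound = ((np.sqrt(dr) - 1.0) / (np.sqrt(dr) + 1.0)) ** 2
    return max(root ** 2, dr_bound)

# Illustrative values only (not from the dataset row).
print(yellowfin_mu(root=0.9, h_max=100.0, h_min=1.0))  # max(0.81, (9/11)**2) = 0.81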
<SYSTEM_TASK:> YellowFin auto-tuning optimizer based on momentum SGD. <END_TASK> <USER_TASK:> Description: def _yellowfin(self): """YellowFin auto-tuning optimizer based on momentum SGD. Returns: YF ops (Curvature range, Grad_variance, Dist_to_opt, Single-Step, Auto-Tuning) """
# List for the returned Operations. yellowfin_ops = [] # Curvature range ops. curv_range_ops = self._curvature_range() yellowfin_ops += curv_range_ops # Estimate of gradient Variance ops. grad_var_ops = self._grad_variance() yellowfin_ops += grad_var_ops # Distance to optimum ops. dist_to_opt_ops = self._dist_to_opt() yellowfin_ops += dist_to_opt_ops # Single-Step: minimizes the surrogate for the expected # squared distance from the optimum of a local quadratic # approximation after a single step while keeping all directions in the # robust region. self._mu = tf.identity(tf.cond(self._do_tune, self._get_mu_tensor, lambda: self._mu_var)) with tf.control_dependencies([self._mu]): self._lr = tf.identity(tf.cond(self._do_tune, self._get_lr_tensor, lambda: self._lr_var)) # Tune learning rate and momentum. with tf.control_dependencies([self._mu, self._lr]): self._mu = self._beta * self._mu_var + (1 - self._beta) * self._mu self._lr = self._beta * self._lr_var + (1 - self._beta) * self._lr yellowfin_ops.append(tf.assign(self._mu_var, self._mu)) yellowfin_ops.append(tf.assign(self._lr_var, self._lr)) yellowfin_ops = tf.group(*yellowfin_ops) return yellowfin_ops
<SYSTEM_TASK:> Apply gradients and tune hyperparameters with YellowFin. <END_TASK> <USER_TASK:> Description: def apply_gradients(self, grads_and_vars, global_step=None, name=None): """Apply gradients and tune hyperparameters with YellowFin. Args: grads_and_vars: List of (gradient, variable) pairs as returned by compute_gradients(). global_step: Optional Variable to increment by one after the variables have been updated. name: Optional name for the returned operation. Defaults to the name passed to the Optimizer constructor. Returns: (A group of operations) Variable update with momentum ops, YellowFin ops (curvature, variance, distance), single-step and lr_mu tuning ops, and a step increment op. """
self._grad, self._vars = zip(*[(g, t) for g, t in grads_and_vars if g is not None]) # Var update with Momentum. with tf.variable_scope("apply_updates"): # Gradient Clipping? if self._clip_thresh_var is not None: self._grad, _ = tf.clip_by_global_norm( self._grad, self._clip_thresh_var) apply_grad_op = self._momentum_optimizer.apply_gradients( zip(self._grad, self._vars), global_step=global_step, name=name) else: apply_grad_op = self._momentum_optimizer.apply_gradients( zip(self._grad, self._vars), global_step=global_step, name=name) # Begin lr and mu tuning. with tf.variable_scope("prepare_yellowFin_variables"): # the dependencies ideally only need to be after clip is done, # i.e. depends on self._grads. However, the control_dependencies # does not support indexed slice for sparse gradients. # The alternative dependencies here might be slightly slower due # to less parallelization. with tf.control_dependencies([apply_grad_op,]): prepare_variables_op = self._prepare_variables() with tf.variable_scope("yellowfin"): with tf.control_dependencies([prepare_variables_op]): yellowfin_op = self._yellowfin() # Update YellowFin step variable. with tf.control_dependencies([yellowfin_op]): self._increment_step_op = tf.assign_add(self._step, 1).op return tf.group(apply_grad_op, prepare_variables_op, yellowfin_op, self._increment_step_op)
<SYSTEM_TASK:> Compute gradients through momentum optimizer. <END_TASK> <USER_TASK:> Description: def compute_gradients(self, loss, var_list, global_step=None, gate_gradients=GATE_OP, aggregation_method=None, colocate_gradients_with_ops=False, name=None, grad_loss=None): """Compute gradients through momentum optimizer. Args: loss: A Tensor containing the value to minimize. var_list: Optional list or tuple of tf.Variable to update to minimize loss. Defaults to the list of variables collected in the graph under the key GraphKeys.TRAINABLE_VARIABLES. global_step: Optional Variable to increment by one after the variables have been updated. gate_gradients: How to gate the computation of gradients. Can be GATE_NONE, GATE_OP, or GATE_GRAPH. aggregation_method: Specifies the method used to combine gradient terms. Valid values are defined in the class AggregationMethod. colocate_gradients_with_ops: If True, try colocating gradients with the corresponding op. name: Optional name for the returned operation. Defaults to the name passed to the Optimizer constructor. grad_loss: Optional. A Tensor holding the gradient computed for loss. Returns: A list of (gradient, variable) pairs. Variable is always present, but gradient can be None. """
del global_step, name # Unused for now. return self._momentum_optimizer.compute_gradients( loss, var_list=var_list, gate_gradients=gate_gradients, aggregation_method=aggregation_method, colocate_gradients_with_ops=colocate_gradients_with_ops, grad_loss=grad_loss)
<SYSTEM_TASK:> Adapted from TensorFlow Optimizer base class member function. <END_TASK> <USER_TASK:> Description: def minimize(self, loss, global_step=None, var_list=None, gate_gradients=GATE_OP, aggregation_method=None, colocate_gradients_with_ops=False, name=None, grad_loss=None): """Adapted from TensorFlow Optimizer base class member function. Add operations to minimize `loss` by updating `var_list`. This method simply combines calls to `compute_gradients()` and `apply_gradients()`. If you want to process the gradients before applying them, call `tf.gradients()` and `self.apply_gradients()` explicitly instead of using this function. Args: loss: A Tensor containing the value to minimize. global_step: Optional Variable to increment by one after the variables have been updated. var_list: Optional list or tuple of Variable objects to update to minimize loss. Defaults to the list of variables collected in the graph under the key GraphKeys.TRAINABLE_VARIABLES. gate_gradients: How to gate the computation of gradients. Can be GATE_NONE, GATE_OP, or GATE_GRAPH. aggregation_method: Specifies the method used to combine gradient terms. Valid values are defined in the class AggregationMethod. colocate_gradients_with_ops: If True, try colocating gradients with the corresponding op. name: Optional name for the returned operation. grad_loss: Optional. A Tensor holding the gradient computed for loss. Returns: An Operation that updates the variables in var_list. If global_step was not None, that operation also increments global_step. Raises: ValueError: if no gradients are provided for any variable. """
grads_and_vars = self._momentum_optimizer.compute_gradients( loss, var_list=var_list, gate_gradients=gate_gradients, aggregation_method=aggregation_method, colocate_gradients_with_ops=colocate_gradients_with_ops, grad_loss=grad_loss) vars_with_grad = [v for g, v in grads_and_vars if g is not None] if not vars_with_grad: raise ValueError( "No gradients provided for any variable, check your graph for ops" " that do not support gradients, between variables %s and loss %s." % ([str(v) for _, v in grads_and_vars], loss)) for g, v in grads_and_vars: print("g ", g) print("v ", v) return self.apply_gradients(grads_and_vars, global_step=global_step, name=name)
<SYSTEM_TASK:> ByteNet, main step used for training. <END_TASK> <USER_TASK:> Description: def bytenet_internal(inputs, targets, hparams): """ByteNet, main step used for training."""
with tf.variable_scope("bytenet"): # Flatten inputs and extend length by 50%. inputs = tf.expand_dims(common_layers.flatten4d3d(inputs), axis=2) extend_length = tf.to_int32(0.5 * tf.to_float(tf.shape(inputs)[1])) inputs_shape = inputs.shape.as_list() inputs = tf.pad(inputs, [[0, 0], [0, extend_length], [0, 0], [0, 0]]) inputs_shape[1] = None inputs.set_shape(inputs_shape) # Don't lose the other shapes when padding. # Pad inputs and targets to be the same length, divisible by 50. inputs, targets = common_layers.pad_to_same_length( inputs, targets, final_length_divisible_by=50) final_encoder = residual_dilated_conv(inputs, hparams.num_block_repeat, "SAME", "encoder", hparams) shifted_targets = common_layers.shift_right(targets) kernel = (hparams.kernel_height, hparams.kernel_width) decoder_start = common_layers.conv_block( tf.concat([final_encoder, shifted_targets], axis=3), hparams.hidden_size, [((1, 1), kernel)], padding="LEFT") return residual_dilated_conv(decoder_start, hparams.num_block_repeat, "LEFT", "decoder", hparams)
<SYSTEM_TASK:> Downloads and prepares the dataset to be parsed by the data_generator. <END_TASK> <USER_TASK:> Description: def _download_and_parse_dataset(tmp_dir, train): """Downloads and prepares the dataset to be parsed by the data_generator."""
file_path = generator_utils.maybe_download(tmp_dir, _SNLI_ZIP, _SNLI_URL) zip_ref = zipfile.ZipFile(file_path, 'r') zip_ref.extractall(tmp_dir) zip_ref.close() file_name = 'train' if train else 'dev' dataset_file_path = os.path.join(tmp_dir, _SNLI_DATA_PATH % file_name) _parse_dataset(dataset_file_path, tmp_dir, train)
<SYSTEM_TASK:> Convert the dataset into a simpler format. <END_TASK> <USER_TASK:> Description: def _parse_dataset(file_path, tmp_dir, train): """Convert the dataset into a simpler format. This function creates two files. One is processed to produce a vocab and the other is used to generate the data. Args: file_path: string, path to the file to parse. tmp_dir: string, path to the directory to output the files. train: bool, indicating if we are parsing the training set. """
input_path = file_path file_name = 'train' if train else 'dev' gen_output_path = os.path.join(tmp_dir, file_name + '.txt') example_output_path = os.path.join(tmp_dir, _EXAMPLES_FILE) print('input path: ' + input_path) print('gen_output_path: ' + gen_output_path) print('example_output_path: ' + example_output_path) input_file = tf.gfile.Open(input_path, mode='r') examples = [] for counter, line in enumerate(input_file): if counter == 0: # Ignore first line since it's a header. continue # Get the two sentence parses and the consensus label. line_split = line.split('\t') parse1 = line_split[_PARSE1_INDEX] parse2 = line_split[_PARSE2_INDEX] consensus_label = line_split[_LABEL_INDEX] tokens1 = _get_tokens_and_tags(parse1) tokens2 = _get_tokens_and_tags(parse2) tokens1_str = ' '.join(tokens1) tokens2_str = ' '.join(tokens2) if consensus_label != '-': examples.append([tokens1_str, tokens2_str, consensus_label]) input_file.close() # Output a tab-delimited file of example lines (sentence1, sentence2, label) with tf.gfile.GFile(gen_output_path, 'w') as f: for tokens1_str, tokens2_str, consensus_label in examples: f.write('%s\t%s\t%s\n' % (tokens1_str, tokens2_str, consensus_label)) if train: # Output file containing all the sentences for generating the vocab from. with tf.gfile.GFile(example_output_path, 'w') as f: for tokens1_str, tokens2_str, consensus_label in examples: f.write('%s %s\n' % (tokens1_str, tokens2_str))
<SYSTEM_TASK:> Read or create vocabulary. <END_TASK> <USER_TASK:> Description: def _get_or_generate_vocab(tmp_dir, vocab_filename, vocab_size): """Read or create vocabulary."""
vocab_filepath = os.path.join(tmp_dir, vocab_filename) print('Vocab file written to: ' + vocab_filepath) if tf.gfile.Exists(vocab_filepath): gs = text_encoder.SubwordTextEncoder(vocab_filepath) return gs example_file = os.path.join(tmp_dir, _EXAMPLES_FILE) gs = text_encoder.SubwordTextEncoder() token_counts = tokenizer.corpus_token_counts( example_file, corpus_max_lines=1000000) gs = gs.build_to_target_size( vocab_size, token_counts, min_val=1, max_val=1e3) gs.store_to_file(vocab_filepath) return gs
<SYSTEM_TASK:> Split items into num_shards groups. <END_TASK> <USER_TASK:> Description: def shard(items, num_shards): """Split items into num_shards groups."""
sharded = [] num_per_shard = len(items) // num_shards start = 0 for _ in range(num_shards): sharded.append(items[start:start + num_per_shard]) start += num_per_shard remainder = len(items) % num_shards start = len(items) - remainder for i in range(remainder): sharded[i].append(items[start + i]) assert sum([len(fs) for fs in sharded]) == len(items) return sharded
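A framework-free walk-through of the same sharding rule, shown as an illustrative sketch: the remainder items are appended one-by-one to the first shards, so shard sizes differ by at most one.

# Illustrative values only (10 items, 3 shards).
items, num_shards = list(range(10)), 3
num_per_shard = len(items) // num_shards                      # 3
sharded = [items[i * num_per_shard:(i + 1) * num_per_shard]
           for i in range(num_shards)]                        # [[0,1,2],[3,4,5],[6,7,8]]
for i in range(len(items) % num_shards):                      # spread the remainder
    sharded[i].append(items[num_shards * num_per_shard + i])
print(sharded)  # [[0, 1, 2, 9], [3, 4, 5], [6, 7, 8]]
assert sum(len(s) for s in sharded) == len(items)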
<SYSTEM_TASK:> An initializer function for random uniform Glorot-scaled coefficients. <END_TASK> <USER_TASK:> Description: def GlorotUniformInitializer(out_dim=0, in_dim=1): """An initializer function for random uniform Glorot-scaled coefficients."""
def init(shape, rng): fan_in, fan_out = shape[in_dim], shape[out_dim] std = np.sqrt(2.0 / (fan_in + fan_out)) a = np.sqrt(3.0) * std return backend.random.uniform(rng, shape, minval=-a, maxval=a) return init
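A NumPy-only sketch of the same Glorot-uniform scaling, assuming a plain numpy Generator in place of the trax backend RNG; the shape and seed are arbitrary illustrative choices.

import numpy as np

def glorot_uniform(shape, out_dim=0, in_dim=1, rng=None):
    # Hypothetical standalone helper, not the trax initializer itself.
    rng = rng or np.random.default_rng(0)
    fan_in, fan_out = shape[in_dim], shape[out_dim]
    std = np.sqrt(2.0 / (fan_in + fan_out))  # Glorot standard deviation
    a = np.sqrt(3.0) * std                   # uniform(-a, a) has that std
    return rng.uniform(-a, a, size=shape)

w = glorot_uniform((256, 128))
print(w.std())  # roughly sqrt(2 / (128 + 256)) ~= 0.072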
<SYSTEM_TASK:> Make an n+1 dim one-hot array from an n dim int-categorical array. <END_TASK> <USER_TASK:> Description: def one_hot(x, size, dtype=np.float32): """Make an n+1 dim one-hot array from an n dim int-categorical array."""
return np.array(x[..., np.newaxis] == np.arange(size), dtype)
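A quick NumPy check of the broadcasting trick used above, for illustration only.

import numpy as np
x = np.array([1, 0, 2])
print(np.array(x[..., np.newaxis] == np.arange(3), dtype=np.float32))
# [[0. 1. 0.]
#  [1. 0. 0.]
#  [0. 0. 1.]]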
<SYSTEM_TASK:> Convert padding string to list of pairs of pad values. <END_TASK> <USER_TASK:> Description: def padtype_to_pads(in_shape, window_shape, window_strides, padding): """Convert padding string to list of pairs of pad values."""
padding = padding.upper() if padding == 'SAME': out_shape = onp.ceil( onp.true_divide(in_shape, window_strides)).astype(int) pad_sizes = [max((out_size - 1) * stride + window_shape - in_size, 0) for out_size, stride, window_shape, in_size in zip(out_shape, window_strides, window_shape, in_shape)] return [(pad_size // 2, pad_size - pad_size // 2) for pad_size in pad_sizes] elif padding == 'VALID': return [(0, 0)] * len(in_shape) else: msg = 'Unknown padding type: {}.' raise TypeError(msg.format(padding))
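A worked example of the 'SAME' branch with plain NumPy, assuming a 1-D input of length 10, window 3 and stride 2 (values chosen only for illustration).

import numpy as onp
in_shape, window_shape, strides = (10,), (3,), (2,)
out_shape = onp.ceil(onp.true_divide(in_shape, strides)).astype(int)           # [5]
pad = max((out_shape[0] - 1) * strides[0] + window_shape[0] - in_shape[0], 0)  # 1
print(out_shape, [(pad // 2, pad - pad // 2)])                                 # [5] [(0, 1)]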
<SYSTEM_TASK:> Helper to initialize batch norm params. <END_TASK> <USER_TASK:> Description: def _batch_norm_new_params(input_shape, rng, axis=(0, 1, 2), center=True, scale=True, **kwargs): """Helper to initialize batch norm params."""
del rng, kwargs axis = (axis,) if np.isscalar(axis) else axis shape = tuple(d for i, d in enumerate(input_shape) if i not in axis) beta = np.zeros(shape, dtype='float32') if center else () gamma = np.ones(shape, dtype='float32') if scale else () return (beta, gamma)
<SYSTEM_TASK:> Layer construction function for a batch normalization layer. <END_TASK> <USER_TASK:> Description: def BatchNorm(x, params, axis=(0, 1, 2), epsilon=1e-5, center=True, scale=True, **unused_kwargs): """Layer construction function for a batch normalization layer."""
mean = np.mean(x, axis, keepdims=True) # Fast but less numerically-stable variance calculation than np.var. m1 = np.mean(x**2, axis, keepdims=True) var = m1 - mean**2 z = (x - mean) / np.sqrt(var + epsilon) # Expand the parameters to have the right axes. beta, gamma = params # TODO(phawkins): np.expand_dims should accept an axis tuple. # (https://github.com/numpy/numpy/issues/12290) ed = tuple(None if i in axis else slice(None) for i in range(np.ndim(x))) beta = beta[ed] gamma = gamma[ed] # Return the z rescaled by the parameters if requested. if center and scale: return gamma * z + beta if center: return z + beta if scale: return gamma * z return z
<SYSTEM_TASK:> Layer construction function for a dropout layer with given rate. <END_TASK> <USER_TASK:> Description: def Dropout(x, params, rate=0.0, mode='train', rng=None, **kwargs): """Layer construction function for a dropout layer with given rate."""
del params, kwargs if rng is None: msg = ('Dropout layer requires apply_fun to be called with a rng keyword ' 'argument. That is, instead of `Dropout(params, inputs)`, call ' 'it like `Dropout(params, inputs, rng=key)`.') raise ValueError(msg) if rate >= 1.0: raise ValueError('Dropout rate (%f) must be lower than 1.' % rate) if mode == 'train' and rate > 0.0: keep = backend.random.bernoulli(rng, 1.0 - rate, x.shape) return np.where(keep, x / (1.0 - rate), 0) else: return x
<SYSTEM_TASK:> Helper to calculate the kernel shape. <END_TASK> <USER_TASK:> Description: def _kernel_shape(self, input_shape): """Helper to calculate the kernel shape."""
kernel_size_iter = iter(self._kernel_size) return [self._filters if c == 'O' else input_shape[self._lhs_spec.index('C')] if c == 'I' else next(kernel_size_iter) for c in self._rhs_spec]
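A standalone sketch of the spec-driven kernel-shape rule above, assuming a hypothetical 'NHWC' input spec and 'HWIO' kernel spec with a 3x3 kernel, 16 input channels and 32 filters.

def kernel_shape(rhs_spec, lhs_spec, kernel_size, filters, input_shape):
    # Hypothetical free function mirroring the method's list comprehension.
    sizes = iter(kernel_size)
    return [filters if c == 'O'
            else input_shape[lhs_spec.index('C')] if c == 'I'
            else next(sizes)
            for c in rhs_spec]

print(kernel_shape('HWIO', 'NHWC', (3, 3), 32, (8, 28, 28, 16)))  # [3, 3, 16, 32]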
<SYSTEM_TASK:> Compute the shape of a conv given input shapes in canonical order. <END_TASK> <USER_TASK:> Description: def _conv_shape_tuple(self, lhs_shape, rhs_shape, strides, pads): """Compute the shape of a conv given input shapes in canonical order."""
if isinstance(pads, str): pads = padtype_to_pads(lhs_shape[2:], rhs_shape[2:], strides, pads) if len(pads) != len(lhs_shape) - 2: msg = 'Wrong number of explicit pads for conv: expected {}, got {}.' raise TypeError(msg.format(len(lhs_shape) - 2, len(pads))) lhs_padded = onp.add(lhs_shape[2:], onp.add(*zip(*pads))) out_space = onp.floor_divide( onp.subtract(lhs_padded, rhs_shape[2:]), strides) + 1 out_space = onp.maximum(0, out_space) out_shape = (lhs_shape[0], rhs_shape[0]) + tuple(out_space) return tuple(out_shape)
<SYSTEM_TASK:> Generalized computation of conv shape. <END_TASK> <USER_TASK:> Description: def _conv_general_shape_tuple(self, lhs_shape, rhs_shape, window_strides, padding, dimension_numbers): """Generalized computation of conv shape."""
lhs_perm, rhs_perm, out_perm = self._conv_general_permutations( dimension_numbers) lhs_trans = onp.take(lhs_shape, lhs_perm) rhs_trans = onp.take(rhs_shape, rhs_perm) out_trans = self._conv_shape_tuple( lhs_trans, rhs_trans, window_strides, padding) return tuple(onp.take(out_trans, onp.argsort(out_perm)))
<SYSTEM_TASK:> Factory for dopamine agent initialization. <END_TASK> <USER_TASK:> Description: def get_create_agent(agent_kwargs): """Factory for dopamine agent initialization. Args: agent_kwargs: dict of BatchDQNAgent parameters Returns: Function(sess, environment, summary_writer) -> BatchDQNAgent instance. """
def create_agent(sess, environment, summary_writer=None): """Creates a DQN agent. Simplified version of `dopamine.discrete_domains.train.create_agent` Args: sess: a session environment: an environment summary_writer: a summary writer. Returns: a DQN agent. """ return BatchDQNAgent( env_batch_size=environment.batch_size, sess=sess, num_actions=environment.action_space.n, summary_writer=summary_writer, tf_device="/gpu:*", **agent_kwargs) return create_agent
<SYSTEM_TASK:> Factory for dopamine environment initialization function. <END_TASK> <USER_TASK:> Description: def get_create_batch_env_fun(batch_env_fn, time_limit): """Factory for dopamine environment initialization function. Args: batch_env_fn: function(in_graph: bool) -> batch environment. time_limit: time steps limit for environment. Returns: function (with optional, unused parameters) initializing environment. """
def create_env_fun(game_name=None, sticky_actions=None): del game_name, sticky_actions batch_env = batch_env_fn(in_graph=False) batch_env = ResizeBatchObservation(batch_env) # pylint: disable=redefined-variable-type batch_env = DopamineBatchEnv(batch_env, max_episode_steps=time_limit) return batch_env return create_env_fun
<SYSTEM_TASK:> Split hparams based on key prefixes. <END_TASK> <USER_TASK:> Description: def _parse_hparams(hparams): """Split hparams based on key prefixes. Args: hparams: hyperparameters Returns: Tuple of hparams for, respectively: agent, optimizer, runner, replay_buffer. """
prefixes = ["agent_", "optimizer_", "runner_", "replay_buffer_"] ret = [] for prefix in prefixes: ret_dict = {} for key in hparams.values(): if prefix in key: par_name = key[len(prefix):] ret_dict[par_name] = hparams.get(key) ret.append(ret_dict) return ret
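A dict-based sketch of the same prefix split, assuming hparams is a plain dict with made-up keys; note the row above tests `prefix in key` (a substring match), whereas this sketch uses a strict startswith().

# Illustrative keys only (not the real hparam set).
hparams = {"agent_gamma": 0.99, "optimizer_learning_rate": 2.5e-4,
           "runner_num_iterations": 200, "replay_buffer_replay_capacity": 10000}
prefixes = ["agent_", "optimizer_", "runner_", "replay_buffer_"]
split = [{k[len(p):]: v for k, v in hparams.items() if k.startswith(p)}
         for p in prefixes]
print(split[0])  # {'gamma': 0.99}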
<SYSTEM_TASK:> Hparams for qualitative video generation results. <END_TASK> <USER_TASK:> Description: def next_frame_glow_bair_qual(): """Hparams for qualitative video generation results."""
hparams = next_frame_glow_bair_quant() hparams.coupling = "additive" hparams.temperature = 0.5 hparams.coupling_width = 392 return hparams
<SYSTEM_TASK:> Hparams for qualitative and quantitative results on shapes dataset. <END_TASK> <USER_TASK:> Description: def next_frame_glow_shapes(): """Hparams for qualitative and quantitative results on shapes dataset."""
hparams = next_frame_glow_bair_quant() hparams.video_num_input_frames = 1 hparams.video_num_target_frames = 2 hparams.num_train_frames = 2 hparams.num_cond_latents = 1 hparams.coupling = "additive" hparams.coupling_width = 512 hparams.latent_encoder_depth = 10 hparams.latent_skip = False hparams.learning_rate_constant = 1e-4 hparams.batch_size = 10 return hparams
<SYSTEM_TASK:> Small fully connected model. <END_TASK> <USER_TASK:> Description: def basic_fc_small(): """Small fully connected model."""
hparams = common_hparams.basic_params1() hparams.learning_rate = 0.1 hparams.batch_size = 128 hparams.hidden_size = 256 hparams.num_hidden_layers = 2 hparams.initializer = "uniform_unit_scaling" hparams.initializer_gain = 1.0 hparams.weight_decay = 0.0 hparams.dropout = 0.0 return hparams
<SYSTEM_TASK:> Image generator for Imagenet 64x64 downsampled images. <END_TASK> <USER_TASK:> Description: def imagenet_pixelrnn_generator(tmp_dir, training, size=_IMAGENET_SMALL_IMAGE_SIZE): """Image generator for Imagenet 64x64 downsampled images. It assumes that the data has been downloaded from http://image-net.org/small/*_32x32.tar or http://image-net.org/small/*_64x64.tar into tmp_dir. Args: tmp_dir: path to temporary storage directory. training: a Boolean; if true, we use the train set, otherwise the test set. size: image size (assumes height and width are same) Yields: A dictionary representing the images with the following fields: * image/encoded: the string encoding the image as PNG, * image/format: the string "png" representing image format, * image/height: an integer representing the height, * image/width: an integer representing the width. Every field is actually a list of the corresponding type. """
if size == _IMAGENET_SMALL_IMAGE_SIZE: train_prefix = _IMAGENET_SMALL_TRAIN_PREFIX eval_prefix = _IMAGENET_SMALL_EVAL_PREFIX else: train_prefix = _IMAGENET_MEDIUM_TRAIN_PREFIX eval_prefix = _IMAGENET_MEDIUM_EVAL_PREFIX prefix = train_prefix if training else eval_prefix images_filepath = os.path.join(tmp_dir, prefix) image_files = tf.gfile.Glob(images_filepath + "/*") height = size width = size const_label = 0 for filename in image_files: with tf.gfile.Open(filename, "r") as f: encoded_image = f.read() yield { "image/encoded": [encoded_image], "image/format": ["png"], "image/class/label": [const_label], "image/height": [height], "image/width": [width] }
<SYSTEM_TASK:> Preprocessing used for Imagenet and similar problems. <END_TASK> <USER_TASK:> Description: def imagenet_preprocess_example(example, mode, resize_size=None, normalize=True): """Preprocessing used for Imagenet and similar problems."""
resize_size = resize_size or [299, 299] assert resize_size[0] == resize_size[1] image = example["inputs"] if mode == tf.estimator.ModeKeys.TRAIN: image = preprocess_for_train(image, image_size=resize_size[0], normalize=normalize) else: image = preprocess_for_eval(image, image_size=resize_size[0], normalize=normalize) example["inputs"] = image return example
<SYSTEM_TASK:> Crops the given image using the provided offsets and sizes. <END_TASK> <USER_TASK:> Description: def _crop(image, offset_height, offset_width, crop_height, crop_width): """Crops the given image using the provided offsets and sizes. Note that the method doesn't assume we know the input image size but it does assume we know the input image rank. Args: image: `Tensor` image of shape [height, width, channels]. offset_height: `Tensor` indicating the height offset. offset_width: `Tensor` indicating the width offset. crop_height: the height of the cropped image. crop_width: the width of the cropped image. Returns: the cropped (and resized) image. Raises: InvalidArgumentError: if the rank is not 3 or if the image dimensions are less than the crop size. """
original_shape = tf.shape(image) rank_assertion = tf.Assert( tf.equal(tf.rank(image), 3), ["Rank of image must be equal to 3."]) with tf.control_dependencies([rank_assertion]): cropped_shape = tf.stack([crop_height, crop_width, original_shape[2]]) size_assertion = tf.Assert( tf.logical_and( tf.greater_equal(original_shape[0], crop_height), tf.greater_equal(original_shape[1], crop_width)), ["Crop size greater than the image size."]) offsets = tf.to_int32(tf.stack([offset_height, offset_width, 0])) # Use tf.slice instead of crop_to_bounding box as it accepts tensors to # define the crop size. with tf.control_dependencies([size_assertion]): image = tf.slice(image, offsets, cropped_shape) return tf.reshape(image, cropped_shape)
<SYSTEM_TASK:> Generates cropped_image using one of the bboxes, randomly distorted. <END_TASK> <USER_TASK:> Description: def distorted_bounding_box_crop(image, bbox, min_object_covered=0.1, aspect_ratio_range=(0.75, 1.33), area_range=(0.05, 1.0), max_attempts=100, scope=None): """Generates cropped_image using one of the bboxes, randomly distorted. See `tf.image.sample_distorted_bounding_box` for more documentation. Args: image: `Tensor` of image (it will be converted to floats in [0, 1]). bbox: `Tensor` of bounding boxes arranged `[1, num_boxes, coords]` where each coordinate is [0, 1) and the coordinates are arranged as `[ymin, xmin, ymax, xmax]`. If num_boxes is 0 then use the whole image. min_object_covered: An optional `float`. Defaults to `0.1`. The cropped area of the image must contain at least this fraction of any bounding box supplied. aspect_ratio_range: An optional list of `float`s. The cropped area of the image must have an aspect ratio = width / height within this range. area_range: An optional list of `float`s. The cropped area of the image must contain a fraction of the supplied image within this range. max_attempts: An optional `int`. Number of attempts at generating a cropped region of the image that meets the specified constraints. After `max_attempts` failures, return the entire image. scope: Optional `str` for name scope. Returns: (cropped image `Tensor`, distorted bbox `Tensor`). """
with tf.name_scope(scope, default_name="distorted_bounding_box_crop", values=[image, bbox]): # Each bounding box has shape [1, num_boxes, box coords] and # the coordinates are ordered [ymin, xmin, ymax, xmax]. # A large fraction of image datasets contain a human-annotated bounding # box delineating the region of the image containing the object of interest. # We choose to create a new bounding box for the object which is a randomly # distorted version of the human-annotated bounding box that obeys an # allowed range of aspect ratios, sizes and overlap with the human-annotated # bounding box. If no box is supplied, then we assume the bounding box is # the entire image. sample_distorted_bounding_box = tf.image.sample_distorted_bounding_box( tf.shape(image), bounding_boxes=bbox, min_object_covered=min_object_covered, aspect_ratio_range=aspect_ratio_range, area_range=area_range, max_attempts=max_attempts, use_image_if_no_bounding_boxes=True) bbox_begin, bbox_size, distort_bbox = sample_distorted_bounding_box # Crop the image to the specified bounding box. cropped_image = tf.slice(image, bbox_begin, bbox_size) return cropped_image, distort_bbox
<SYSTEM_TASK:> At least `x` of `a` and `b` `Tensors` are true. <END_TASK> <USER_TASK:> Description: def _at_least_x_are_true(a, b, x): """At least `x` of `a` and `b` `Tensors` are true."""
match = tf.equal(a, b) match = tf.cast(match, tf.int32) return tf.greater_equal(tf.reduce_sum(match), x)
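A NumPy analogue of the check above, for illustration: count element-wise matches of two shape vectors and require at least x of them to agree.

import numpy as np
a, b, x = np.array([224, 224, 3]), np.array([224, 256, 3]), 2  # illustrative shapes
print(int(np.sum(a == b)) >= x)  # True: 2 of the 3 dimensions match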
<SYSTEM_TASK:> Rescale the image by scaling the smaller spatial dimension to `size`. <END_TASK> <USER_TASK:> Description: def _do_scale(image, size): """Rescale the image by scaling the smaller spatial dimension to `size`."""
shape = tf.cast(tf.shape(image), tf.float32) w_greater = tf.greater(shape[0], shape[1]) shape = tf.cond(w_greater, lambda: tf.cast([shape[0] / shape[1] * size, size], tf.int32), lambda: tf.cast([size, shape[1] / shape[0] * size], tf.int32)) return tf.image.resize_bicubic([image], shape)[0]
<SYSTEM_TASK:> Crops to center of image with specified `size`. <END_TASK> <USER_TASK:> Description: def _center_crop(image, size): """Crops to center of image with specified `size`."""
image_height = tf.shape(image)[0] image_width = tf.shape(image)[1] offset_height = ((image_height - size) + 1) / 2 offset_width = ((image_width - size) + 1) / 2 image = _crop(image, offset_height, offset_width, size, size) return image
<SYSTEM_TASK:> Normalize the image to zero mean and unit variance. <END_TASK> <USER_TASK:> Description: def _normalize(image): """Normalize the image to zero mean and unit variance."""
offset = tf.constant(MEAN_RGB, shape=[1, 1, 3]) image -= offset scale = tf.constant(STDDEV_RGB, shape=[1, 1, 3]) image /= scale return image
<SYSTEM_TASK:> Factor-based learning rate schedule. <END_TASK> <USER_TASK:> Description: def MultifactorSchedule(history=None, factors="constant * linear_warmup * rsqrt_decay", constant=0.1, warmup_steps=100, decay_factor=0.5, steps_per_decay=20000): """Factor-based learning rate schedule. Interprets factors in the factors string which can consist of: * constant: interpreted as the constant value, * linear_warmup: interpreted as linear warmup until warmup_steps, * rsqrt_decay: divide by square root of max(step, warmup_steps) * decay_every: Every k steps decay the learning rate by decay_factor. Args: history: the history of training and evaluation (History object). factors: a string with factors separated by "*" that defines the schedule. constant: float, the starting constant for the learning rate schedule. warmup_steps: how many steps to warm up for in the warmup schedule. decay_factor: The amount to decay the learning rate by. steps_per_decay: How often to decay the learning rate. Returns: a function learning_rate(step): float -> float, the step-dependent lr. """
del history cache_args = (factors, constant, warmup_steps) if cache_args in _memoized_multifactor_schedules: return _memoized_multifactor_schedules[cache_args] factors = [n.strip() for n in factors.split("*")] def learning_rate(step): # pylint: disable=invalid-name """Step to learning rate function.""" ret = 1.0 for name in factors: if name == "constant": ret *= constant elif name == "linear_warmup": ret *= np.minimum(1.0, step / warmup_steps) elif name == "rsqrt_decay": ret /= np.sqrt(np.maximum(step, warmup_steps)) elif name == "decay_every": ret *= (decay_factor ** (step//steps_per_decay)) else: raise ValueError("Unknown factor %s." % name) return ret _memoized_multifactor_schedules[cache_args] = learning_rate return learning_rate
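Hand-checked values of the default "constant * linear_warmup * rsqrt_decay" schedule, computed with plain NumPy under the defaults constant=0.1 and warmup_steps=100 (an illustrative sketch, not part of the row).

import numpy as np

def lr(step, constant=0.1, warmup_steps=100):
    ret = constant
    ret *= np.minimum(1.0, step / warmup_steps)     # linear_warmup
    ret /= np.sqrt(np.maximum(step, warmup_steps))  # rsqrt_decay
    return ret

print(lr(10))     # 0.1 * 0.1 / 10  = 0.001
print(lr(100))    # 0.1 * 1.0 / 10  = 0.01 (warmup peak)
print(lr(10000))  # 0.1 * 1.0 / 100 = 0.001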
<SYSTEM_TASK:> Learning rate that decreases when eval metric stalls. <END_TASK> <USER_TASK:> Description: def EvalAdjustingSchedule(history, constant=0.1, steps_to_decrease=20, improvement_margin=0.001, decrease_rate=1.5, history_mode="eval", metric="metrics/accuracy"): """Learning rate that decreases when eval metric stalls. If the chosen metric does not improve by improvement_margin for as many as steps_to_decrease steps, then the constant gets decreased by decrease rate. Finally, the MultifactorSchedule gets called with the adjusted constant. Args: history: trax.history.History, the history of training and evaluation. constant: float, the starting constant for the learning rate schedule. steps_to_decrease: int, after how many steps without improvement should we decrease the constant. improvement_margin: how much we need to improve to consider the metric improved. decrease_rate: by what fraction to decrease (i.e. lr /= decrease_rate). history_mode: str, which mode of the history to use. metric: which evaluation metric to use for adjustments. Returns: a function learning_rate(step): float -> float, the step-dependent lr. """
metrics = history.get(history_mode, metric) adjusted = constant if len(metrics) < 2: return MultifactorSchedule(history, constant=adjusted) steps_without_improvement = 0 cur = metrics.pop()[1] # The most-recent value of the metric. while len(metrics) > 1: # The one-before value of metrics as .pop() removes one element each time. prev = metrics.pop()[1] if cur < prev * (1 + improvement_margin): steps_without_improvement += 1 else: cur = prev steps_without_improvement = 0 if steps_without_improvement >= steps_to_decrease: adjusted /= decrease_rate cur = prev steps_without_improvement = 0 return MultifactorSchedule(history, constant=adjusted)
<SYSTEM_TASK:> Project encoder hidden state under num_blocks using projection tensors. <END_TASK> <USER_TASK:> Description: def project_hidden(x, projection_tensors, hidden_size, num_blocks): """Project encoder hidden state under num_blocks using projection tensors. Args: x: Encoder hidden state of shape [batch_size, latent_dim, hidden_size]. projection_tensors: Projection tensors used to project the hidden state. hidden_size: Dimension of the latent space. num_blocks: Number of blocks in DVQ. Returns: x_projected: Projected states of shape [batch_size, latent_dim, num_blocks, hidden_size / num_blocks]. """
batch_size, latent_dim, _ = common_layers.shape_list(x) x = tf.reshape(x, shape=[1, -1, hidden_size]) x_tiled = tf.reshape( tf.tile(x, multiples=[num_blocks, 1, 1]), shape=[num_blocks, -1, hidden_size]) x_projected = tf.matmul(x_tiled, projection_tensors) x_projected = tf.transpose(x_projected, perm=[1, 0, 2]) x_4d = tf.reshape(x_projected, [batch_size, latent_dim, num_blocks, -1]) return x_4d
<SYSTEM_TASK:> Slice encoder hidden state under num_blocks. <END_TASK> <USER_TASK:> Description: def slice_hidden(x, hidden_size, num_blocks): """Slice encoder hidden state under num_blocks. Args: x: Encoder hidden state of shape [batch_size, latent_dim, hidden_size]. hidden_size: Dimension of the latent space. num_blocks: Number of blocks in DVQ. Returns: Sliced states of shape [batch_size, latent_dim, num_blocks, block_dim]. """
batch_size, latent_dim, _ = common_layers.shape_list(x) block_dim = hidden_size // num_blocks x_sliced = tf.reshape(x, shape=[batch_size, latent_dim, num_blocks, block_dim]) return x_sliced
<SYSTEM_TASK:> Compute nearest neighbors and loss for training the embeddings via DVQ. <END_TASK> <USER_TASK:> Description: def embedding_lookup(x, means, num_blocks, block_v_size, bottleneck_kind="dvq", random_top_k=1, soft_em=False, num_samples=1, do_hard_gumbel_softmax=False, temperature_warmup_steps=150000, num_flows=0, approximate_gs_entropy=False, sum_over_latents=False): """Compute nearest neighbors and loss for training the embeddings via DVQ. Args: x: Continuous encodings of shape [batch_size, latent_dim, num_blocks, block_dim]. means: Embedding table of shape [num_blocks, block_v_size, block_dim]. num_blocks: Number of blocks in DVQ. block_v_size: Number of table entries per block. bottleneck_kind: Discrete bottleneck type. random_top_k: Noisy top-k if this is bigger than 1. soft_em: If True then use soft EM rather than hard EM. num_samples: Number of samples to use for soft EM. do_hard_gumbel_softmax: Whether to use hard or soft Gumbel-Softmax samples for gumbel-softmax-dvq bottleneck. temperature_warmup_steps: Number of steps it takes to decay temperature to 0. Used only if bottleneck_kind is gumbel-softmax-dvq. num_flows: Number of inverse autoregressive flows for gumbel-softmax-dvq bottleneck. approximate_gs_entropy: Whether to approximate the Gumbel-Softmax density as a categorical distribution when calculating the sample entropy. Used only if bottleneck_kind is gumbel-softmax-dvq. sum_over_latents: Whether to sum over non-batch dimensions when calculating negative entropy loss. Used only if soft EM or when bottleneck_kind is gumbel-softmax-dvq. Returns: x_means_hot: The nearest neighbor in one hot form, with shape [batch_size * latent_dim, num_blocks, block_v_size]. x_means: The nearest neighbor itself, with shape [batch_size * latent_dim, num_blocks, block_dim]. q_loss: Scalar Tensor representing codebook loss. e_loss: Scalar Tensor representing commitment loss. neg_q_entropy: Scalar Tensor representing negative entropy of variational approximation (0 if it is deterministic). """
if bottleneck_kind == "gumbel-softmax-dvq": x_means_hot, neg_q_entropy = gumbel_softmax_nearest_neighbor_dvq( x, means, block_v_size, hard=do_hard_gumbel_softmax, num_samples=num_samples, temperature_warmup_steps=temperature_warmup_steps, num_flows=num_flows, approximate_gs_entropy=approximate_gs_entropy, sum_over_latents=sum_over_latents) else: x_means_hot, neg_q_entropy = nearest_neighbor( x, means, block_v_size, random_top_k, soft_em=soft_em, num_samples=num_samples, sum_over_latents=sum_over_latents) x_means_hot_flat = tf.reshape(x_means_hot, [-1, num_blocks, block_v_size]) x_means = tf.matmul(tf.transpose(x_means_hot_flat, perm=[1, 0, 2]), means) x_means = tf.transpose(x_means, [1, 0, 2]) batch_size, latent_dim, num_blocks, block_dim = common_layers.shape_list(x) x = tf.reshape(x, [batch_size * latent_dim, num_blocks, block_dim]) # Currently, we use the mean scaling for the commitment loss, as opposed to # summing across all non-batch dimensions. q_loss = tf.reduce_mean(tf.squared_difference(tf.stop_gradient(x), x_means)) e_loss = tf.reduce_mean(tf.squared_difference(x, tf.stop_gradient(x_means))) return x_means_hot, x_means, q_loss, e_loss, neg_q_entropy
<SYSTEM_TASK:> Simple variational autoencoder without discretization. <END_TASK> <USER_TASK:> Description: def vae(x, z_size, name=None): """Simple variational autoencoder without discretization. Args: x: Input to the discretization bottleneck. z_size: Number of bits, where discrete codes range from 1 to 2**z_size. name: Name for the bottleneck scope. Returns: Embedding function, latent, loss, mu and log_sigma. """
with tf.variable_scope(name, default_name="vae"): mu = tf.layers.dense(x, z_size, name="mu") log_sigma = tf.layers.dense(x, z_size, name="log_sigma") shape = common_layers.shape_list(x) epsilon = tf.random_normal([shape[0], shape[1], 1, z_size]) z = mu + tf.exp(log_sigma / 2) * epsilon kl = 0.5 * tf.reduce_mean( tf.expm1(log_sigma) + tf.square(mu) - log_sigma, axis=-1) free_bits = z_size // 4 kl_loss = tf.reduce_mean(tf.maximum(kl - free_bits, 0.0)) return z, kl_loss, mu, log_sigma
<SYSTEM_TASK:> Sample from the Gumbel distribution, protect from overflows. <END_TASK> <USER_TASK:> Description: def gumbel_sample(shape): """Sample from the Gumbel distribution, protect from overflows. Args: shape: Shape of Gumbel samples. Returns: Noise drawn from Gumbel distribution. """
uniform_samples = tf.random_uniform(shape, minval=0.00001, maxval=0.99998) return -tf.log(-tf.log(uniform_samples))
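A NumPy version of the clipped Gumbel sampler, shown for illustration; clipping the uniform draws away from 0 and 1 is what protects the double log from overflow.

import numpy as np

def gumbel_sample_np(shape, rng=None):
    # Hypothetical standalone helper mirroring the TF code above.
    rng = rng or np.random.default_rng(0)
    u = rng.uniform(0.00001, 0.99998, size=shape)
    return -np.log(-np.log(u))

print(gumbel_sample_np((2, 3)))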
<SYSTEM_TASK:> Gumbel softmax discretization bottleneck. <END_TASK> <USER_TASK:> Description: def gumbel_softmax(x, z_size, mode, softmax_k=0, temperature_warmup_steps=150000, summary=True, name=None): """Gumbel softmax discretization bottleneck. Args: x: Input to the discretization bottleneck. z_size: Number of bits, where discrete codes range from 1 to 2**z_size. mode: tf.estimator.ModeKeys. softmax_k: If > 0 then do top-k softmax. temperature_warmup_steps: Number of steps it takes to decay temperature to 0. summary: Whether to write summaries. name: Name for the bottleneck scope. Returns: Embedding function, discrete code, and loss. """
with tf.variable_scope(name, default_name="gumbel_softmax"): m = tf.layers.dense(x, 2**z_size, name="mask") if softmax_k > 0: m, kl = top_k_softmax(m, softmax_k) return m, m, 1.0 - tf.reduce_mean(kl) logsm = tf.nn.log_softmax(m) # Gumbel-softmax sample. gumbel_samples = gumbel_sample(common_layers.shape_list(m)) steps = temperature_warmup_steps gumbel_samples *= common_layers.inverse_exp_decay(steps // 5) * 0.5 temperature = 1.2 - common_layers.inverse_lin_decay(steps) # 10% of the time keep reasonably high temperature to keep learning. temperature = tf.cond( tf.less(tf.random_uniform([]), 0.9), lambda: temperature, lambda: tf.random_uniform([], minval=0.5, maxval=1.0)) s = tf.nn.softmax((logsm + gumbel_samples) / temperature) m = tf.nn.softmax(m) kl = -tf.reduce_max(logsm, axis=-1) if summary: tf.summary.histogram("max-log", tf.reshape(kl, [-1])) # Calculate the argmax and construct hot vectors. maxvec = tf.reshape(tf.argmax(m, axis=-1), [-1]) maxvhot = tf.stop_gradient(tf.one_hot(maxvec, 2**z_size)) # Add losses that prevent too few being used. distrib = tf.reshape(logsm, [-1, 2**z_size]) * maxvhot d_mean = tf.reduce_mean(distrib, axis=[0], keep_dims=True) d_variance = tf.reduce_mean( tf.squared_difference(distrib, d_mean), axis=[0]) d_dev = -tf.reduce_mean(d_variance) ret = s if mode != tf.estimator.ModeKeys.TRAIN: ret = tf.reshape(maxvhot, common_layers.shape_list(s)) # Just hot @eval. return m, ret, d_dev * 5.0 + tf.reduce_mean(kl) * 0.002
<SYSTEM_TASK:> Discretize each x into one of codebook_size codes. <END_TASK> <USER_TASK:> Description: def vq_body(x, codebook_size, beta=0.25, decay=0.999, epsilon=1e-5, soft_em=False, num_samples=10, temperature=None, do_update=True): """Discretize each x into one of codebook_size codes."""
x_shape = common_layers.shape_list(x) hidden_size = x_shape[-1] means, ema_means, ema_count = get_vq_codebook(codebook_size, hidden_size) x = tf.reshape(x, [-1, hidden_size]) x_means_hot, e_loss, distances = vq_nearest_neighbor( x, means, soft_em=soft_em, num_samples=num_samples, temperature=temperature) def loss_with_update(): """Update the ema variables and return loss triggering the update.""" updated_ema_count = moving_averages.assign_moving_average( ema_count, tf.reduce_sum(tf.reshape(x_means_hot, shape=[-1, codebook_size]), axis=0), decay, zero_debias=False) dw = tf.matmul(x_means_hot, x, transpose_a=True) updated_ema_means = tf.identity( moving_averages.assign_moving_average( ema_means, dw, decay, zero_debias=False)) n = tf.reduce_sum(updated_ema_count, axis=-1, keepdims=True) updated_ema_count = ( (updated_ema_count + epsilon) / (n + codebook_size * epsilon) * n) updated_ema_means /= tf.expand_dims(updated_ema_count, axis=-1) with tf.control_dependencies([e_loss]): update_means = means.assign(updated_ema_means) with tf.control_dependencies([update_means]): return beta * e_loss # Loss, also do update if requested. if do_update: loss = loss_with_update() else: loss = tf.cond(do_update, loss_with_update, lambda: beta * e_loss) d = tf.reshape(x_means_hot, x_shape[:-1] + [codebook_size]) return d, loss, distances
<SYSTEM_TASK:> Compute the loss of large vocab tensors using a VQAE codebook. <END_TASK> <USER_TASK:> Description: def vq_loss(x, targets, codebook_size, beta=0.25, decay=0.999, epsilon=1e-5, soft_em=False, num_samples=10, temperature=None, do_update=True): """Compute the loss of large vocab tensors using a VQAE codebook. Args: x: Tensor of inputs to be quantized to nearest code targets: Tensor of target indices to target codes codebook_size: Size of quantization codebook beta: scalar float for moving averages decay: scalar float for moving averages epsilon: scalar float for moving averages soft_em: boolean, whether to apply a soft sampling procedure num_samples: if soft_em, number of samples to take temperature: temperature if we want to sample nearest neighbors or None do_update: whether to update the means; True by default, can be a Tensor Returns: discrete_x: one-hot Tensor indicating which codebook element is closest to x x_means: Tensor, on the forward pass: closest codebook element to x, on the backwards pass: soft convex-combination of codebook elements by proximity to x target_means: the codebook elements corresponding to the targets code_loss: loss driving x closer to its nearest codebook element targets_loss: cross-entropy loss driving x closer to code corresponding to target """
x_shape = common_layers.shape_list(x) target_shape = common_layers.shape_list(targets) hidden_size = x_shape[-1] means, _, _ = get_vq_codebook(codebook_size, hidden_size) x = tf.reshape(x, [-1, hidden_size]) targets = tf.reshape(targets, [-1]) one_hot_targets = tf.one_hot(targets, codebook_size) target_means = tf.matmul(one_hot_targets, means) discrete_x, code_loss, distances = vq_body( x, codebook_size, beta=beta, decay=decay, epsilon=epsilon, soft_em=soft_em, num_samples=num_samples, temperature=temperature, do_update=do_update) logits = -distances targets_loss = tf.losses.sparse_softmax_cross_entropy( logits=logits, labels=targets) targets_loss = tf.reduce_mean(targets_loss) x_means = tf.matmul(discrete_x, means) x_means = x + tf.stop_gradient(x_means - x) discrete_x = tf.reshape(discrete_x, x_shape[:-1] + [codebook_size]) target_means = tf.reshape(target_means, target_shape + [hidden_size]) return discrete_x, x_means, target_means, code_loss, targets_loss
<SYSTEM_TASK:> Simple discretization through tanh, flip bottleneck_noise many bits. <END_TASK> <USER_TASK:> Description: def tanh_discrete_bottleneck(x, bottleneck_bits, bottleneck_noise, discretize_warmup_steps, mode): """Simple discretization through tanh, flip bottleneck_noise many bits."""
x = tf.layers.dense(x, bottleneck_bits, name="tanh_discrete_bottleneck") d0 = tf.stop_gradient(2.0 * tf.to_float(tf.less(0.0, x))) - 1.0 if mode == tf.estimator.ModeKeys.TRAIN: x += tf.truncated_normal( common_layers.shape_list(x), mean=0.0, stddev=0.2) x = tf.tanh(x) d = x + tf.stop_gradient(2.0 * tf.to_float(tf.less(0.0, x)) - 1.0 - x) if mode == tf.estimator.ModeKeys.TRAIN: noise = tf.random_uniform(common_layers.shape_list(x)) noise = 2.0 * tf.to_float(tf.less(bottleneck_noise, noise)) - 1.0 d *= noise d = common_layers.mix(d, x, discretize_warmup_steps, mode == tf.estimator.ModeKeys.TRAIN) return d, d0
<SYSTEM_TASK:> Simple un-discretization from tanh. <END_TASK> <USER_TASK:> Description: def tanh_discrete_unbottleneck(x, hidden_size): """Simple un-discretization from tanh."""
x = tf.layers.dense(x, hidden_size, name="tanh_discrete_unbottleneck") return x
<SYSTEM_TASK:> Improved semantic hashing bottleneck. <END_TASK> <USER_TASK:> Description: def isemhash_bottleneck(x, bottleneck_bits, bottleneck_noise, discretize_warmup_steps, mode, isemhash_noise_dev=0.5, isemhash_mix_prob=0.5): """Improved semantic hashing bottleneck."""
with tf.variable_scope("isemhash_bottleneck"): x = tf.layers.dense(x, bottleneck_bits, name="dense") y = common_layers.saturating_sigmoid(x) if isemhash_noise_dev > 0 and mode == tf.estimator.ModeKeys.TRAIN: noise = tf.truncated_normal( common_layers.shape_list(x), mean=0.0, stddev=isemhash_noise_dev) y = common_layers.saturating_sigmoid(x + noise) d = tf.to_float(tf.less(0.5, y)) + y - tf.stop_gradient(y) d = 2.0 * d - 1.0 # Move from [0, 1] to [-1, 1]. if mode == tf.estimator.ModeKeys.TRAIN: # Flip some bits. noise = tf.random_uniform(common_layers.shape_list(x)) noise = 2.0 * tf.to_float(tf.less(bottleneck_noise, noise)) - 1.0 d *= noise d = common_layers.mix( d, 2.0 * y - 1.0, discretize_warmup_steps, mode == tf.estimator.ModeKeys.TRAIN, max_prob=isemhash_mix_prob) return d, 0.0
<SYSTEM_TASK:> Meta-function calling all the above bottlenecks with hparams. <END_TASK> <USER_TASK:> Description: def parametrized_bottleneck(x, hparams): """Meta-function calling all the above bottlenecks with hparams."""
if hparams.bottleneck_kind == "tanh_discrete": d, _ = tanh_discrete_bottleneck( x, hparams.bottleneck_bits, hparams.bottleneck_noise * 0.5, hparams.discretize_warmup_steps, hparams.mode) return d, 0.0 if hparams.bottleneck_kind == "isemhash": return isemhash_bottleneck( x, hparams.bottleneck_bits, hparams.bottleneck_noise * 0.5, hparams.discretize_warmup_steps, hparams.mode, hparams.isemhash_noise_dev, hparams.isemhash_mix_prob) if hparams.bottleneck_kind == "vq": return vq_discrete_bottleneck(x, hparams.bottleneck_bits, hparams.vq_beta, hparams.vq_decay, hparams.vq_epsilon) if hparams.bottleneck_kind == "em": return vq_discrete_bottleneck( x, hparams.bottleneck_bits, hparams.vq_beta, hparams.vq_decay, hparams.vq_epsilon, soft_em=True, num_samples=hparams.vq_num_samples) if hparams.bottleneck_kind == "gumbel_softmax": return gumbel_softmax_discrete_bottleneck( x, hparams.bottleneck_bits, hparams.vq_beta, hparams.vq_decay, hparams.vq_epsilon, hparams.temperature_warmup_steps, hard=False, summary=True) raise ValueError( "Unsupported hparams.bottleneck_kind %s" % hparams.bottleneck_kind)
<SYSTEM_TASK:> Meta-function calling all the above un-bottlenecks with hparams. <END_TASK> <USER_TASK:> Description: def parametrized_unbottleneck(x, hidden_size, hparams): """Meta-function calling all the above un-bottlenecks with hparams."""
if hparams.bottleneck_kind == "tanh_discrete": return tanh_discrete_unbottleneck(x, hidden_size) if hparams.bottleneck_kind == "isemhash": return isemhash_unbottleneck(x, hidden_size, hparams.isemhash_filter_size_multiplier) if hparams.bottleneck_kind in ["vq", "em", "gumbel_softmax"]: return vq_discrete_unbottleneck(x, hidden_size) raise ValueError( "Unsupported hparams.bottleneck_kind %s" % hparams.bottleneck_kind)
<SYSTEM_TASK:> Create hyperparameters for inverse autoregressive flows. <END_TASK> <USER_TASK:> Description: def iaf_hparams(hidden_size=512, filter_size=4096): """Create hyperparameters for inverse autoregressive flows. Args: hidden_size: Width of attention layers and neural network output layer. filter_size: Hidden layer width for neural network. Returns: hparams: Hyperparameters with basic presets for inverse autoregressive flows. """
hparams = common_hparams.basic_params1() # Attention hyperparameters. hparams.hidden_size = hidden_size hparams.add_hparam("attention_key_channels", None) hparams.add_hparam("attention_value_channels", None) hparams.add_hparam("num_heads", 4) hparams.add_hparam("attention_dropout", 0.1) hparams.add_hparam("shared_rel", False) hparams.add_hparam("block_width", 1) hparams.add_hparam("block_length", 1) hparams.add_hparam("q_filter_width", 1) hparams.add_hparam("kv_filter_width", 1) # Preprocessing and postprocessing hyperparameters. hparams.layer_preprocess_sequence = "n" hparams.layer_prepostprocess_dropout = 0.1 hparams.norm_type = "layer" hparams.norm_epsilon = 1e-06 hparams.layer_prepostprocess_dropout_broadcast_dims = "" hparams.layer_postprocess_sequence = "da" # Feedforward neural network hyperparameters. hparams.add_hparam("filter_size", filter_size) hparams.add_hparam("ffn_layer", "conv_hidden_relu") hparams.add_hparam("relu_dropout", 0.1) return hparams
<SYSTEM_TASK:> Returns a set containing the original vocabulary. <END_TASK> <USER_TASK:> Description: def _original_vocab(tmp_dir): """Returns a set containing the original vocabulary. This is important for comparing with published results. Args: tmp_dir: directory containing dataset. Returns: a set of strings """
vocab_url = ("http://download.tensorflow.org/models/LM_LSTM_CNN/" "vocab-2016-09-10.txt") vocab_filename = os.path.basename(vocab_url + ".en") vocab_filepath = os.path.join(tmp_dir, vocab_filename) if not os.path.exists(vocab_filepath): generator_utils.maybe_download(tmp_dir, vocab_filename, vocab_url) return set([ text_encoder.native_to_unicode(l.strip()) for l in tf.gfile.Open(vocab_filepath) ])
<SYSTEM_TASK:> Replace out-of-vocab words with "UNK". <END_TASK> <USER_TASK:> Description: def _replace_oov(original_vocab, line): """Replace out-of-vocab words with "UNK". This maintains compatibility with published results. Args: original_vocab: a set of strings (The standard vocabulary for the dataset) line: a unicode string - a space-delimited sequence of words. Returns: a unicode string - a space-delimited sequence of words. """
return u" ".join( [word if word in original_vocab else u"UNK" for word in line.split()])
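A usage sketch with a toy vocabulary (made up for illustration):

original_vocab = {u"the", u"cat", u"sat"}  # illustrative vocab, not the real one
line = u"the cat sat on the mat"
print(u" ".join(w if w in original_vocab else u"UNK" for w in line.split()))
# the cat sat UNK the UNK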
<SYSTEM_TASK:> Cycle GAN, main step used for training. <END_TASK> <USER_TASK:> Description: def cycle_gan_internal(inputs, targets, _, hparams): """Cycle GAN, main step used for training."""
with tf.variable_scope("cycle_gan"): # Embed inputs and targets. inputs_orig, targets_orig = tf.to_int32(inputs), tf.to_int32(targets) inputs = common_layers.embedding( inputs_orig, hparams.vocab_size, hparams.hidden_size, "embed") targets = common_layers.embedding( targets_orig, hparams.vocab_size, hparams.hidden_size, "embed", reuse=True) x, _ = split_on_batch(inputs) _, y = split_on_batch(targets) # Y --> X y_fake = generator(y, hparams, "Fy", reuse=False) y_to_x_loss = lossfn(y, y_fake, True, hparams, True, "YtoX") # X --> Y x_fake = generator(x, hparams, "Gx", reuse=False) x_to_y_loss = lossfn(y, x_fake, True, hparams, True, "XtoY") # Cycle-Consistency y_fake_ = generator(y_fake, hparams, "Gx", reuse=True) x_fake_ = generator(x_fake, hparams, "Fy", reuse=True) x_to_x_loss = hparams.cycle_loss_multiplier1 * tf.reduce_mean( tf.abs(x_fake_ - x)) y_to_y_loss = hparams.cycle_loss_multiplier2 * tf.reduce_mean( tf.abs(y_fake_ - y)) cycloss = x_to_x_loss + y_to_y_loss sample_generated = generator(inputs, hparams, "Gx", reuse=True) sample_generated = tf.layers.dense( sample_generated, hparams.vocab_size, name="softmax", reuse=None) sample_generated = tf.stop_gradient( tf.expand_dims(sample_generated, axis=2)) losses = {"cycloss": cycloss, "y_to_x_loss": y_to_x_loss, "x_to_y_loss": x_to_y_loss} return sample_generated, losses
<SYSTEM_TASK:> Hparams for decoding. <END_TASK> <USER_TASK:> Description: def decode_hparams(overrides=""): """Hparams for decoding."""
hparams = decoding.decode_hparams() # Number of interpolations between [0.0, 1.0]. hparams.add_hparam("num_interp", 11) # Which level(s) to interpolate. hparams.add_hparam("level_interp", [0, 1, 2]) # "all" or "ranked": interpolate all channels or only ranked channels. hparams.add_hparam("channel_interp", "all") # Interpolate channels ranked according to squared L2 norm. hparams.add_hparam("rank_interp", 1) # Whether or not to save frames as summaries. hparams.add_hparam("save_frames", True) hparams.parse(overrides) return hparams
<SYSTEM_TASK:> Preprocess frame. <END_TASK> <USER_TASK:> Description: def preprocess_frame(frame): """Preprocess frame. 1. Converts [0, 255] to [-0.5, 0.5] 2. Adds uniform noise. Args: frame: 3-D Tensor representing pixels. Returns: frame: 3-D Tensor with values in between [-0.5, 0.5] """
# Normalize from [0.0, 1.0] -> [-0.5, 0.5] frame = common_layers.convert_rgb_to_real(frame) frame = frame - 0.5 frame, _ = glow_ops.uniform_binning_correction(frame) return frame
<SYSTEM_TASK:> Interpolate between the first input frame and last target frame. <END_TASK> <USER_TASK:> Description: def interpolate(features, hparams, decode_hp): """Interpolate between the first input frame and last target frame. Args: features: dict of tensors hparams: HParams, training hparams. decode_hp: HParams, decode hparams. Returns: images: interpolated images, 4-D Tensor, shape=(num_interp, H, W, C) first_frame: image, 3-D Tensor, shape=(1, H, W, C) last_frame: image, 3-D Tensor, shape=(1, H, W, C) """
inputs, targets = features["inputs"], features["targets"] inputs = tf.unstack(inputs, axis=1) targets = tf.unstack(targets, axis=1) coeffs = np.linspace(0.0, 1.0, decode_hp.num_interp) # (X_1, X_t) -> (z_1, z_t) first_frame, last_frame = inputs[0], targets[-1] first_top_z, first_level_eps = frame_to_latents(first_frame, hparams) last_top_z, last_level_eps = frame_to_latents(last_frame, hparams) # Interpolate latents at all levels. first_lats = first_level_eps + [first_top_z] last_lats = last_level_eps + [last_top_z] interp_lats = [] lat_iterator = enumerate(zip(first_lats, last_lats)) for level_ind, (first_lat, last_lat) in lat_iterator: if level_ind in decode_hp.level_interp: if decode_hp.channel_interp == "all": interp_lat = glow_ops.linear_interpolate(first_lat, last_lat, coeffs) else: interp_lat = glow_ops.linear_interpolate_rank( first_lat, last_lat, coeffs, decode_hp.rank_interp) else: interp_lat = tf.tile(first_lat, [decode_hp.num_interp, 1, 1, 1]) interp_lats.append(interp_lat) level_eps_interp = interp_lats[:hparams.n_levels-1] z_top_interp = interp_lats[-1] images = latents_to_frames(z_top_interp, level_eps_interp, hparams) return images, first_frame, last_frame
<SYSTEM_TASK:> Get nested summaries_log_dir based on decode_hp. <END_TASK> <USER_TASK:> Description: def get_summaries_log_dir(decode_hp, output_dir, dataset_split): """Get nested summaries_log_dir based on decode_hp."""
child_dir = decode_hp.summaries_log_dir level_dir = "".join([str(level) for level in decode_hp.level_interp]) if decode_hp.channel_interp == "all": rank_dir = "all" else: rank_dir = "rank_%d" % decode_hp.rank_interp child_dir = "%s/%s_%s" % (child_dir, level_dir, rank_dir) if dataset_split is not None: child_dir += "_{}".format(dataset_split) return os.path.join(output_dir, child_dir)
<SYSTEM_TASK:> Converts interpolated frames into tf summaries. <END_TASK> <USER_TASK:> Description: def interpolations_to_summary(sample_ind, interpolations, first_frame, last_frame, hparams, decode_hp): """Converts interpolated frames into tf summaries. The summaries consist of: 1. Image summary corresponding to the first frame. 2. Image summary corresponding to the last frame. 3. The interpolated frames as a gif summary. Args: sample_ind: int interpolations: Numpy array, shape=(num_interp, H, W, 3) first_frame: Numpy array, shape=(HWC) last_frame: Numpy array, shape=(HWC) hparams: HParams, train hparams decode_hp: HParams, decode hparams Returns: summaries: list of tf Summary Values. """
parent_tag = "sample_%d" % sample_ind frame_shape = hparams.problem.frame_shape interp_shape = [hparams.batch_size, decode_hp.num_interp] + frame_shape interpolations = np.reshape(interpolations, interp_shape) interp_tag = "%s/interp/%s" % (parent_tag, decode_hp.channel_interp) if decode_hp.channel_interp == "ranked": interp_tag = "%s/rank_%d" % (interp_tag, decode_hp.rank_interp) summaries, _ = common_video.py_gif_summary( interp_tag, interpolations, return_summary_value=True, max_outputs=decode_hp.max_display_outputs, fps=decode_hp.frames_per_second) if decode_hp.save_frames: first_frame_summ = image_utils.image_to_tf_summary_value( first_frame, "%s/first" % parent_tag) last_frame_summ = image_utils.image_to_tf_summary_value( last_frame, "%s/last" % parent_tag) summaries.append(first_frame_summ) summaries.append(last_frame_summ) return summaries
<SYSTEM_TASK:>
Create slot variables for Adam with accumulated gradients.
<END_TASK>
<USER_TASK:>
Description:
def _create_slots(self, var_list):
  """Create slot variables for Adam with accumulated gradients."""
  super(MultistepAdamOptimizer, self)._create_slots(var_list)
  first_var = min(var_list, key=lambda x: x.name)
  self._create_non_slot_variable(initial_value=0 if self._n == 1 else 1,
                                 name="iter",
                                 colocate_with=first_var)
  for v in var_list:
    self._zeros_slot(v, "grad_acc", self._name)
<SYSTEM_TASK:>
Apply conditionally if counter is zero.
<END_TASK>
<USER_TASK:>
Description:
def _apply_cond(self, apply_fn, grad, var, *args, **kwargs):
  """Apply conditionally if counter is zero."""
  grad_acc = self.get_slot(var, "grad_acc")

  def apply_adam(grad_acc, apply_fn, grad, var, *args, **kwargs):
    total_grad = (grad_acc + grad) / tf.cast(self._n_t, grad.dtype)
    adam_op = apply_fn(total_grad, var, *args, **kwargs)
    with tf.control_dependencies([adam_op]):
      grad_acc_to_zero_op = grad_acc.assign(tf.zeros_like(grad_acc),
                                            use_locking=self._use_locking)
    return tf.group(adam_op, grad_acc_to_zero_op)

  def accumulate_gradient(grad_acc, grad):
    assign_op = tf.assign_add(grad_acc, grad, use_locking=self._use_locking)
    return tf.group(assign_op)  # Strip return value.

  return tf.cond(
      tf.equal(self._get_iter_variable(), 0),
      lambda: apply_adam(grad_acc, apply_fn, grad, var, *args, **kwargs),
      lambda: accumulate_gradient(grad_acc, grad))
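To make the accumulate-then-apply pattern concrete, here is a minimal NumPy sketch that uses plain SGD instead of Adam and a Python counter instead of the TF iter variable; it illustrates the idea only and is not the optimizer above:

import numpy as np

def multistep_sgd_sketch(grads, var, lr=0.1, n=4):
  # Accumulate n gradients, then apply one update with their average.
  grad_acc = np.zeros_like(var)
  for step, g in enumerate(grads, start=1):
    grad_acc += g
    if step % n == 0:  # plays the role of the iter counter hitting zero
      var = var - lr * grad_acc / n
      grad_acc = np.zeros_like(grad_acc)
  return var

print(multistep_sgd_sketch([np.ones(2)] * 8, np.zeros(2)))
# Two effective updates of -0.1 each -> [-0.2 -0.2]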
<SYSTEM_TASK:>
Updates beta_power variables every n batches and increments the counter.
<END_TASK>
<USER_TASK:>
Description:
def _finish(self, update_ops, name_scope):
  """Updates beta_power variables every n batches and increments the counter."""
  iter_ = self._get_iter_variable()
  beta1_power, beta2_power = self._get_beta_accumulators()
  with tf.control_dependencies(update_ops):
    with tf.colocate_with(iter_):

      def update_beta_op():
        update_beta1 = beta1_power.assign(
            beta1_power * self._beta1_t,
            use_locking=self._use_locking)
        update_beta2 = beta2_power.assign(
            beta2_power * self._beta2_t,
            use_locking=self._use_locking)
        return tf.group(update_beta1, update_beta2)

      maybe_update_beta = tf.cond(
          tf.equal(iter_, 0), update_beta_op, tf.no_op)
      with tf.control_dependencies([maybe_update_beta]):
        update_iter = iter_.assign(tf.mod(iter_ + 1, self._n_t),
                                   use_locking=self._use_locking)
  return tf.group(
      *update_ops + [update_iter, maybe_update_beta], name=name_scope)
<SYSTEM_TASK:>
Over which devices do we split each training batch.
<END_TASK>
<USER_TASK:>
Description:
def data_parallelism_from_flags(daisy_chain_variables=True, all_workers=False):
  """Over which devices do we split each training batch.

  In old-fashioned async mode, we split the batch over all GPUs on the
  current worker.

  In sync mode, we split the batch over all the parameter server GPUs.

  This function returns an expert_utils.Parallelism object, which can be used
  to build the model. It is configured in a way that any variables created
  by `tf.get_variable` will be assigned to the parameter servers and shared
  between datashards.

  Args:
    daisy_chain_variables: whether to copy variables in a daisy chain on GPUs.
    all_workers: whether the devices are all async workers or just this one.

  Returns:
    a expert_utils.Parallelism.
  """
  dp_arg_names = inspect.getargspec(data_parallelism).args

  blacklist = ["daisy_chain_variables", "all_workers"]

  kwargs = {}
  for arg in dp_arg_names:
    if arg in blacklist:
      continue
    kwargs[arg] = getattr(tf.flags.FLAGS, arg)

  return data_parallelism(
      daisy_chain_variables=daisy_chain_variables,
      all_workers=all_workers,
      **kwargs)
<SYSTEM_TASK:>
Generate concatenated lines from file up to up_threshold characters.
<END_TASK>
<USER_TASK:>
Description:
def concat_generator(filename, up_threshold, low_threshold=10):
  """Generate concatenated lines from file up to up_threshold characters."""
  txt = ""
  for line in tf.gfile.Open(filename):
    line = line.strip()
    if len(txt) + len(line) + 1 >= up_threshold:
      ret = txt
      txt = ""
      # We don't yield very short or very long parts to prevent noisy examples.
      if len(ret) > low_threshold and len(ret) < up_threshold:
        yield {"targets": ret}

    if not txt:
      txt = line
    else:
      txt = " ".join([txt, line])
<SYSTEM_TASK:>
Given python generators, generate from one, then from another, etc.
<END_TASK>
<USER_TASK:>
Description:
def mix_generators(generator_list):
  """Given python generators, generate from one, then from another, etc."""
  i = 0
  l = len(generator_list)
  stopiters_seen = 0
  while stopiters_seen <= l:
    try:
      yield six.next(generator_list[i % l])
      i += 1
      stopiters_seen = 0
    except StopIteration:
      i += 1
      stopiters_seen += 1
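A quick usage check (assuming mix_generators as defined above is in scope); the generators are round-robined until all of them are exhausted:

def letters():
  for c in "ab":
    yield c

def numbers():
  for n in range(3):
    yield n

print(list(mix_generators([letters(), numbers()])))
# -> ['a', 0, 'b', 1, 2]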
<SYSTEM_TASK:>
Compute BLEU core summaries using the decoder output.
<END_TASK>
<USER_TASK:>
Description:
def compute_bleu_summaries(hook_args):
  """Compute BLEU core summaries using the decoder output.

  Args:
    hook_args: DecodeHookArgs namedtuple
  Returns:
    A list of tf.Summary values if hook_args.hparams contains the
    reference file and the translated file.
  """
  decode_hparams = hook_args.decode_hparams

  if not (decode_hparams.decode_reference and decode_hparams.decode_to_file):
    return None

  values = []
  bleu = 100 * bleu_hook.bleu_wrapper(
      decode_hparams.decode_reference, decode_hparams.decode_to_file)
  values.append(tf.Summary.Value(tag="BLEU", simple_value=bleu))
  tf.logging.info("%s: BLEU = %6.2f" % (decode_hparams.decode_to_file, bleu))
  if hook_args.hparams.mlperf_mode:
    current_step = decode_hparams.mlperf_decode_step
    mlperf_log.transformer_print(
        key=mlperf_log.EVAL_TARGET, value=decode_hparams.mlperf_threshold)
    mlperf_log.transformer_print(
        key=mlperf_log.EVAL_ACCURACY,
        value={
            "epoch": max(current_step // decode_hparams.iterations_per_loop - 1,
                         0),
            "value": bleu
        })
    mlperf_log.transformer_print(key=mlperf_log.EVAL_STOP)
  if bleu >= decode_hparams.mlperf_threshold:
    decode_hparams.set_hparam("mlperf_success", True)

  return values
<SYSTEM_TASK:>
Get vocab for distill problems.
<END_TASK>
<USER_TASK:>
Description:
def get_or_create_vocab(self, data_dir, tmp_dir, force_get=False):
  """Get vocab for distill problems."""
  # We assume that the vocab file is present in the data_dir directory where
  # the generated data will be stored.
  vocab_filepath = os.path.join(data_dir, self.vocab_filename)
  encoder = text_encoder.SubwordTextEncoder(vocab_filepath)
  return encoder
<SYSTEM_TASK:>
Set hparams overrides from unparsed args list.
<END_TASK>
<USER_TASK:>
Description:
def set_hparams_from_args(args):
  """Set hparams overrides from unparsed args list."""
  if not args:
    return

  hp_prefix = "--hp_"
  tf.logging.info("Found unparsed command-line arguments. Checking if any "
                  "start with %s and interpreting those as hparams "
                  "settings.", hp_prefix)

  pairs = []
  i = 0
  while i < len(args):
    arg = args[i]
    if arg.startswith(hp_prefix):
      pairs.append((arg[len(hp_prefix):], args[i+1]))
      i += 2
    else:
      tf.logging.warn("Found unknown flag: %s", arg)
      i += 1

  as_hparams = ",".join(["%s=%s" % (key, val) for key, val in pairs])
  if FLAGS.hparams:
    as_hparams = "," + as_hparams
  FLAGS.hparams += as_hparams
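The pairing logic can be sketched standalone, without FLAGS (hypothetical argument values):

args = ["--hp_batch_size", "4096", "--hp_learning_rate", "0.1", "--foo"]
hp_prefix = "--hp_"
pairs, i = [], 0
while i < len(args):
  if args[i].startswith(hp_prefix):
    pairs.append((args[i][len(hp_prefix):], args[i + 1]))
    i += 2
  else:
    i += 1  # unknown flag, skipped here instead of logged
print(",".join("%s=%s" % kv for kv in pairs))
# -> batch_size=4096,learning_rate=0.1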
<SYSTEM_TASK:>
A stack of convolution blocks with residual connection.
<END_TASK>
<USER_TASK:>
Description:
def residual_block(x, hparams):
  """A stack of convolution blocks with residual connection."""
  k = (hparams.kernel_height, hparams.kernel_width)
  dilations_and_kernels = [((1, 1), k) for _ in range(3)]
  y = common_layers.subseparable_conv_block(
      x,
      hparams.hidden_size,
      dilations_and_kernels,
      padding="SAME",
      separability=0,
      name="residual_block")
  x = common_layers.layer_norm(x + y, hparams.hidden_size, name="lnorm")
  return tf.nn.dropout(x, 1.0 - hparams.dropout)
<SYSTEM_TASK:>
Gets to 2.92 in just under 4 days on 8 p100s.
<END_TASK>
<USER_TASK:>
Description:
def imagetransformerpp_base_14l_8h_big_uncond_dr03_dan_p():
  """Gets to 2.92 in just under 4 days on 8 p100s."""
  hparams = imagetransformerpp_base_12l_8h_big_uncond_dr03_dan_l()
  hparams.num_decoder_layers = 14
  hparams.batch_size = 8
  hparams.layer_prepostprocess_dropout = 0.2
  return hparams
<SYSTEM_TASK:>
hparams for 8 layer big 1d model for imagenet 64x64.
<END_TASK>
<USER_TASK:>
Description:
def imagetransformer1d_base_8l_64by64():
  """hparams for 8 layer big 1d model for imagenet 64x64."""
  hparams = image_transformer_base()
  hparams.num_heads = 8
  hparams.hidden_size = 512
  hparams.filter_size = 2048
  hparams.num_decoder_layers = 8
  hparams.batch_size = 1
  hparams.block_length = 512
  hparams.block_width = 768
  hparams.layer_prepostprocess_dropout = 0.1
  hparams.max_length = 14000
  hparams.unconditional = int(False)
  return hparams
<SYSTEM_TASK:>
Set of hyperparameters for a very small imagetransformer with MoE.
<END_TASK>
<USER_TASK:>
Description:
def imagetransformer_moe_tiny():
  """Set of hyperparameters for a very small imagetransformer with MoE."""
  hparams = imagetransformer_tiny()
  hparams.hidden_size = 64
  hparams.batch_size = 1
  hparams.num_hidden_layers = 3
  hparams.dec_attention_type = cia.AttentionType.MOE_LOCAL_1D
  hparams.add_hparam("moe_layers_decoder", "1")  # Which layer is MoE.
  hparams.moe_hidden_sizes = "1024"  # Hidden layer sizes (comma-separated).
  hparams.moe_num_experts = 16  # Number of experts in each MoE layer.
  hparams.moe_k = 2  # How many experts to use per batch element (try 2 or 4).
  hparams.moe_loss_coef = 1e-2  # MoE loss coefficient (1e-2 is usually ok).
  return hparams
<SYSTEM_TASK:>
Hparams for training imagetransformer on tpu.
<END_TASK>
<USER_TASK:>
Description:
def imagetransformer_sep_channels_8l_tpu():
  """Hparams for training imagetransformer on tpu."""
  hparams = imagetransformer_sep_channels_8l()
  update_hparams_for_tpu(hparams)
  hparams.batch_size = 4
  hparams.num_heads = 4   # Heads are expensive on TPU.
  hparams.shared_embedding_and_softmax_weights = False
  return hparams
<SYSTEM_TASK:>
Context manager wrapping the training loop, updates step counters.
<END_TASK>
<USER_TASK:>
Description:
def training_loop(self):
  """Context manager wrapping the training loop, updates step counters."""
  if not self.restarting:
    self._write_counters(self._local_step_at_start, self._global_step)

  tf.logging.info("Training %s up to %d, %d to go", self.model_mode,
                  self.target_local_step, self.steps_to_go)

  yield

  self._write_counters(self.target_local_step, -1)
<SYSTEM_TASK:>
Reads words from a file.
<END_TASK>
<USER_TASK:>
Description:
def _read_words(filename):
  """Reads words from a file."""
  with tf.gfile.GFile(filename, "r") as f:
    if sys.version_info[0] >= 3:
      return f.read().replace("\n", " %s " % EOS).split()
    else:
      return f.read().decode("utf-8").replace("\n", " %s " % EOS).split()
<SYSTEM_TASK:>
Reads a file to build a vocabulary of `vocab_size` most common words.
<END_TASK>
<USER_TASK:>
Description:
def _build_vocab(filename, vocab_path, vocab_size):
  """Reads a file to build a vocabulary of `vocab_size` most common words.

  The vocabulary is sorted by occurrence count and has one word per line.
  Originally from:
  https://github.com/tensorflow/models/blob/master/tutorials/rnn/ptb/reader.py

  Args:
    filename: file to read list of words from.
    vocab_path: path where to save the vocabulary.
    vocab_size: size of the vocabulary to generate.
  """
  data = _read_words(filename)
  counter = collections.Counter(data)
  count_pairs = sorted(counter.items(), key=lambda x: (-x[1], x[0]))
  words, _ = list(zip(*count_pairs))
  words = words[:vocab_size]
  with open(vocab_path, "w") as f:
    f.write("\n".join(words))
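The sort key orders tokens by descending count and then alphabetically; a small self-contained check:

import collections

data = "the cat sat on the mat the cat".split()
counter = collections.Counter(data)
count_pairs = sorted(counter.items(), key=lambda x: (-x[1], x[0]))
words, _ = zip(*count_pairs)
print(words)
# -> ('the', 'cat', 'mat', 'on', 'sat')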
<SYSTEM_TASK:>
Reads from file and returns a `TokenTextEncoder` for the vocabulary.
<END_TASK>
<USER_TASK:>
Description:
def _get_token_encoder(vocab_dir, vocab_name, filename):
  """Reads from file and returns a `TokenTextEncoder` for the vocabulary."""
  vocab_path = os.path.join(vocab_dir, vocab_name)
  if not tf.gfile.Exists(vocab_path):
    _build_vocab(filename, vocab_path, 10000)
  return text_encoder.TokenTextEncoder(vocab_path)
<SYSTEM_TASK:>
Normalize attention matrices and reshape as necessary.
<END_TASK>
<USER_TASK:>
Description:
def resize(att_mat, max_length=None):
  """Normalize attention matrices and reshape as necessary."""
  for i, att in enumerate(att_mat):
    # Add extra batch dim for viz code to work.
    if att.ndim == 3:
      att = np.expand_dims(att, axis=0)
    if max_length is not None:
      # Sum across different attention values for each token.
      att = att[:, :, :max_length, :max_length]
      row_sums = np.sum(att, axis=2)
      # Normalize.
      att /= row_sums[:, :, np.newaxis]
    att_mat[i] = att
  return att_mat
<SYSTEM_TASK:>
Compute representation of the attention ready for the d3 visualization.
<END_TASK>
<USER_TASK:>
Description:
def _get_attention(inp_text, out_text, enc_atts, dec_atts, encdec_atts):
  """Compute representation of the attention ready for the d3 visualization.

  Args:
    inp_text: list of strings, words to be displayed on the left of the vis
    out_text: list of strings, words to be displayed on the right of the vis
    enc_atts: numpy array, encoder self-attentions
        [num_layers, batch_size, num_heads, enc_length, enc_length]
    dec_atts: numpy array, decoder self-attentions
        [num_layers, batch_size, num_heads, dec_length, dec_length]
    encdec_atts: numpy array, encoder-decoder attentions
        [num_layers, batch_size, num_heads, dec_length, enc_length]

  Returns:
    Dictionary of attention representations with the structure:
    {
      'all': Representations for showing all attentions at the same time.
      'inp_inp': Representations for showing encoder self-attentions
      'inp_out': Representations for showing encoder-decoder attentions
      'out_out': Representations for showing decoder self-attentions
    }
    and each sub-dictionary has structure:
    {
      'att': list of inter attentions matrices, one for each attention head
      'top_text': list of strings, words to be displayed on the left of the vis
      'bot_text': list of strings, words to be displayed on the right of the vis
    }
  """
  def get_full_attention(layer):
    """Get the full input+output - input+output attentions."""
    enc_att = enc_atts[layer][0]
    dec_att = dec_atts[layer][0]
    encdec_att = encdec_atts[layer][0]
    enc_att = np.transpose(enc_att, [0, 2, 1])
    dec_att = np.transpose(dec_att, [0, 2, 1])
    encdec_att = np.transpose(encdec_att, [0, 2, 1])
    # [heads, query_length, memory_length]
    enc_length = enc_att.shape[1]
    dec_length = dec_att.shape[1]
    num_heads = enc_att.shape[0]
    first = np.concatenate([enc_att, encdec_att], axis=2)
    second = np.concatenate(
        [np.zeros((num_heads, dec_length, enc_length)), dec_att], axis=2)
    full_att = np.concatenate([first, second], axis=1)
    return [ha.T.tolist() for ha in full_att]

  def get_inp_inp_attention(layer):
    att = np.transpose(enc_atts[layer][0], (0, 2, 1))
    return [ha.T.tolist() for ha in att]

  def get_out_inp_attention(layer):
    att = np.transpose(encdec_atts[layer][0], (0, 2, 1))
    return [ha.T.tolist() for ha in att]

  def get_out_out_attention(layer):
    att = np.transpose(dec_atts[layer][0], (0, 2, 1))
    return [ha.T.tolist() for ha in att]

  def get_attentions(get_attention_fn):
    num_layers = len(enc_atts)
    return [get_attention_fn(i) for i in range(num_layers)]

  attentions = {
      'all': {
          'att': get_attentions(get_full_attention),
          'top_text': inp_text + out_text,
          'bot_text': inp_text + out_text,
      },
      'inp_inp': {
          'att': get_attentions(get_inp_inp_attention),
          'top_text': inp_text,
          'bot_text': inp_text,
      },
      'inp_out': {
          'att': get_attentions(get_out_inp_attention),
          'top_text': inp_text,
          'bot_text': out_text,
      },
      'out_out': {
          'att': get_attentions(get_out_out_attention),
          'top_text': out_text,
          'bot_text': out_text,
      },
  }

  return attentions
<SYSTEM_TASK:>
Decode a list of tokens to a unicode string.
<END_TASK>
<USER_TASK:>
Description:
def decode(tokens):
  """Decode a list of tokens to a unicode string.

  Args:
    tokens: a list of Unicode strings
  Returns:
    a unicode string
  """
  token_is_alnum = [t[0] in _ALPHANUMERIC_CHAR_SET for t in tokens]
  ret = []
  for i, token in enumerate(tokens):
    if i > 0 and token_is_alnum[i - 1] and token_is_alnum[i]:
      ret.append(u" ")
    ret.append(token)
  return "".join(ret)
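A quick check of the joining rule (assuming decode and the tokenizer module's _ALPHANUMERIC_CHAR_SET are in scope); a space is only re-inserted between two alphanumeric tokens:

tokens = [u"Dude", u" - ", u"that", u"'", u"s", u"so", u"cool", u"."]
print(decode(tokens))
# -> u"Dude - that's so cool."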
<SYSTEM_TASK:>
Reads files matching a wildcard pattern, yielding the contents.
<END_TASK>
<USER_TASK:>
Description:
def _read_filepattern(filepattern, max_lines=None, split_on_newlines=True):
  """Reads files matching a wildcard pattern, yielding the contents.

  Args:
    filepattern: A wildcard pattern matching one or more files.
    max_lines: If set, stop reading after reading this many lines.
    split_on_newlines: A boolean. If true, then split files by lines and strip
        leading and trailing whitespace from each line. Otherwise, treat each
        file as a single string.

  Yields:
    The contents of the files as lines, if split_on_newlines is True, or
    the entire contents of each file if False.
  """
  filenames = sorted(tf.gfile.Glob(filepattern))
  lines_read = 0
  for filename in filenames:
    with tf.gfile.Open(filename) as f:
      if split_on_newlines:
        for line in f:
          yield line.strip()
          lines_read += 1
          if max_lines and lines_read >= max_lines:
            return
      else:
        if max_lines:
          doc = []
          for line in f:
            doc.append(line)
            lines_read += 1
            if max_lines and lines_read >= max_lines:
              yield "".join(doc)
              return
          yield "".join(doc)
        else:
          yield f.read()
<SYSTEM_TASK:>
Read the corpus and compute a dictionary of token counts.
<END_TASK>
<USER_TASK:>
Description:
def corpus_token_counts(
    text_filepattern, corpus_max_lines, split_on_newlines=True):
  """Read the corpus and compute a dictionary of token counts.

  Args:
    text_filepattern: A pattern matching one or more files.
    corpus_max_lines: An integer; maximum total lines to read.
    split_on_newlines: A boolean. If true, then split files by lines and strip
        leading and trailing whitespace from each line. Otherwise, treat each
        file as a single string.

  Returns:
    a dictionary mapping token to count.
  """
  counts = collections.Counter()
  for doc in _read_filepattern(
      text_filepattern,
      max_lines=corpus_max_lines,
      split_on_newlines=split_on_newlines):
    counts.update(encode(_native_to_unicode(doc)))
  mlperf_log.transformer_print(
      key=mlperf_log.PREPROC_VOCAB_SIZE, value=len(counts))
  return counts
<SYSTEM_TASK:>
Read a vocab file and return a dictionary of token counts.
<END_TASK>
<USER_TASK:>
Description:
def vocab_token_counts(text_filepattern, max_lines):
  """Read a vocab file and return a dictionary of token counts.

  Reads a two-column CSV file of tokens and their frequency in a dataset. The
  tokens are presumed to be generated by encode() or the equivalent.

  Args:
    text_filepattern: A pattern matching one or more files.
    max_lines: An integer; maximum total lines to read.

  Returns:
    a dictionary mapping token to count.
  """
  ret = {}
  for i, line in enumerate(
      _read_filepattern(text_filepattern, max_lines=max_lines)):
    if "," not in line:
      tf.logging.warning("Malformed vocab line #%d '%s'", i, line)
      continue
    token, count = line.rsplit(",", 1)
    ret[_native_to_unicode(token)] = int(count)
  return ret
<SYSTEM_TASK:>
Make a tf.train.Example for the problem.
<END_TASK>
<USER_TASK:>
Description:
def _make_example(input_ids, problem, input_feature_name="inputs"):
  """Make a tf.train.Example for the problem.

  features[input_feature_name] = input_ids

  Also fills in any other required features with dummy values.

  Args:
    input_ids: list<int>.
    problem: Problem.
    input_feature_name: name of feature for input_ids.

  Returns:
    tf.train.Example
  """
  features = {
      input_feature_name:
          tf.train.Feature(int64_list=tf.train.Int64List(value=input_ids))
  }

  # Fill in dummy values for any other required features that presumably
  # will not actually be used for prediction.
  data_fields, _ = problem.example_reading_spec()
  for fname, ftype in data_fields.items():
    if fname == input_feature_name:
      continue
    if not isinstance(ftype, tf.FixedLenFeature):
      # Only FixedLenFeatures are required.
      continue
    if ftype.default_value is not None:
      # If there's a default value, no need to fill it in.
      continue
    num_elements = functools.reduce(lambda acc, el: acc * el, ftype.shape, 1)
    if ftype.dtype in [tf.int32, tf.int64]:
      value = tf.train.Feature(
          int64_list=tf.train.Int64List(value=[0] * num_elements))
    if ftype.dtype in [tf.float32, tf.float64]:
      value = tf.train.Feature(
          float_list=tf.train.FloatList(value=[0.] * num_elements))
    if ftype.dtype == tf.string:  # String features map to a BytesList.
      value = tf.train.Feature(
          bytes_list=tf.train.BytesList(value=[""] * num_elements))
    tf.logging.info("Adding dummy value for feature %s as it is required by "
                    "the Problem.", fname)
    features[fname] = value
  return tf.train.Example(features=tf.train.Features(feature=features))
<SYSTEM_TASK:>
Wraps function to make grpc requests with runtime args.
<END_TASK>
<USER_TASK:>
Description:
def make_grpc_request_fn(servable_name, server, timeout_secs):
  """Wraps function to make grpc requests with runtime args."""
  stub = _create_stub(server)

  def _make_grpc_request(examples):
    """Builds and sends request to TensorFlow model server."""
    request = predict_pb2.PredictRequest()
    request.model_spec.name = servable_name
    request.inputs["input"].CopyFrom(
        tf.make_tensor_proto(
            [ex.SerializeToString() for ex in examples],
            shape=[len(examples)]))
    response = stub.Predict(request, timeout_secs)
    outputs = tf.make_ndarray(response.outputs["outputs"])
    scores = tf.make_ndarray(response.outputs["scores"])
    assert len(outputs) == len(scores)
    return [{  # pylint: disable=g-complex-comprehension
        "outputs": output,
        "scores": score
    } for output, score in zip(outputs, scores)]

  return _make_grpc_request
<SYSTEM_TASK:>
Wraps function to make CloudML Engine requests with runtime args.
<END_TASK>
<USER_TASK:>
Description:
def make_cloud_mlengine_request_fn(credentials, model_name, version):
  """Wraps function to make CloudML Engine requests with runtime args."""

  def _make_cloud_mlengine_request(examples):
    """Builds and sends requests to Cloud ML Engine."""
    api = discovery.build("ml", "v1", credentials=credentials)
    parent = "projects/%s/models/%s/versions/%s" % (cloud.default_project(),
                                                    model_name, version)
    input_data = {
        "instances": [{  # pylint: disable=g-complex-comprehension
            "input": {
                "b64": base64.b64encode(ex.SerializeToString())
            }
        } for ex in examples]
    }
    prediction = api.projects().predict(body=input_data, name=parent).execute()
    return prediction["predictions"]

  return _make_cloud_mlengine_request
<SYSTEM_TASK:>
Encodes inputs, makes request to deployed TF model, and decodes outputs.
<END_TASK>
<USER_TASK:>
Description:
def predict(inputs_list, problem, request_fn):
  """Encodes inputs, makes request to deployed TF model, and decodes outputs."""
  assert isinstance(inputs_list, list)
  fname = "inputs" if problem.has_inputs else "targets"
  input_encoder = problem.feature_info[fname].encoder
  input_ids_list = [
      _encode(inputs, input_encoder, add_eos=problem.has_inputs)
      for inputs in inputs_list
  ]
  examples = [_make_example(input_ids, problem, fname)
              for input_ids in input_ids_list]
  predictions = request_fn(examples)
  output_decoder = problem.feature_info["targets"].encoder
  outputs = [
      (_decode(prediction["outputs"], output_decoder),
       prediction["scores"])
      for prediction in predictions
  ]
  return outputs
<SYSTEM_TASK:>
Basic 2-frame recurrent model with stochastic tower.
<END_TASK>
<USER_TASK:>
Description:
def next_frame_basic_recurrent():
  """Basic 2-frame recurrent model with stochastic tower."""
  hparams = basic_stochastic.next_frame_basic_stochastic_discrete()
  hparams.filter_double_steps = 2
  hparams.hidden_size = 64
  hparams.video_num_input_frames = 4
  hparams.video_num_target_frames = 4
  hparams.concat_internal_states = False
  hparams.add_hparam("num_lstm_layers", 2)
  hparams.add_hparam("num_lstm_filters", 256)
  return hparams
<SYSTEM_TASK:>
Mean of the inputs but counting only those where targets != mask_id.
<END_TASK>
<USER_TASK:>
Description:
def masked_mean(inputs, targets, mask_id=None):
  """Mean of the inputs but counting only those where targets != mask_id."""
  inputs = [x.astype(np.float32) for x in inputs]
  # We assume all elements in the list contribute equally.
  # TODO(lukaszkaiser): remove this assumption (e.g., when masks differ).
  length = len(inputs)
  if mask_id is None:
    # TODO(lukaszkaiser): can we just divide the sum by length? XLA optimizes?
    return sum([np.mean(x) / length for x in inputs])
  unmask = [1.0 - np.equal(t, mask_id).astype(np.float32) for t in targets]
  return sum([np.sum(x * m) / (length * np.sum(m))
              for x, m in zip(inputs, unmask)])
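A small numeric check (assuming masked_mean as defined above is importable); the last position carries the mask id and is excluded:

import numpy as np

inputs = [np.array([[1.0, 2.0, 3.0]])]
targets = [np.array([[7, 7, 0]])]  # 0 is the mask/padding id
print(masked_mean(inputs, targets, mask_id=0))
# Only the first two positions count: (1 + 2) / 2 = 1.5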
<SYSTEM_TASK:>
Save State and optionally gin config.
<END_TASK>
<USER_TASK:>
Description:
def save_state(state, output_dir, keep=False):
  """Save State and optionally gin config."""
  params_file = os.path.join(output_dir, "model.pkl")
  with gfile.GFile(params_file, "wb") as f:
    pickle.dump((state.params, state.step, state.history), f)
  if keep:
    params_file = os.path.join(output_dir, "model_{}.pkl".format(state.step))
    with gfile.GFile(params_file, "wb") as f:
      pickle.dump((state.params, state.step, state.history), f)
  log("Model saved to %s" % params_file, stdout=False)
<SYSTEM_TASK:>
Evaluate on train and eval data, and log metrics.
<END_TASK>
<USER_TASK:>
Description:
def evaluate_train_and_eval(step, inputs, predict_fun, eval_steps, rng,
                            train_sw=None, eval_sw=None, history=None):
  """Evaluate on train and eval data, and log metrics."""
  step_log(step, "Evaluation")
  train_metrics, eval_metrics = [
      evaluate(  # pylint: disable=g-complex-comprehension
          itertools.islice(input_stream(), eval_steps),
          predict_fun,
          _METRICS,
          rng)
      for input_stream in
      [inputs.train_eval_stream, inputs.eval_stream]]
  if train_sw:
    log_metrics(train_metrics, train_sw, "train", step, history=history)
  if eval_sw:
    log_metrics(eval_metrics, eval_sw, "eval", step, history=history)
  step_log(step, "Finished evaluation")
  return train_metrics, eval_metrics
<SYSTEM_TASK:>
Log metrics to summary writer and history.
<END_TASK>
<USER_TASK:>
Description:
def log_metrics(metrics, summ_writer, log_prefix, step, history=None):
  """Log metrics to summary writer and history."""
  rjust_len = max([len(name) for name in metrics])
  for name, value in six.iteritems(metrics):
    step_log(step, "%s %s | % .8f" % (
        log_prefix.ljust(5), name.rjust(rjust_len), value))
    full_name = "metrics/" + name
    if history:
      history.append(log_prefix, full_name, step, value)
    if summ_writer:
      summ_writer.scalar(full_name, value, step)
<SYSTEM_TASK:>
Get a JAX random number generator and set random seed everywhere.
<END_TASK>
<USER_TASK:>
Description:
def get_random_number_generator_and_set_seed(seed=None):
  """Get a JAX random number generator and set random seed everywhere."""
  random.seed(seed)
  # While python random accepts None as seed and uses time/os seed then,
  # some other functions expect integers so we create one here.
  if seed is None:
    seed = random.randint(0, 2**31 - 1)
  tf.set_random_seed(seed)
  numpy.random.seed(seed)
  return jax_random.get_prng(seed)
<SYSTEM_TASK:>
Iterator over epochs until steps is reached. 1-indexed.
<END_TASK>
<USER_TASK:>
Description:
def epochs(steps=None, epoch_steps=1):
  """Iterator over epochs until steps is reached. 1-indexed.

  Args:
    steps: int, total number of steps. Infinite if None.
    epoch_steps: int, number of steps per epoch. Can also be an iterable<int>
        to enable variable length epochs.

  Yields:
    (epoch: int, epoch id, epoch_steps: int, number of steps in this epoch)
  """
  try:
    iter(epoch_steps)
  except TypeError:
    epoch_steps = itertools.repeat(epoch_steps)

  step = 0
  for epoch, epoch_steps in enumerate(epoch_steps):
    # Cap the epoch to the remaining number of steps, if a total was given.
    if steps:
      epoch_steps = min(epoch_steps, steps - step)
    yield (epoch + 1, epoch_steps)
    step += epoch_steps
    if steps and step >= steps:
      break
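Usage sketch (assuming the epochs generator above is in scope); the final epoch is truncated to the remaining steps:

print(list(epochs(steps=7, epoch_steps=3)))
# -> [(1, 3), (2, 3), (3, 1)]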
<SYSTEM_TASK:>
Use jit on model_predict if required.
<END_TASK>
<USER_TASK:>
Description:
def _jit_predict_fun(model_predict, num_devices):
  """Use jit on model_predict if required."""

  def predict(x, params=(), rng=None):
    """Predict function jited and parallelized as requested."""
    # On one device, jit and run.
    if num_devices == 1:
      return backend.jit(model_predict)(x, params, rng=rng)

    # Multi-devices, pmap and run.
    @functools.partial(backend.pmap, axis_name="batch")
    def mapped_predict(x, params, rng):
      return model_predict(x, params, rng=rng)
    pred = mapped_predict(
        reshape_by_device(x, num_devices),
        params,
        jax_random.split(rng, num_devices))
    # Need to reduce the [device, per-device-batch, ...] tensors back to
    # a [batch, ...] tensor. The tensors may be nested.
    if not isinstance(x, (list, tuple)):  # Not nested.
      batch_size = x.shape[0]
      return np.reshape(pred, [batch_size] + list(pred.shape[2:]))
    batch_size = x[0].shape[0]
    return [np.reshape(p, [batch_size] + list(p.shape[2:])) for p in pred]

  return predict
<SYSTEM_TASK:>
Computes the number of input and output units for a weight shape.
<END_TASK>
<USER_TASK:>
Description:
def _compute_fans(shape):
  """Computes the number of input and output units for a weight shape.

  Args:
    shape: Integer shape tuple or TF tensor shape.

  Returns:
    A tuple of scalars (fan_in, fan_out).
  """
  if len(shape) < 1:  # Just to avoid errors for constants.
    fan_in = fan_out = 1
  elif len(shape) == 1:
    fan_in = fan_out = shape[0]
  elif len(shape) == 2:
    fan_in = shape[0]
    fan_out = shape[1]
  else:
    # Assuming convolution kernels (2D, 3D, or more).
    # kernel shape: (..., input_depth, depth)
    receptive_field_size = 1.
    for dim in shape[:-2]:
      receptive_field_size *= dim
    fan_in = shape[-2] * receptive_field_size
    fan_out = shape[-1] * receptive_field_size
  if isinstance(fan_in, tf.Dimension):
    fan_in = fan_in.value
  if isinstance(fan_out, tf.Dimension):
    fan_out = fan_out.value
  return fan_in, fan_out
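For example, a 3x3 convolution kernel with 16 input channels and 32 output filters (assuming _compute_fans above is in scope):

print(_compute_fans((3, 3, 16, 32)))
# receptive_field_size = 3 * 3 = 9, so fan_in = 16 * 9 = 144.0 and fan_out = 32 * 9 = 288.0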
<SYSTEM_TASK:>
Getter for loading from strings; returns value if can't load.
<END_TASK>
<USER_TASK:>
Description:
def get(identifier, value=None):
  """Getter for loading from strings; returns value if can't load."""
  if value is None:
    value = identifier
  if identifier is None:
    return None
  elif isinstance(identifier, dict):
    try:
      return deserialize(identifier)
    except ValueError:
      return value
  elif isinstance(identifier, six.string_types):
    config = {'class_name': str(identifier), 'config': {}}
    try:
      return deserialize(config)
    except ValueError:
      return value
  elif callable(identifier):
    return identifier
  return value
<SYSTEM_TASK:>
Creates a time-step and appends it to the list.
<END_TASK>
<USER_TASK:>
Description:
def add_time_step(self, **create_time_step_kwargs):
  """Creates a time-step and appends it to the list.

  Args:
    **create_time_step_kwargs: Forwarded to
        time_step.TimeStep.create_time_step.
  """
  ts = time_step.TimeStep.create_time_step(**create_time_step_kwargs)
  assert isinstance(ts, time_step.TimeStep)
  self._time_steps.append(ts)
<SYSTEM_TASK:>
Replace the last time-step with the given kwargs.
<END_TASK>
<USER_TASK:>
Description:
def change_last_time_step(self, **replace_time_step_kwargs):
  """Replace the last time-step with the given kwargs."""
  # Pre-condition: self._time_steps shouldn't be empty.
  assert self._time_steps
  self._time_steps[-1] = self._time_steps[-1].replace(
      **replace_time_step_kwargs)
<SYSTEM_TASK:>
Returns a tuple of the sums of raw and processed rewards.
<END_TASK>
<USER_TASK:>
Description:
def reward(self):
  """Returns a tuple of the sums of raw and processed rewards."""
  raw_rewards, processed_rewards = 0, 0
  for ts in self.time_steps:
    # NOTE: raw_reward and processed_reward are None for the first time-step.
    if ts.raw_reward is not None:
      raw_rewards += ts.raw_reward
    if ts.processed_reward is not None:
      processed_rewards += ts.processed_reward
  return raw_rewards, processed_rewards
<SYSTEM_TASK:>
Completes the given trajectory at the given index.
<END_TASK>
<USER_TASK:>
Description:
def _complete_trajectory(self, trajectory, index):
  """Completes the given trajectory at the given index."""
  assert isinstance(trajectory, Trajectory)

  # This *should* be the case.
  assert trajectory.last_time_step.action is None

  # Add to completed trajectories.
  self._completed_trajectories.append(trajectory)

  # Make a new one to replace it.
  self._trajectories[index] = Trajectory()
<SYSTEM_TASK:>
Resets trajectories at given indices and populates observations.
<END_TASK>
<USER_TASK:>
Description:
def reset(self, indices, observations):
  """Resets trajectories at given indices and populates observations.

  Reset can either be called right at the beginning, when there are no
  time-steps, or to reset a currently active trajectory.

  If resetting a currently active trajectory then we save it in
  self._completed_trajectories.

  Args:
    indices: 1-D np.ndarray stating the indices to reset.
    observations: np.ndarray of shape (indices len, obs.shape) of observations
  """
  # Pre-conditions: indices, observations are np arrays.
  #               : indices is one-dimensional.
  #               : their first dimension (batch) is the same.
  assert isinstance(indices, np.ndarray)
  assert len(indices.shape) == 1
  assert isinstance(observations, np.ndarray)
  assert indices.shape[0] == observations.shape[0]

  for index, observation in zip(indices, observations):
    trajectory = self._trajectories[index]

    # Are we starting a new trajectory at the given index?
    if not trajectory.is_active:
      # Then create a new time-step here with the given observation.
      trajectory.add_time_step(observation=observation)
      # That's all we need to do here.
      continue

    # If however we are resetting a currently active trajectory then we need
    # to put that in self._completed_trajectories and make a new trajectory
    # with the current observation.

    # TODO(afrozm): Should we mark these as done? Or is done=False plus this
    # being the last time-step in the trajectory good enough to recognize
    # that this was reset?

    # Mark trajectory as completed and move into completed_trajectories.
    self._complete_trajectory(trajectory, index)

    # Put the observation in the newly created trajectory.
    # TODO(afrozm): Add 0 reward.
    self._trajectories[index].add_time_step(observation=observation)