def multi_conv_res(x, padding, name, layers, hparams, mask=None, source=None):
  """A stack of separable convolution blocks with residual connections."""
  with tf.variable_scope(name):
    padding_bias = None
    if mask is not None:
      padding_bias = (1.0 - mask) * -1e9  # Bias to not attend to padding.
      if padding == "LEFT":  # Do not mask anything when left-padding.
        mask = None
    if (hparams.kernel_scheme in _KERNEL_SCHEMES and
        hparams.dilation_scheme in _DILATION_SCHEMES):
      kernels = _KERNEL_SCHEMES[hparams.kernel_scheme]
      dilations = _DILATION_SCHEMES[hparams.dilation_scheme]
      dilations_and_kernels = list(zip(dilations, kernels))
      dilations_and_kernels1 = dilations_and_kernels[:2]
      dilations_and_kernels2 = dilations_and_kernels[2:]
    else:
      k = (hparams.kernel_height, hparams.kernel_width)
      k2 = (hparams.large_kernel_size, 1)
      dilations_and_kernels1 = [((1, 1), k), ((1, 1), k)]
      dilations_and_kernels2 = [((1, 1), k2), ((4, 4), k2)]
    separabilities1 = [hparams.separability, hparams.separability]
    separabilities2 = [hparams.separability] * len(dilations_and_kernels2)
    if hparams.separability < 0:
      separabilities1 = [hparams.separability - 1, hparams.separability]
      separabilities2 = [
          hparams.separability - i
          for i in reversed(range(len(dilations_and_kernels2)))
      ]

    def norm_fn(x, name):
      with tf.variable_scope(name, default_name="norm"):
        return common_layers.apply_norm(
            x, hparams.norm_type, hparams.hidden_size, hparams.norm_epsilon)

    for layer in range(layers):
      with tf.variable_scope("layer_%d" % layer):
        y = common_layers.subseparable_conv_block(
            x,
            hparams.hidden_size,
            dilations_and_kernels1,
            normalizer_fn=norm_fn,
            padding=padding,
            mask=mask,
            separabilities=separabilities1,
            name="residual1")
        x += common_layers.subseparable_conv_block(
            x + y,
            hparams.hidden_size,
            dilations_and_kernels2,
            normalizer_fn=norm_fn,
            padding=padding,
            mask=mask,
            separabilities=separabilities2,
            name="residual2") + y
        if source is not None and hparams.attention_type != "none":
          x += attention(x, source, norm_fn, hparams, bias=padding_bias)
        if mask is not None:
          x *= mask
    return tf.nn.dropout(x, 1.0 - hparams.dropout)
def rank_loss(sentence_emb, image_emb, margin=0.2):
  """Experimental rank loss, thanks to kkurach@ for the code."""
  with tf.name_scope("rank_loss"):
    # Normalize first as this is assumed in cosine similarity later.
    sentence_emb = tf.nn.l2_normalize(sentence_emb, 1)
    image_emb = tf.nn.l2_normalize(image_emb, 1)
    # Both sentence_emb and image_emb have size [batch, depth].
    scores = tf.matmul(image_emb, tf.transpose(sentence_emb))  # [batch, batch]
    diagonal = tf.diag_part(scores)  # [batch]
    cost_s = tf.maximum(0.0, margin - diagonal + scores)  # [batch, batch]
    cost_im = tf.maximum(
        0.0, margin - tf.reshape(diagonal, [-1, 1]) + scores)  # [batch, batch]
    # Clear diagonals.
    batch_size = tf.shape(sentence_emb)[0]
    empty_diagonal_mat = tf.ones_like(cost_s) - tf.eye(batch_size)
    cost_s *= empty_diagonal_mat
    cost_im *= empty_diagonal_mat
    return tf.reduce_mean(cost_s) + tf.reduce_mean(cost_im)
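# For intuition, a minimal numpy sketch of the same bidirectional max-margin
# objective on made-up toy embeddings (rank_loss_np and its inputs are
# illustrative assumptions, not part of the library):
import numpy as np


def rank_loss_np(sentence_emb, image_emb, margin=0.2):
  # L2-normalize so the dot products below are cosine similarities.
  sentence_emb = sentence_emb / np.linalg.norm(sentence_emb, axis=1,
                                               keepdims=True)
  image_emb = image_emb / np.linalg.norm(image_emb, axis=1, keepdims=True)
  scores = image_emb @ sentence_emb.T          # [batch, batch] similarities
  diagonal = np.diag(scores)                   # similarity of matching pairs
  cost_s = np.maximum(0.0, margin - diagonal + scores)            # per image
  cost_im = np.maximum(0.0, margin - diagonal[:, None] + scores)  # per sentence
  mask = 1.0 - np.eye(len(scores))             # do not penalize the true pair
  return (cost_s * mask).mean() + (cost_im * mask).mean()


rng = np.random.RandomState(0)
print(rank_loss_np(rng.randn(4, 8), rng.randn(4, 8)))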
def similarity_cost(inputs_encoded, targets_encoded):
  """Loss encouraging inputs to be more similar to their own targets than to others."""
  # This is a first very simple version: handle variable-length by padding
  # to same length and putting everything into batch. In need of a better way.
  x, y = common_layers.pad_to_same_length(inputs_encoded, targets_encoded)
  depth = tf.shape(inputs_encoded)[3]
  x, y = tf.reshape(x, [-1, depth]), tf.reshape(y, [-1, depth])
  return rank_loss(x, y)
def slicenet_middle(inputs_encoded, targets, target_space_emb, mask, hparams):
  """Middle part of slicenet, connecting encoder and decoder."""

  def norm_fn(x, name):
    with tf.variable_scope(name, default_name="norm"):
      return common_layers.apply_norm(x, hparams.norm_type,
                                      hparams.hidden_size,
                                      hparams.norm_epsilon)

  # Flatten targets and embed target_space_id.
  targets_flat = tf.expand_dims(common_layers.flatten4d3d(targets), axis=2)
  target_space_emb = tf.tile(target_space_emb,
                             [tf.shape(targets_flat)[0], 1, 1, 1])

  # Use attention from each target to look at input and retrieve.
  targets_shifted = common_layers.shift_right(
      targets_flat, pad_value=target_space_emb)
  if hparams.attention_type == "none":
    targets_with_attention = tf.zeros_like(targets_shifted)
  else:
    inputs_padding_bias = (1.0 - mask) * -1e9  # Bias to not attend to padding.
    targets_with_attention = attention(
        targets_shifted,
        inputs_encoded,
        norm_fn,
        hparams,
        bias=inputs_padding_bias)

  # Positional targets: merge attention and raw.
  kernel = (hparams.kernel_height, hparams.kernel_width)
  targets_merged = common_layers.subseparable_conv_block(
      tf.concat([targets_with_attention, targets_shifted], axis=3),
      hparams.hidden_size, [((1, 1), kernel)],
      normalizer_fn=norm_fn,
      padding="LEFT",
      separability=4,
      name="targets_merge")
  return targets_merged, 0.0
def embedding_to_padding(emb):
  """Input embeddings -> is_padding."""
  emb_sum = tf.reduce_sum(tf.abs(emb), axis=-1, keep_dims=True)
  return tf.to_float(tf.equal(emb_sum, 0.0))
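# A quick numpy illustration of the convention this relies on (toy values,
# not library code): a position counts as padding exactly when its embedding
# vector is all zeros.
import numpy as np

emb = np.array([[[1.0, 2.0], [0.0, 0.0]]])   # [batch=1, length=2, depth=2]
is_padding = (np.abs(emb).sum(axis=-1, keepdims=True) == 0).astype(np.float32)
print(is_padding)   # [[[0.], [1.]]] -- only the all-zero position is padding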
def slicenet_internal(inputs, targets, target_space, hparams, run_decoder=True):
  """The slicenet model, main step used for training."""
  with tf.variable_scope("slicenet"):
    # Project to hidden size if necessary.
    if inputs.get_shape().as_list()[-1] != hparams.hidden_size:
      inputs = common_layers.conv_block(
          inputs,
          hparams.hidden_size, [((1, 1), (3, 3))],
          first_relu=False,
          padding="SAME",
          force2d=True)

    # Flatten inputs and encode.
    inputs = tf.expand_dims(common_layers.flatten4d3d(inputs), axis=2)
    inputs_mask = 1.0 - embedding_to_padding(inputs)
    inputs = common_layers.add_timing_signal(inputs)  # Add position info.
    target_space_emb = embed_target_space(target_space, hparams.hidden_size)
    extra_layers = int(hparams.num_hidden_layers * 1.5)
    inputs_encoded = multi_conv_res(
        inputs, "SAME", "encoder", extra_layers, hparams, mask=inputs_mask)
    if not run_decoder:
      return inputs_encoded
    # Do the middle part.
    decoder_start, similarity_loss = slicenet_middle(
        inputs_encoded, targets, target_space_emb, inputs_mask, hparams)
    # Decode.
    decoder_final = multi_conv_res(
        decoder_start,
        "LEFT",
        "decoder",
        hparams.num_hidden_layers,
        hparams,
        mask=inputs_mask,
        source=inputs_encoded)
    return decoder_final, tf.reduce_mean(similarity_loss)
def slicenet_params1():
  """Set of hyperparameters."""
  hparams = common_hparams.basic_params1()
  hparams.batch_size = 1024
  hparams.hidden_size = 768
  hparams.dropout = 0.5
  hparams.symbol_dropout = 0.2
  hparams.label_smoothing = 0.1
  hparams.clip_grad_norm = 2.0
  hparams.num_hidden_layers = 4
  hparams.kernel_height = 3
  hparams.kernel_width = 1
  hparams.norm_type = "layer"
  hparams.learning_rate_decay_scheme = "exp"
  hparams.learning_rate = 0.05
  hparams.learning_rate_warmup_steps = 3000
  hparams.initializer_gain = 1.0
  hparams.weight_decay = 3.0
  hparams.num_sampled_classes = 0
  hparams.sampling_method = "argmax"
  hparams.optimizer_adam_epsilon = 1e-6
  hparams.optimizer_adam_beta1 = 0.85
  hparams.optimizer_adam_beta2 = 0.997
  hparams.add_hparam("large_kernel_size", 15)  # New ones are added like this.
  hparams.add_hparam("separability", -2)
  # A dilation scheme, one of _DILATION_SCHEMES.
  hparams.add_hparam("dilation_scheme", "1.1.1.1")
  # A kernel scheme, one of _KERNEL_SCHEMES; overrides large_kernel_size.
  hparams.add_hparam("kernel_scheme", "3.7.15.31")
  hparams.add_hparam("audio_compression", 8)
  # Attention-related flags.
  hparams.add_hparam("attention_type", "simple")
  hparams.add_hparam("num_heads", 8)
  hparams.add_hparam("attention_key_channels", 0)
  hparams.add_hparam("attention_value_channels", 0)
  hparams.add_hparam("sim_loss_mult", 0.0)  # Try 10.0 for experiments.
  hparams.add_hparam("attention_dropout", 0.2)
  hparams.shared_embedding_and_softmax_weights = True
  return hparams
def slicenet_params1_noam():
  """Version with Noam's decay scheme."""
  hparams = slicenet_params1()
  hparams.learning_rate_decay_scheme = "noam"
  hparams.learning_rate = 1.0
  hparams.learning_rate_warmup_steps = 4000
  hparams.initializer = "uniform_unit_scaling"
  hparams.optimizer_adam_epsilon = 1e-9
  hparams.optimizer_adam_beta1 = 0.9
  hparams.optimizer_adam_beta2 = 0.98
  return hparams
def slicenet_params1_tiny():
  """Version for fast local runs."""
  hparams = slicenet_params1()
  hparams.attention_type = "simple"
  hparams.separability = 0
  hparams.hidden_size = 128
  hparams.num_hidden_layers = 2
  hparams.batch_size = 512
  hparams.learning_rate_warmup_steps = 200
  return hparams
def slicenet_range1(ranged_hparams):
  """Small range of hyperparameters."""
  rhp = ranged_hparams
  rhp.set_float("clip_grad_norm", 1.0, 10.0, scale=rhp.LOG_SCALE)
  rhp.set_float("learning_rate", 0.02, 1.0, scale=rhp.LOG_SCALE)
  rhp.set_float("optimizer_adam_beta2", 0.995, 0.998)
  rhp.set_float("weight_decay", 1.0, 5.0)
def encode(self, s):
  """Converts a space-separated string of tokens to lists of ids.

  Also store temporary vocabulary IDs for source OOV tokens. OOVs are
  represented by their temporary OOV number. E.g., if the vocabulary size
  is 50k and the source has 3 OOVs, then these temporary OOV numbers will
  be 50000, 50001, 50002.

  Args:
    s: human-readable string to be converted.

  Returns:
    ids: list of integers
    ids_extend: list of integers including extended temporary vocab IDs for
      source OOVs.
    oovs: A dict storing source OOV words, used for the decoder to copy. The
      key is OOV word, and the value is the order they appear in the source,
      starting from 0.
    source_oov_id_to_token: a list of source OOV tokens, in the same order as
      they appear in the source.
  """
  sentence = s
  tokens = sentence.strip().split()
  ids = []
  ids_extend = []
  oovs = {}
  for t in tokens:
    if t in self._token_to_id:
      ids.append(self._token_to_id[t])
      ids_extend.append(self._token_to_id[t])
    else:
      next_oov_id = len(oovs)
      oov_num = oovs.get(t, next_oov_id)
      if oov_num == next_oov_id:
        oovs[t] = oov_num
      ids_extend.append(self.vocab_size + oov_num)
      ids.append(self._token_to_id[self._replace_oov])
  source_oov_id_to_token = [""] * len(oovs)
  for oov in oovs:
    source_oov_id_to_token[oovs[oov]] = oov
  if self._reverse:
    return ids[::-1], ids_extend[::-1], oovs, source_oov_id_to_token
  else:
    return ids, ids_extend, oovs, source_oov_id_to_token
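# A standalone toy walk-through of the OOV numbering above (the 5-word vocab
# is hypothetical, not the real tokenizer; <UNK> plays the replace_oov role):
token_to_id = {"<UNK>": 0, "the": 1, "cat": 2, "sat": 3, "down": 4}
vocab_size = len(token_to_id)

ids, ids_extend, oovs = [], [], {}
for t in "the cat xylophone sat xylophone zebra".split():
  if t in token_to_id:
    ids.append(token_to_id[t])
    ids_extend.append(token_to_id[t])
  else:
    oov_num = oovs.setdefault(t, len(oovs))  # first OOV -> vocab_size, etc.
    ids_extend.append(vocab_size + oov_num)
    ids.append(token_to_id["<UNK>"])

print(ids)         # [1, 2, 0, 3, 0, 0] -- OOVs collapse to <UNK>
print(ids_extend)  # [1, 2, 5, 3, 5, 6] -- OOVs get temporary ids 5, 6
print(oovs)        # {'xylophone': 0, 'zebra': 1}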
def encode_target(self, target, source_oovs):
  """Converts a space-separated string of tokens to lists of ids.

  Also store a version of extended vocabulary IDs. For target OOVs that are
  in the source, encode them using the temporary vocab IDs. For target OOVs
  not in the source, encode them as <UNK>.

  Args:
    target: target string
    source_oovs: source OOV words stored in dict, key is the word, value is
      the order in which they appear in the source starting from 0

  Returns:
    ids: list of integers
    ids_extend: list of integers including extended vocabulary IDs.
  """
  tokens = target.strip().split()
  ids = []
  ids_extend = []
  for t in tokens:
    if t in self._token_to_id:
      i = self._token_to_id[t]
      ids.append(i)
      ids_extend.append(i)
    else:
      ids.append(self._token_to_id[self._replace_oov])
      if t in source_oovs:
        vocab_idx = self.vocab_size + source_oovs[t]
        ids_extend.append(vocab_idx)
      else:
        ids_extend.append(self._token_to_id[self._replace_oov])
  if self._reverse:
    return ids[::-1], ids_extend[::-1]
  else:
    return ids, ids_extend
def decode_list_oov(self, ids, source_oov_id_to_token):
  """Decode ids back to tokens, considering OOVs' temporary IDs.

  Args:
    ids: vocab ids. Could possibly include source temporary OOV IDs starting
      from vocab_size.
    source_oov_id_to_token: a list of source OOV tokens, in the same order as
      they appear in the source.

  Returns:
    decoded tokens, possibly including source OOV tokens.
  """
  seq = reversed(ids) if self._reverse else ids
  tokens = []
  for cur_id in seq:
    if cur_id in self._id_to_token:
      tokens.append(self._id_to_token[cur_id])
    else:
      tokens.append(source_oov_id_to_token[cur_id - self.vocab_size])
  return tokens
def _smallest_size_at_least(height, width, smallest_side):
  """Computes new shape with the smallest side equal to `smallest_side`.

  Computes new shape with the smallest side equal to `smallest_side` while
  preserving the original aspect ratio.

  Args:
    height: an int32 scalar tensor indicating the current height.
    width: an int32 scalar tensor indicating the current width.
    smallest_side: A python integer or scalar `Tensor` indicating the size of
      the smallest side after resize.

  Returns:
    new_height: an int32 scalar tensor indicating the new height.
    new_width: an int32 scalar tensor indicating the new width.
  """
  smallest_side = tf.convert_to_tensor(smallest_side, dtype=tf.int32)
  height = tf.to_float(height)
  width = tf.to_float(width)
  smallest_side = tf.to_float(smallest_side)
  scale = tf.cond(
      tf.greater(height, width), lambda: smallest_side / width,
      lambda: smallest_side / height)
  new_height = tf.to_int32(height * scale)
  new_width = tf.to_int32(width * scale)
  return new_height, new_width
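# Worked example with assumed numbers: height=400, width=600, smallest_side=512.
# Since height < width, scale = 512 / 400 = 1.28, so new_height = 512 and
# new_width = int(600 * 1.28) = 768: the smaller side lands exactly on
# smallest_side and the aspect ratio is preserved.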
def _aspect_preserving_resize(image, smallest_side):
  """Resize images preserving the original aspect ratio.

  Args:
    image: A 3-D image `Tensor`.
    smallest_side: A python integer or scalar `Tensor` indicating the size of
      the smallest side after resize.

  Returns:
    resized_image: A 3-D tensor containing the resized image.
  """
  smallest_side = tf.convert_to_tensor(smallest_side, dtype=tf.int32)
  shape = tf.shape(image)
  height = shape[0]
  width = shape[1]
  new_height, new_width = _smallest_size_at_least(height, width, smallest_side)
  image = tf.expand_dims(image, 0)
  resized_image = tf.image.resize_images(
      image, size=[new_height, new_width],
      method=tf.image.ResizeMethod.BICUBIC)
  resized_image = tf.squeeze(resized_image)
  resized_image.set_shape([None, None, 3])
  return resized_image
def _distort_color(image, color_ordering=0, scope=None):
  """Distort the color of a Tensor image.

  Each color distortion is non-commutative and thus ordering of the color ops
  matters. Ideally we would randomly permute the ordering of the color ops.
  Rather than adding that level of complication, we select a distinct
  ordering of color ops for each preprocessing thread.

  Args:
    image: 3-D Tensor containing single image in [0, 1].
    color_ordering: Python int, a type of distortion (valid values: 0-3).
    scope: Optional scope for name_scope.

  Returns:
    3-D Tensor color-distorted image on range [0, 1]

  Raises:
    ValueError: if color_ordering not in [0, 3]
  """
  with tf.name_scope(scope, "distort_color", [image]):
    if color_ordering == 0:
      image = tf.image.random_brightness(image, max_delta=32. / 255.)
      image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
      image = tf.image.random_hue(image, max_delta=0.2)
      image = tf.image.random_contrast(image, lower=0.5, upper=1.5)
    elif color_ordering == 1:
      image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
      image = tf.image.random_brightness(image, max_delta=32. / 255.)
      image = tf.image.random_contrast(image, lower=0.5, upper=1.5)
      image = tf.image.random_hue(image, max_delta=0.2)
    elif color_ordering == 2:
      image = tf.image.random_contrast(image, lower=0.5, upper=1.5)
      image = tf.image.random_hue(image, max_delta=0.2)
      image = tf.image.random_brightness(image, max_delta=32. / 255.)
      image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
    elif color_ordering == 3:
      image = tf.image.random_hue(image, max_delta=0.2)
      image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
      image = tf.image.random_contrast(image, lower=0.5, upper=1.5)
      image = tf.image.random_brightness(image, max_delta=32. / 255.)
    else:
      raise ValueError("color_ordering must be in [0, 3]")

    # The random_* ops do not necessarily clamp.
    return tf.clip_by_value(image, 0.0, 1.0)
def _apply_with_random_selector(x, func, num_cases):
  """Computes func(x, sel), with sel sampled from [0...num_cases-1].

  Args:
    x: input Tensor.
    func: Python function to apply.
    num_cases: Python int32, number of cases to sample sel from.

  Returns:
    The result of func(x, sel), where func receives the value of the selector
    as a python integer, but sel is sampled dynamically.
  """
  sel = tf.random_uniform([], maxval=num_cases, dtype=tf.int32)
  # Pass the real x only to one of the func calls.
  return control_flow_ops.merge([
      func(control_flow_ops.switch(x, tf.equal(sel, case))[1], case)
      for case in range(num_cases)
  ])[0]
def _mean_image_subtraction(image, means):
  """Subtracts the given means from each image channel.

  For example:
    means = [123.68, 116.779, 103.939]
    image = _mean_image_subtraction(image, means)

  Note that the rank of `image` must be known.

  Args:
    image: a tensor of size [height, width, C].
    means: a C-vector of values to subtract from each channel.

  Returns:
    the centered image.

  Raises:
    ValueError: If the rank of `image` is unknown, if `image` has a rank other
      than three or if the number of channels in `image` doesn't match the
      number of values in `means`.
  """
  if image.get_shape().ndims != 3:
    raise ValueError("Input must be of size [height, width, C>0]")
  num_channels = image.get_shape().as_list()[-1]
  if len(means) != num_channels:
    raise ValueError("len(means) must match the number of channels")

  channels = tf.split(axis=2, num_or_size_splits=num_channels, value=image)
  for i in range(num_channels):
    channels[i] -= means[i]
  return tf.concat(axis=2, values=channels)
def vqa_v2_preprocess_image(
    image,
    height,
    width,
    mode,
    resize_side=512,
    distort=True,
    image_model_fn="resnet_v1_152",
):
  """VQA v2 image preprocessing."""
  image = tf.image.convert_image_dtype(image, dtype=tf.float32)
  assert resize_side > 0
  if resize_side:
    image = _aspect_preserving_resize(image, resize_side)
  if mode == tf.estimator.ModeKeys.TRAIN:
    image = tf.random_crop(image, [height, width, 3])
  else:
    # Central crop, assuming resize_height > height, resize_width > width.
    image = tf.image.resize_image_with_crop_or_pad(image, height, width)

  image = tf.clip_by_value(image, 0.0, 1.0)

  if mode == tf.estimator.ModeKeys.TRAIN and distort:
    image = _flip(image)
    num_distort_cases = 4
    # pylint: disable=unnecessary-lambda
    image = _apply_with_random_selector(
        image, lambda x, ordering: _distort_color(x, ordering),
        num_cases=num_distort_cases)

  if image_model_fn.startswith("resnet_v1"):
    # resnet_v1 uses vgg preprocessing.
    image = image * 255.
    image = _mean_image_subtraction(image, [_R_MEAN, _G_MEAN, _B_MEAN])
  elif image_model_fn.startswith("resnet_v2"):
    # resnet_v2 uses inception preprocessing.
    image = tf.subtract(image, 0.5)
    image = tf.multiply(image, 2.0)

  return image
def transformer_prepare_encoder(inputs, target_space, hparams, features=None):
  """Prepare one shard of the model for the encoder.

  Args:
    inputs: a Tensor.
    target_space: a Tensor.
    hparams: run hyperparameters
    features: optionally pass the entire features dictionary as well. This is
      needed now for "packed" datasets.

  Returns:
    encoder_input: a Tensor, bottom of encoder stack
    encoder_self_attention_bias: a bias tensor for use in encoder
      self-attention
    encoder_decoder_attention_bias: a bias tensor for use in encoder-decoder
      attention
  """
  ishape_static = inputs.shape.as_list()
  encoder_input = inputs
  if features and "inputs_segmentation" in features:
    # Packed dataset. Keep the examples from seeing each other.
    inputs_segmentation = features["inputs_segmentation"]
    inputs_position = features["inputs_position"]
    targets_segmentation = features["targets_segmentation"]
    if (hasattr(hparams, "unidirectional_encoder") and
        hparams.unidirectional_encoder):
      tf.logging.info("Using unidirectional encoder")
      encoder_self_attention_bias = (
          common_attention.attention_bias_lower_triangle(
              common_layers.shape_list(inputs)[1]))
    else:
      encoder_self_attention_bias = (
          common_attention.attention_bias_same_segment(
              inputs_segmentation, inputs_segmentation))
    encoder_decoder_attention_bias = (
        common_attention.attention_bias_same_segment(targets_segmentation,
                                                     inputs_segmentation))
  else:
    encoder_padding = common_attention.embedding_to_padding(encoder_input)
    ignore_padding = common_attention.attention_bias_ignore_padding(
        encoder_padding)
    if (hasattr(hparams, "unidirectional_encoder") and
        hparams.unidirectional_encoder):
      tf.logging.info("Using unidirectional encoder")
      encoder_self_attention_bias = (
          common_attention.attention_bias_lower_triangle(
              common_layers.shape_list(inputs)[1]))
    else:
      # Usual case - not a packed dataset.
      encoder_self_attention_bias = ignore_padding
    encoder_decoder_attention_bias = ignore_padding
    inputs_position = None
  if hparams.proximity_bias:
    encoder_self_attention_bias += common_attention.attention_bias_proximal(
        common_layers.shape_list(inputs)[1])
  if target_space is not None and hparams.get("use_target_space_embedding",
                                              True):
    # Append target_space_id embedding to inputs.
    emb_target_space = common_layers.embedding(
        target_space,
        32,
        ishape_static[-1],
        name="target_space_embedding",
        dtype=hparams.get("activation_dtype", "float32"))
    emb_target_space = tf.reshape(emb_target_space, [1, 1, -1])
    encoder_input += emb_target_space
  if hparams.pos == "timing":
    if inputs_position is not None:
      encoder_input = common_attention.add_timing_signal_1d_given_position(
          encoder_input, inputs_position)
    else:
      encoder_input = common_attention.add_timing_signal_1d(encoder_input)
  elif hparams.pos == "emb":
    encoder_input = common_attention.add_positional_embedding(
        encoder_input, hparams.max_length, "inputs_positional_embedding",
        inputs_position)

  encoder_self_attention_bias = common_layers.cast_like(
      encoder_self_attention_bias, encoder_input)
  encoder_decoder_attention_bias = common_layers.cast_like(
      encoder_decoder_attention_bias, encoder_input)
  return (encoder_input, encoder_self_attention_bias,
          encoder_decoder_attention_bias)
def transformer_encoder(encoder_input,
                        encoder_self_attention_bias,
                        hparams,
                        name="encoder",
                        nonpadding=None,
                        save_weights_to=None,
                        make_image_summary=True,
                        losses=None,
                        attn_bias_for_padding=None):
  """A stack of transformer layers.

  Args:
    encoder_input: a Tensor
    encoder_self_attention_bias: bias Tensor for self-attention
      (see common_attention.attention_bias())
    hparams: hyperparameters for model
    name: a string
    nonpadding: optional Tensor with shape [batch_size, encoder_length]
      indicating what positions are not padding. This must either be passed
      in, which we do for "packed" datasets, or inferred from
      encoder_self_attention_bias. The knowledge about padding is used for
      pad_remover (efficiency) and to mask out padding in convolutional
      layers.
    save_weights_to: an optional dictionary to capture attention weights for
      visualization; the weights tensor will be appended there under a string
      key created from the variable scope (including name).
    make_image_summary: Whether to make an attention image summary.
    losses: optional list onto which to append extra training losses
    attn_bias_for_padding: Padded attention bias in case a unidirectional
      encoder is being used where future attention is masked.

  Returns:
    y: a Tensor
  """
  x = encoder_input
  attention_dropout_broadcast_dims = (
      common_layers.comma_separated_string_to_integer_list(
          getattr(hparams, "attention_dropout_broadcast_dims", "")))
  mlperf_log.transformer_print(
      key=mlperf_log.MODEL_HP_NUM_HIDDEN_LAYERS,
      value=hparams.num_encoder_layers or hparams.num_hidden_layers)
  mlperf_log.transformer_print(
      key=mlperf_log.MODEL_HP_ATTENTION_DROPOUT,
      value=hparams.attention_dropout)
  mlperf_log.transformer_print(
      key=mlperf_log.MODEL_HP_ATTENTION_DENSE,
      value={
          "use_bias": "false",
          "num_heads": hparams.num_heads,
          "hidden_size": hparams.hidden_size
      })

  with tf.variable_scope(name):
    if nonpadding is not None:
      padding = 1.0 - nonpadding
    else:
      attention_bias = encoder_self_attention_bias
      if attn_bias_for_padding is not None:
        attention_bias = attn_bias_for_padding
      padding = common_attention.attention_bias_to_padding(attention_bias)
      nonpadding = 1.0 - padding
    pad_remover = None
    if hparams.use_pad_remover and not common_layers.is_xla_compiled():
      pad_remover = expert_utils.PadRemover(padding)
    for layer in range(hparams.num_encoder_layers or
                       hparams.num_hidden_layers):
      with tf.variable_scope("layer_%d" % layer):
        with tf.variable_scope("self_attention"):
          if layer < hparams.get("num_area_layers", 0):
            max_area_width = hparams.get("max_area_width", 1)
            max_area_height = hparams.get("max_area_height", 1)
            memory_height = hparams.get("memory_height", 1)
          else:
            max_area_width = 1
            max_area_height = 1
            memory_height = 1
          y = common_attention.multihead_attention(
              common_layers.layer_preprocess(x, hparams),
              None,
              encoder_self_attention_bias,
              hparams.attention_key_channels or hparams.hidden_size,
              hparams.attention_value_channels or hparams.hidden_size,
              hparams.hidden_size,
              hparams.num_heads,
              hparams.attention_dropout,
              attention_type=hparams.self_attention_type,
              max_relative_position=hparams.max_relative_position,
              heads_share_relative_embedding=(
                  hparams.heads_share_relative_embedding),
              add_relative_to_values=hparams.add_relative_to_values,
              save_weights_to=save_weights_to,
              make_image_summary=make_image_summary,
              dropout_broadcast_dims=attention_dropout_broadcast_dims,
              max_length=hparams.get("max_length"),
              vars_3d=hparams.get("attention_variables_3d"),
              activation_dtype=hparams.get("activation_dtype", "float32"),
              weight_dtype=hparams.get("weight_dtype", "float32"),
              hard_attention_k=hparams.get("hard_attention_k", 0),
              max_area_width=max_area_width,
              max_area_height=max_area_height,
              memory_height=memory_height,
              area_key_mode=hparams.get("area_key_mode", "none"),
              area_value_mode=hparams.get("area_value_mode", "none"),
              training=(hparams.get("mode", tf.estimator.ModeKeys.TRAIN)
                        == tf.estimator.ModeKeys.TRAIN))
          x = common_layers.layer_postprocess(x, y, hparams)
        with tf.variable_scope("ffn"):
          y = transformer_ffn_layer(
              common_layers.layer_preprocess(x, hparams),
              hparams,
              pad_remover,
              conv_padding="SAME",
              nonpadding_mask=nonpadding,
              losses=losses)
          x = common_layers.layer_postprocess(x, y, hparams)
    # If normalization is done in layer_preprocess, then it should also be
    # done on the output, since the output can grow very large, being the sum
    # of a whole stack of unnormalized layer outputs.
    mlperf_log.transformer_print(
        key=mlperf_log.MODEL_HP_NORM,
        value={"hidden_size": hparams.hidden_size})
    return common_layers.layer_preprocess(x, hparams)
def transformer_ffn_layer(x,
                          hparams,
                          pad_remover=None,
                          conv_padding="LEFT",
                          nonpadding_mask=None,
                          losses=None,
                          cache=None,
                          decode_loop_step=None,
                          readout_filter_size=0,
                          layer_collection=None):
  """Feed-forward layer in the transformer.

  Args:
    x: a Tensor of shape [batch_size, length, hparams.hidden_size]
    hparams: hyperparameters for model
    pad_remover: an expert_utils.PadRemover object tracking the padding
      positions. If provided, when using convolutional settings, the padding
      is removed before applying the convolution, and restored afterward.
      This can give a significant speedup.
    conv_padding: a string - either "LEFT" or "SAME".
    nonpadding_mask: an optional Tensor with shape [batch_size, length].
      Needed for convolutional layers with "SAME" padding. Contains 1.0 in
      positions corresponding to nonpadding.
    losses: optional list onto which to append extra training losses
    cache: dict, containing tensors which are the results of previous
      attentions, used for fast decoding.
    decode_loop_step: An integer, step number of the decoding loop. Only used
      for inference on TPU.
    readout_filter_size: if it's greater than 0, then it will be used instead
      of filter_size
    layer_collection: A tensorflow_kfac.LayerCollection. Only used by the
      KFAC optimizer. Default is None.

  Returns:
    a Tensor of shape [batch_size, length, hparams.hidden_size]

  Raises:
    ValueError: If losses arg is None, but layer generates extra losses.
  """
  ffn_layer = hparams.ffn_layer
  relu_dropout_broadcast_dims = (
      common_layers.comma_separated_string_to_integer_list(
          getattr(hparams, "relu_dropout_broadcast_dims", "")))
  if ffn_layer == "conv_hidden_relu":
    # Backwards compatibility.
    ffn_layer = "dense_relu_dense"
  if ffn_layer == "dense_relu_dense":
    # In simple convolution mode, use `pad_remover` to speed up processing.
    mlperf_log.transformer_print(
        key=mlperf_log.MODEL_HP_FFN_FILTER_DENSE,
        value={
            "filter_size": hparams.filter_size,
            "use_bias": "True",
            "activation": mlperf_log.RELU
        })
    mlperf_log.transformer_print(
        key=mlperf_log.MODEL_HP_FFN_OUTPUT_DENSE,
        value={
            "hidden_size": hparams.hidden_size,
            "use_bias": "True",
        })
    mlperf_log.transformer_print(
        key=mlperf_log.MODEL_HP_RELU_DROPOUT, value=hparams.relu_dropout)
    if pad_remover:
      original_shape = common_layers.shape_list(x)
      # Collapse `x` across examples, and remove padding positions.
      x = tf.reshape(x, tf.concat([[-1], original_shape[2:]], axis=0))
      x = tf.expand_dims(pad_remover.remove(x), axis=0)
    conv_output = common_layers.dense_relu_dense(
        x,
        hparams.filter_size,
        hparams.hidden_size,
        dropout=hparams.relu_dropout,
        dropout_broadcast_dims=relu_dropout_broadcast_dims,
        layer_collection=layer_collection)
    if pad_remover:
      # Restore `conv_output` to the original shape of `x`, including padding.
      conv_output = tf.reshape(
          pad_remover.restore(tf.squeeze(conv_output, axis=0)),
          original_shape)
    return conv_output
  elif ffn_layer == "conv_relu_conv":
    return common_layers.conv_relu_conv(
        x,
        readout_filter_size or hparams.filter_size,
        hparams.hidden_size,
        first_kernel_size=hparams.conv_first_kernel,
        second_kernel_size=1,
        padding=conv_padding,
        nonpadding_mask=nonpadding_mask,
        dropout=hparams.relu_dropout,
        cache=cache,
        decode_loop_step=decode_loop_step)
  elif ffn_layer == "parameter_attention":
    return common_attention.parameter_attention(
        x,
        hparams.parameter_attention_key_channels or hparams.hidden_size,
        hparams.parameter_attention_value_channels or hparams.hidden_size,
        hparams.hidden_size,
        readout_filter_size or hparams.filter_size,
        hparams.num_heads,
        hparams.attention_dropout)
  elif ffn_layer == "conv_hidden_relu_with_sepconv":
    return common_layers.conv_hidden_relu(
        x,
        readout_filter_size or hparams.filter_size,
        hparams.hidden_size,
        kernel_size=(3, 1),
        second_kernel_size=(31, 1),
        padding="LEFT",
        dropout=hparams.relu_dropout)
  elif ffn_layer == "sru":
    return common_layers.sru(x)
  elif ffn_layer == "local_moe_tpu":
    overhead = hparams.moe_overhead_eval
    if hparams.mode == tf.estimator.ModeKeys.TRAIN:
      overhead = hparams.moe_overhead_train
    ret, loss = expert_utils.local_moe_tpu(
        x,
        hparams.filter_size // 2,
        hparams.hidden_size,
        hparams.moe_num_experts,
        overhead=overhead,
        loss_coef=hparams.moe_loss_coef)
    # As in the local_moe branch below, record the auxiliary loss and return;
    # otherwise this branch would fall through and implicitly return None.
    losses.append(loss)
    return ret
  elif ffn_layer == "local_moe":
    overhead = hparams.moe_overhead_eval
    if hparams.mode == tf.estimator.ModeKeys.TRAIN:
      overhead = hparams.moe_overhead_train
    ret, loss = expert_utils.local_moe(
        x,
        True,
        expert_utils.ffn_expert_fn(hparams.hidden_size, [hparams.filter_size],
                                   hparams.hidden_size),
        hparams.moe_num_experts,
        k=hparams.moe_k,
        hparams=hparams)
    losses.append(loss)
    return ret
  else:
    assert ffn_layer == "none"
    return x
def lmx_base():
  """Transformer on languagemodel_lm1b32k_packed. 50M Params."""
  hparams = transformer.transformer_tpu()
  # Sharing is counterproductive when underparameterized.
  hparams.shared_embedding_and_softmax_weights = False
  # We judge by log-ppl, so label smoothing hurts.
  hparams.label_smoothing = 0.0
  # This makes the batch size on GPU the same as on TPU for a packed problem
  # with sequence length 256.
  # TODO(noam): fix the mess that is the data reading pipeline.
  hparams.max_length = 256
  # Larger batch since we only have a decoder.
  hparams.batch_size = 4096
  # Save some memory so we can have a larger model.
  hparams.activation_dtype = "bfloat16"
  return hparams
def lmx_h3k_f12k():
  """HParams for training languagemodel_lm1b32k_packed. 880M Params."""
  hparams = lmx_base()
  hparams.hidden_size = 3072
  hparams.filter_size = 12288
  hparams.batch_size = 2048
  hparams.weight_dtype = "bfloat16"
  return hparams
def lmx_h4k_f16k():
  """HParams for training languagemodel_lm1b32k_packed. 1470M Params."""
  hparams = lmx_base()
  hparams.hidden_size = 4096
  hparams.filter_size = 16384
  hparams.batch_size = 1024
  hparams.weight_dtype = "bfloat16"
  return hparams
def lmx_relative():
  """Language model using relative attention."""
  hparams = lmx_base()
  hparams.self_attention_type = "dot_product_relative_v2"
  hparams.activation_dtype = "float32"
  hparams.weight_dtype = "float32"
  return hparams
def lmx_moe_h1k_f4k_x32():
  """Transformer with mixture of experts. 890M Params."""
  hparams = lmx_h1k_f4k()
  hparams.ffn_layer = "local_moe_tpu"
  hparams.moe_num_experts = 32
  hparams.weight_dtype = "bfloat16"
  hparams.batch_size = 8192
  return hparams
def lmx_moe_h1k_f8k_x16():
  """Transformer with mixture of experts. 890M Params."""
  hparams = lmx_h1k_f4k()
  hparams.filter_size = 8192
  hparams.ffn_layer = "local_moe_tpu"
  hparams.moe_num_experts = 16
  hparams.weight_dtype = "bfloat16"
  hparams.batch_size = 8192
  return hparams
def lmx_h1k_f64k():
  """HParams for training languagemodel_lm1b32k_packed. 880M Params."""
  hparams = lmx_base()
  hparams.hidden_size = 1024
  hparams.filter_size = 65536
  hparams.batch_size = 2048
  return hparams
def compute_uncertainty_reward(logits, predictions):
  """Uncertainty reward based on logits."""
  # TODO(rsepassi): Add support for L1/L2 loss models. Current code only
  # works for softmax models.
  vocab_size = logits.shape[-1]
  assert vocab_size > 1
  log_probs = common_layers.log_prob_from_logits(logits)
  max_log_probs = common_layers.index_last_dim_with_indices(
      log_probs, predictions)
  # Threshold.
  neg_log_prob = tf.nn.relu(-max_log_probs - 0.02)
  # Sum across all but the batch dimension.
  reduce_dims = list(range(len(neg_log_prob.shape)))[1:]
  summed = tf.reduce_sum(neg_log_prob, axis=reduce_dims)
  return summed / 10
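# A numpy sketch of what the reward computes, with made-up numbers (names are
# illustrative): the log-probability the model assigned to its own prediction
# is thresholded at -0.02, and the excess negative log-probability is summed
# and scaled by 1/10.
import numpy as np

logits = np.array([[2.0, 0.1, -1.0]])        # one prediction over a vocab of 3
log_probs = logits - np.log(np.exp(logits).sum(axis=-1, keepdims=True))
predictions = np.array([0])
max_log_probs = log_probs[np.arange(len(predictions)), predictions]
neg_log_prob = np.maximum(-max_log_probs - 0.02, 0.0)  # tf.nn.relu analogue
print(neg_log_prob.sum() / 10)  # near zero when the model is confident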
def _reset_non_empty(self, indices):
  """Reset the batch of environments.

  Args:
    indices: The batch indices of the environments to reset; defaults to all.

  Returns:
    Batch tensor of the new observations.
  """
  reset_video_op = tf.cond(
      self._video_condition,
      lambda: tf.py_func(self._video_reset_writer, [], []),
      tf.no_op)
  with tf.control_dependencies([reset_video_op]):
    inc_op = tf.assign_add(self._episode_counter, 1)
    with tf.control_dependencies([self.history_buffer.reset(indices),
                                  inc_op]):
      initial_frame_dump_op = tf.cond(
          self._video_condition,
          lambda: tf.py_func(self._video_dump_frames,  # pylint: disable=g-long-lambda
                             [self.history_buffer.get_all_elements()], []),
          tf.no_op)
      observ_assign_op = self._observ.assign(
          self.history_buffer.get_all_elements()[:, -1, ...])
      with tf.control_dependencies([observ_assign_op, initial_frame_dump_op]):
        reset_model_op = tf.assign(self._reset_model, tf.constant(1.0))
        with tf.control_dependencies([reset_model_op]):
          return tf.gather(self._observ.read_value(), indices)
def set_random_seed():
  """Set the random seed from flag everywhere."""
  tf.set_random_seed(FLAGS.random_seed)
  random.seed(FLAGS.random_seed)
  np.random.seed(FLAGS.random_seed)
def generate_data_for_problem(problem):
  """Generate data for a problem in _SUPPORTED_PROBLEM_GENERATORS."""
  training_gen, dev_gen, test_gen = _SUPPORTED_PROBLEM_GENERATORS[problem]
  num_train_shards = FLAGS.num_shards or 10
  tf.logging.info("Generating training data for %s.", problem)
  train_output_files = generator_utils.train_data_filenames(
      problem + generator_utils.UNSHUFFLED_SUFFIX, FLAGS.data_dir,
      num_train_shards)
  generator_utils.generate_files(training_gen(), train_output_files,
                                 FLAGS.max_cases)
  num_dev_shards = int(num_train_shards * 0.1)
  tf.logging.info("Generating development data for %s.", problem)
  dev_output_files = generator_utils.dev_data_filenames(
      problem + generator_utils.UNSHUFFLED_SUFFIX, FLAGS.data_dir,
      num_dev_shards)
  generator_utils.generate_files(dev_gen(), dev_output_files)
  num_test_shards = int(num_train_shards * 0.1)
  test_output_files = []
  test_gen_data = test_gen()
  if test_gen_data is not None:
    tf.logging.info("Generating test data for %s.", problem)
    test_output_files = generator_utils.test_data_filenames(
        problem + generator_utils.UNSHUFFLED_SUFFIX, FLAGS.data_dir,
        num_test_shards)
    generator_utils.generate_files(test_gen_data, test_output_files)
  all_output_files = train_output_files + dev_output_files + test_output_files
  generator_utils.shuffle_dataset(all_output_files)
def generate_data_for_env_problem(problem_name):
  """Generate data for `EnvProblem`s."""
  assert FLAGS.env_problem_max_env_steps > 0, ("--env_problem_max_env_steps "
                                               "should be greater than zero")
  assert FLAGS.env_problem_batch_size > 0, ("--env_problem_batch_size should "
                                            "be greater than zero")
  problem = registry.env_problem(problem_name)
  task_id = None if FLAGS.task_id < 0 else FLAGS.task_id
  data_dir = os.path.expanduser(FLAGS.data_dir)
  tmp_dir = os.path.expanduser(FLAGS.tmp_dir)
  # TODO(msaffar): Handle large values for env_problem_batch_size where we
  # cannot create that many environments within the same process.
  problem.initialize(batch_size=FLAGS.env_problem_batch_size)
  env_problem_utils.play_env_problem_randomly(
      problem, num_steps=FLAGS.env_problem_max_env_steps)
  problem.generate_data(data_dir=data_dir, tmp_dir=tmp_dir, task_id=task_id)
def generate_data_for_registered_problem(problem_name):
  """Generate data for a registered problem."""
  tf.logging.info("Generating data for %s.", problem_name)
  if FLAGS.num_shards:
    raise ValueError("--num_shards should not be set for registered Problem.")
  problem = registry.problem(problem_name)
  task_id = None if FLAGS.task_id < 0 else FLAGS.task_id
  data_dir = os.path.expanduser(FLAGS.data_dir)
  tmp_dir = os.path.expanduser(FLAGS.tmp_dir)
  if task_id is None and problem.multiprocess_generate:
    if FLAGS.task_id_start != -1:
      assert FLAGS.task_id_end != -1
      task_id_start = FLAGS.task_id_start
      task_id_end = FLAGS.task_id_end
    else:
      task_id_start = 0
      task_id_end = problem.num_generate_tasks
    pool = multiprocessing.Pool(processes=FLAGS.num_concurrent_processes)
    problem.prepare_to_generate(data_dir, tmp_dir)
    args = [(problem_name, data_dir, tmp_dir, task_id)
            for task_id in range(task_id_start, task_id_end)]
    pool.map(generate_data_in_process, args)
  else:
    problem.generate_data(data_dir, tmp_dir, task_id)
def _collect_data(directory):
  """Traverses directory collecting input and target files.

  Args:
    directory: base path to extracted audio and transcripts.

  Returns:
    list of (media_base, media_filepath, label) tuples.
  """
  data_files = []
  transcripts = [
      filename for filename in os.listdir(directory)
      if filename.endswith(".csv")
  ]
  for transcript in transcripts:
    transcript_path = os.path.join(directory, transcript)
    with open(transcript_path, "r") as transcript_file:
      transcript_reader = csv.reader(transcript_file)
      # Skip the header row.
      _ = next(transcript_reader)
      for transcript_line in transcript_reader:
        media_name, label = transcript_line[0:2]
        filename = os.path.join(directory, media_name)
        data_files.append((media_name, filename, label))
  return data_files
def _file_exists(path, filename):
  """Checks if the filename exists under the path."""
  return os.path.isfile(os.path.join(path, filename))
def _is_relative(path, filename):
  """Checks if the filename is relative, not absolute."""
  return os.path.abspath(os.path.join(path, filename)).startswith(path)
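# Quick sanity check with hypothetical POSIX paths: a filename escaping the
# base path via ".." is flagged as not relative.
print(_is_relative("/data", "file.txt"))       # True:  /data/file.txt
print(_is_relative("/data", "../etc/passwd"))  # False: resolves to /etc/passwd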
def define_ppo_step(data_points, hparams, action_space, lr):
  """Define ppo step."""
  observation, action, discounted_reward, norm_advantage, old_pdf = data_points

  obs_shape = common_layers.shape_list(observation)
  observation = tf.reshape(
      observation, [obs_shape[0] * obs_shape[1]] + obs_shape[2:])
  (logits, new_value) = get_policy(observation, hparams, action_space)
  logits = tf.reshape(logits, obs_shape[:2] + [action_space.n])
  new_value = tf.reshape(new_value, obs_shape[:2])
  new_policy_dist = tfp.distributions.Categorical(logits=logits)

  new_pdf = new_policy_dist.prob(action)

  ratio = new_pdf / old_pdf
  clipped_ratio = tf.clip_by_value(ratio, 1 - hparams.clipping_coef,
                                   1 + hparams.clipping_coef)

  surrogate_objective = tf.minimum(clipped_ratio * norm_advantage,
                                   ratio * norm_advantage)
  policy_loss = -tf.reduce_mean(surrogate_objective)

  value_error = new_value - discounted_reward
  value_loss = hparams.value_loss_coef * tf.reduce_mean(value_error ** 2)

  entropy = new_policy_dist.entropy()
  entropy_loss = -hparams.entropy_loss_coef * tf.reduce_mean(entropy)

  losses = [policy_loss, value_loss, entropy_loss]
  loss = sum(losses)
  variables = tf.global_variables(hparams.policy_network + "/.*")
  train_op = optimize.optimize(loss, lr, hparams, variables=variables)

  with tf.control_dependencies([train_op]):
    return [tf.identity(x) for x in losses]
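# The heart of the step above is PPO's clipped surrogate. A minimal numpy
# sketch with toy numbers (clipped_surrogate is an illustration, not the
# library code): clip the probability ratio, take the elementwise minimum of
# the clipped and unclipped terms, and negate the mean so it can be minimized.
import numpy as np


def clipped_surrogate(new_pdf, old_pdf, advantage, clip=0.2):
  ratio = new_pdf / old_pdf
  clipped = np.clip(ratio, 1 - clip, 1 + clip)
  return -np.mean(np.minimum(ratio * advantage, clipped * advantage))


print(clipped_surrogate(np.array([0.5, 0.9]), np.array([0.4, 0.5]),
                        np.array([1.0, -1.0])))  # 0.3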
def define_ppo_epoch(memory, hparams, action_space, batch_size):
  """PPO epoch."""
  observation, reward, done, action, old_pdf, value = memory

  # This is to avoid propagating gradients through simulated environment.
  observation = tf.stop_gradient(observation)
  action = tf.stop_gradient(action)
  reward = tf.stop_gradient(reward)
  if hasattr(hparams, "rewards_preprocessing_fun"):
    reward = hparams.rewards_preprocessing_fun(reward)
  done = tf.stop_gradient(done)
  value = tf.stop_gradient(value)
  old_pdf = tf.stop_gradient(old_pdf)

  advantage = calculate_generalized_advantage_estimator(
      reward, value, done, hparams.gae_gamma, hparams.gae_lambda)

  discounted_reward = tf.stop_gradient(advantage + value[:-1])

  advantage_mean, advantage_variance = tf.nn.moments(
      advantage, axes=[0, 1], keep_dims=True)
  advantage_normalized = tf.stop_gradient(
      (advantage - advantage_mean) / (tf.sqrt(advantage_variance) + 1e-8))

  add_lists_elementwise = lambda l1, l2: [x + y for x, y in zip(l1, l2)]

  number_of_batches = ((hparams.epoch_length - 1) *
                       hparams.optimization_epochs //
                       hparams.optimization_batch_size)
  epoch_length = hparams.epoch_length
  if hparams.effective_num_agents is not None:
    number_of_batches *= batch_size
    number_of_batches //= hparams.effective_num_agents
    epoch_length //= hparams.effective_num_agents

  assert number_of_batches > 0, ("Set the parameters so that "
                                 "number_of_batches > 0")
  lr = learning_rate.learning_rate_schedule(hparams)

  shuffled_indices = [tf.random.shuffle(tf.range(epoch_length - 1))
                      for _ in range(hparams.optimization_epochs)]
  shuffled_indices = tf.concat(shuffled_indices, axis=0)
  shuffled_indices = shuffled_indices[:number_of_batches *
                                      hparams.optimization_batch_size]
  indices_of_batches = tf.reshape(shuffled_indices,
                                  shape=(-1, hparams.optimization_batch_size))
  input_tensors = [observation, action, discounted_reward,
                   advantage_normalized, old_pdf]

  ppo_step_rets = tf.scan(
      lambda a, i: add_lists_elementwise(  # pylint: disable=g-long-lambda
          a,
          define_ppo_step(
              [tf.gather(t, indices_of_batches[i, :]) for t in input_tensors],
              hparams, action_space, lr)),
      tf.range(number_of_batches),
      [0., 0., 0.],
      parallel_iterations=1)

  ppo_summaries = [tf.reduce_mean(ret) / number_of_batches
                   for ret in ppo_step_rets]
  ppo_summaries.append(lr)
  summaries_names = [
      "policy_loss", "value_loss", "entropy_loss", "learning_rate"
  ]

  summaries = [tf.summary.scalar(summary_name, summary)
               for summary_name, summary in zip(summaries_names,
                                                ppo_summaries)]
  losses_summary = tf.summary.merge(summaries)

  for summary_name, summary in zip(summaries_names, ppo_summaries):
    losses_summary = tf.Print(losses_summary, [summary], summary_name + ": ")

  return losses_summary
def calculate_generalized_advantage_estimator(
    reward, value, done, gae_gamma, gae_lambda):
  # pylint: disable=g-doc-args
  """Generalized advantage estimator.

  Returns:
    GAE estimator. It will be one element shorter than the input; this is
    because to compute GAE for [0, ..., N-1] one needs V for [1, ..., N].
  """
  # pylint: enable=g-doc-args

  next_value = value[1:, :]
  next_not_done = 1 - tf.cast(done[1:, :], tf.float32)
  delta = (reward[:-1, :] + gae_gamma * next_value * next_not_done
           - value[:-1, :])

  return_ = tf.reverse(tf.scan(
      lambda agg, cur: cur[0] + cur[1] * gae_gamma * gae_lambda * agg,
      [tf.reverse(delta, [0]), tf.reverse(next_not_done, [0])],
      tf.zeros_like(delta[0, :]),
      parallel_iterations=1), [0])

  return tf.check_numerics(return_, "return")
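# The tf.scan above implements the standard GAE backward recurrence
#   A_t = delta_t + gamma * lambda * (1 - done_{t+1}) * A_{t+1}.
# A plain-Python reference on 1-D toy arrays makes the loop explicit (the TF
# version is time-major with a batch axis; gae_np and its inputs are assumed
# for illustration):
import numpy as np


def gae_np(reward, value, done, gamma, lam):
  next_value = value[1:]
  next_not_done = 1.0 - done[1:].astype(np.float32)
  delta = reward[:-1] + gamma * next_value * next_not_done - value[:-1]
  advantage = np.zeros_like(delta)
  acc = 0.0
  for t in reversed(range(len(delta))):
    acc = delta[t] + gamma * lam * next_not_done[t] * acc
    advantage[t] = acc
  return advantage


reward = np.array([1.0, 1.0, 1.0, 0.0])
value = np.array([0.5, 0.5, 0.5, 0.5])
done = np.array([False, False, False, True])
print(gae_np(reward, value, done, gamma=0.99, lam=0.95))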
def gym_space_spec(gym_space):
  """Returns a reading spec of a gym space.

  NOTE: Only implemented currently for Box and Discrete.

  Args:
    gym_space: instance of gym.spaces whose spec we want.

  Returns:
    Reading spec for that space.

  Raises:
    NotImplementedError: For spaces whose reading spec we haven't implemented.
  """
  # First try to determine the type.
  try:
    tf_dtype = tf.as_dtype(gym_space.dtype)
  except TypeError as e:
    tf.logging.error("Cannot convert space's type [%s] to tf.dtype",
                     gym_space.dtype)
    raise e

  # Now hand it over to the specialized functions.
  if isinstance(gym_space, Box):
    return box_space_spec(gym_space, tf_dtype)
  elif isinstance(gym_space, Discrete):
    return discrete_space_spec(gym_space, tf_dtype)
  else:
    raise NotImplementedError
def cardinality(gym_space):
  """Number of elements that can be represented by the space.

  Makes the most sense for Discrete or Box type with integral dtype, ex:
  number of actions in an action space.

  Args:
    gym_space: The gym space.

  Returns:
    np.int64 number of observations that can be represented by this space,
    or returns None when this doesn't make sense, i.e. float boxes etc.

  Raises:
    NotImplementedError when a space's cardinality makes sense but we haven't
      implemented it.
  """
  if (gym_space.dtype == np.float32) or (gym_space.dtype == np.float64):
    tf.logging.error("Returning None for a float gym space's cardinality: %s",
                     gym_space)
    return None

  if isinstance(gym_space, Discrete):
    return gym_space.n

  if isinstance(gym_space, Box):
    # Construct a box with all possible values in this box and take a product.
    return np.prod(gym_space.high - gym_space.low + 1)

  raise NotImplementedError
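# Hypothetical examples of the counting rule: Discrete(6) has 6 elements; an
# integer Box with low=0, high=255, shape=(2,) has (255 - 0 + 1) ** 2 = 65536
# elements, i.e. np.prod(high - low + 1). Float boxes return None because
# their value set is not usefully countable.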
def image_rmse(predictions, labels, weights_fn=common_layers.weights_all):
  """RMSE but will argmax if last dim is not 1."""
  if common_layers.shape_list(predictions)[-1] == 1:
    predictions = tf.squeeze(predictions, axis=[-1])
  else:
    predictions = tf.argmax(predictions, axis=-1)
  return padded_rmse(predictions, labels, weights_fn)
def abs_error(predictions, labels, weights_fn=None):
  """Computes mean(abs(preds-target))."""
  del weights_fn  # Unused.
  targets = tf.squeeze(labels, axis=[2, 3])
  batch_abs_error = tf.abs(predictions - targets)
  den = tf.ones(tf.shape(batch_abs_error), dtype=tf.float32)
  return (batch_abs_error, den)
def padded_variance_explained(predictions,
                              labels,
                              weights_fn=common_layers.weights_all):
  """Explained variance, also known as R^2."""
  predictions, labels = common_layers.pad_with_zeros(predictions, labels)
  targets = labels
  weights = weights_fn(targets)

  y_bar = tf.reduce_mean(weights * targets)
  tot_ss = tf.reduce_sum(weights * tf.pow(targets - y_bar, 2))
  res_ss = tf.reduce_sum(weights * tf.pow(targets - predictions, 2))
  r2 = 1. - res_ss / tot_ss
  return r2, tf.reduce_sum(weights)
def padded_accuracy_topk(predictions,
                         labels,
                         k,
                         weights_fn=common_layers.weights_nonzero):
  """Percentage of times that top-k predictions matches labels on non-0s."""
  with tf.variable_scope("padded_accuracy_topk", values=[predictions, labels]):
    padded_predictions, padded_labels = common_layers.pad_with_zeros(
        predictions, labels)
    weights = weights_fn(padded_labels)
    effective_k = tf.minimum(k,
                             common_layers.shape_list(padded_predictions)[-1])
    _, outputs = tf.nn.top_k(padded_predictions, k=effective_k)
    outputs = tf.to_int32(outputs)
    padded_labels = tf.to_int32(padded_labels)
    padded_labels = tf.expand_dims(padded_labels, axis=-1)
    padded_labels += tf.zeros_like(outputs)  # Pad to same shape.
    same = tf.to_float(tf.equal(outputs, padded_labels))
    same_topk = tf.reduce_sum(same, axis=-1)
    return same_topk, weights
def rounding_sequence_accuracy(predictions,
                               labels,
                               weights_fn=common_layers.weights_nonzero):
  """Sequence accuracy for L1/L2 losses: round down the predictions to ints."""
  outputs = tf.squeeze(tf.to_int32(predictions), axis=-1)
  weights = weights_fn(labels)
  labels = tf.to_int32(labels)
  not_correct = tf.to_float(tf.not_equal(outputs, labels)) * weights
  axis = list(range(1, len(outputs.get_shape())))
  correct_seq = 1.0 - tf.minimum(1.0, tf.reduce_sum(not_correct, axis=axis))
  return correct_seq, tf.constant(1.0)
def padded_sequence_accuracy(predictions,
                             labels,
                             weights_fn=common_layers.weights_nonzero):
  """Percentage of times that predictions matches labels everywhere (non-0)."""
  # If the last dimension is 1 then we're using L1/L2 loss.
  if common_layers.shape_list(predictions)[-1] == 1:
    return rounding_sequence_accuracy(
        predictions, labels, weights_fn=weights_fn)
  with tf.variable_scope(
      "padded_sequence_accuracy", values=[predictions, labels]):
    padded_predictions, padded_labels = common_layers.pad_with_zeros(
        predictions, labels)
    weights = weights_fn(padded_labels)

    # Flatten, keeping batch dim (and num_classes dim for predictions).
    # TPU argmax can only deal with a limited number of dimensions.
    predictions_shape = common_layers.shape_list(padded_predictions)
    batch_size = predictions_shape[0]
    num_classes = predictions_shape[-1]
    flat_size = common_layers.list_product(
        common_layers.shape_list(padded_labels)[1:])
    padded_predictions = tf.reshape(
        padded_predictions,
        [batch_size, common_layers.list_product(predictions_shape[1:-1]),
         num_classes])
    padded_labels = tf.reshape(padded_labels, [batch_size, flat_size])
    weights = tf.reshape(weights, [batch_size, flat_size])

    outputs = tf.to_int32(tf.argmax(padded_predictions, axis=-1))
    padded_labels = tf.to_int32(padded_labels)
    not_correct = tf.to_float(tf.not_equal(outputs, padded_labels)) * weights
    axis = list(range(1, len(outputs.get_shape())))
    correct_seq = 1.0 - tf.minimum(1.0, tf.reduce_sum(not_correct, axis=axis))
    return correct_seq, tf.constant(1.0)
Average edit distance, ignoring padding 0s. The score returned is the edit distance divided by the total length of reference truth and the weight returned is the total length of the truth. Args: predictions: Tensor of shape [`batch_size`, `length`, 1, `num_classes`] and type tf.float32 representing the logits, 0-padded. labels: Tensor of shape [`batch_size`, `length`, 1, 1] and type tf.int32 representing the labels of same length as logits and 0-padded. weights_fn: ignored. The weights returned are the total length of the ground truth labels, excluding 0-paddings. Returns: (edit distance / reference length, reference length) Raises: ValueError: if weights_fn is not common_layers.weights_nonzero.
def sequence_edit_distance(predictions, labels, weights_fn=common_layers.weights_nonzero): """Average edit distance, ignoring padding 0s. The score returned is the edit distance divided by the total length of reference truth and the weight returned is the total length of the truth. Args: predictions: Tensor of shape [`batch_size`, `length`, 1, `num_classes`] and type tf.float32 representing the logits, 0-padded. labels: Tensor of shape [`batch_size`, `length`, 1, 1] and type tf.int32 representing the labels of same length as logits and 0-padded. weights_fn: ignored. The weights returned are the total length of the ground truth labels, excluding 0-paddings. Returns: (edit distance / reference length, reference length) Raises: ValueError: if weights_fn is not common_layers.weights_nonzero. """ if weights_fn is not common_layers.weights_nonzero: raise ValueError("Only weights_nonzero can be used for this metric.") with tf.variable_scope("edit_distance", values=[predictions, labels]): # Transform logits into sequence classes by taking max at every step. predictions = tf.to_int32( tf.squeeze(tf.argmax(predictions, axis=-1), axis=(2, 3))) nonzero_idx = tf.where(tf.not_equal(predictions, 0)) sparse_outputs = tf.SparseTensor(nonzero_idx, tf.gather_nd(predictions, nonzero_idx), tf.shape(predictions, out_type=tf.int64)) labels = tf.squeeze(labels, axis=(2, 3)) nonzero_idx = tf.where(tf.not_equal(labels, 0)) label_sparse_outputs = tf.SparseTensor(nonzero_idx, tf.gather_nd(labels, nonzero_idx), tf.shape(labels, out_type=tf.int64)) distance = tf.reduce_sum( tf.edit_distance(sparse_outputs, label_sparse_outputs, normalize=False)) reference_length = tf.to_float(common_layers.shape_list(nonzero_idx)[0]) return distance / reference_length, reference_length
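A hedged pure-Python analogue of this metric: the Levenshtein distance between the non-zero token sequences, divided by the reference length.

def levenshtein(a, b):
  """Minimal dynamic-programming edit distance between two sequences."""
  dp = list(range(len(b) + 1))
  for i, x in enumerate(a, 1):
    prev, dp[0] = dp[0], i
    for j, y in enumerate(b, 1):
      prev, dp[j] = dp[j], min(dp[j] + 1, dp[j - 1] + 1, prev + (x != y))
  return dp[-1]

predicted, reference = [7, 8, 9], [7, 9]
print(levenshtein(predicted, reference) / float(len(reference)))  # 0.5: one edit over length 2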
Average log-perplexity excluding padding 0s. No smoothing.
def padded_neg_log_perplexity(predictions, labels, weights_fn=common_layers.weights_nonzero): """Average log-perplexity excluding padding 0s. No smoothing.""" num, den = common_layers.padded_cross_entropy( predictions, labels, 0.0, weights_fn=weights_fn, reduce_sum=False) return (-num, den)
Average log-perplexity with custom targets_mask.
def padded_neg_log_perplexity_with_masking( predictions, labels, features, weights_fn=None): """Average log-perplexity with custom targets_mask.""" del weights_fn if "targets_mask" not in features: raise ValueError("masked_neg_log_perplexity requires targets_mask feature") # Features are 4 dimensional, so we need to reshape the targets_mask to match # the shape of the labels. A lot of models rely on these features being 4D, # so it's best to update the shape of the mask. extended_targets_mask_shape = common_layers.shape_list( features["targets_mask"]) extended_targets_mask_shape.extend([1, 1]) features["targets_mask"] = tf.reshape(features["targets_mask"], shape=extended_targets_mask_shape) mask_fn = lambda labels: features["targets_mask"] return padded_neg_log_perplexity(predictions, labels, mask_fn)
Average log-perplexity excluding padding 0s. No smoothing.
def dmol_neg_log_perplexity(predictions, labels, weights_fn=None): """Average log-perplexity excluding padding 0s. No smoothing.""" del weights_fn # Unused num, den = common_layers.dml_loss( predictions, labels, reduce_sum=False) return (-num, den)
Rounding accuracy for L1/L2 losses: round down the predictions to ints.
def rounding_accuracy(predictions, labels, weights_fn=common_layers.weights_nonzero): """Rounding accuracy for L1/L2 losses: round down the predictions to ints.""" outputs = tf.squeeze(tf.to_int32(predictions)) labels = tf.squeeze(labels) weights = weights_fn(labels) labels = tf.to_int32(labels) return tf.to_float(tf.equal(outputs, labels)), weights
Percentage of times that predictions match labels on non-0s.
def padded_accuracy(predictions, labels, weights_fn=common_layers.weights_nonzero): """Percentage of times that predictions match labels on non-0s.""" # If the last dimension is 1 then we're using L1/L2 loss. if common_layers.shape_list(predictions)[-1] == 1: return rounding_accuracy(predictions, labels, weights_fn=weights_fn) with tf.variable_scope("padded_accuracy", values=[predictions, labels]): padded_predictions, padded_labels = common_layers.pad_with_zeros( predictions, labels) weights = weights_fn(padded_labels) outputs = tf.to_int32(tf.argmax(padded_predictions, axis=-1)) padded_labels = tf.to_int32(padded_labels) return tf.to_float(tf.equal(outputs, padded_labels)), weights
Used to evaluate the VQA accuracy. Let n be the number of times that predictions appear in labels; the final score is then min(n/k, 1). Refer to https://arxiv.org/pdf/1505.00468.pdf. Args: predictions: A tensor with shape [batch_size, 1, 1, 1, vocab_size]. labels: A tensor with shape [batch_size, length, 1, 1]. k: A tensor constant. weights_fn: weight function. Returns: scores: min(n/k, 1). weights: returns all ones.
def multilabel_accuracy_matchk(predictions, labels, k, weights_fn=common_layers.weights_nonzero): """Used to evaluate the VQA accuracy. Let n be the number of times that predictions appear in labels; the final score is then min(n/k, 1). Refer to https://arxiv.org/pdf/1505.00468.pdf. Args: predictions: A tensor with shape [batch_size, 1, 1, 1, vocab_size]. labels: A tensor with shape [batch_size, length, 1, 1]. k: A tensor constant. weights_fn: weight function. Returns: scores: min(n/k, 1). weights: returns all ones. """ predictions = tf.to_int32(tf.argmax(predictions, axis=-1)) scores = tf.to_float(tf.equal(predictions, labels)) # Labels equal to 0 do not count. weights = weights_fn(labels) scores *= weights scores = tf.reduce_sum(scores, axis=[1, 2, 3]) scores = tf.minimum(scores / tf.to_float(k), 1) # Every sample counts. weights = tf.ones(tf.shape(scores), dtype=tf.float32) return scores, weights
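A toy check of the min(n/k, 1) rule (NumPy sketch with illustrative values): the predicted answer id appears twice among the non-padding labels, so with k = 3 the score is 2/3.

import numpy as np

prediction = 4
labels = np.array([4, 4, 7, 0])  # 0 is padding and does not count
k = 3
n = np.sum((labels == prediction) & (labels != 0))
print(min(n / float(k), 1.0))  # 0.666...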
Precision of set predictions. Args: predictions : A Tensor of scores of shape [batch, nlabels]. labels: A Tensor of int32s giving true set elements, of shape [batch, seq_length]. weights_fn: A function to weight the elements. Returns: hits: A Tensor of shape [batch, nlabels]. weights: A Tensor of shape [batch, nlabels].
def set_precision(predictions, labels, weights_fn=common_layers.weights_nonzero): """Precision of set predictions. Args: predictions : A Tensor of scores of shape [batch, nlabels]. labels: A Tensor of int32s giving true set elements, of shape [batch, seq_length]. weights_fn: A function to weight the elements. Returns: hits: A Tensor of shape [batch, nlabels]. weights: A Tensor of shape [batch, nlabels]. """ with tf.variable_scope("set_precision", values=[predictions, labels]): labels = tf.squeeze(labels, [2, 3]) weights = weights_fn(labels) labels = tf.one_hot(labels, predictions.shape[-1]) labels = tf.reduce_max(labels, axis=1) labels = tf.cast(labels, tf.bool) return tf.to_float(tf.equal(labels, predictions)), weights
Reshapes predictions and passes it to tensorboard. Args: predictions : The predicted image (logits). targets : The ground truth. hparams: model hparams. Returns: summary_proto: containing the summary images. weights: A Tensor of zeros of the same shape as predictions.
def image_summary(predictions, targets, hparams): """Reshapes predictions and passes it to tensorboard. Args: predictions : The predicted image (logits). targets : The ground truth. hparams: model hparams. Returns: summary_proto: containing the summary images. weights: A Tensor of zeros of the same shape as predictions. """ del hparams results = tf.cast(tf.argmax(predictions, axis=-1), tf.uint8) gold = tf.cast(targets, tf.uint8) summary1 = tf.summary.image("prediction", results, max_outputs=2) summary2 = tf.summary.image("data", gold, max_outputs=2) summary = tf.summary.merge([summary1, summary2]) return summary, tf.zeros_like(predictions)
Calculate softmax cross entropy given one-hot labels and logits. Args: logits: Tensor of size [batch-size, o=1, p=1, num-classes] labels: Tensor of size [batch-size, o=1, p=1, num-classes] weights_fn: Function that takes in labels and weighs examples (unused) Returns: cross-entropy (scalar), weights
def softmax_cross_entropy_one_hot(logits, labels, weights_fn=None): """Calculate softmax cross entropy given one-hot labels and logits. Args: logits: Tensor of size [batch-size, o=1, p=1, num-classes] labels: Tensor of size [batch-size, o=1, p=1, num-classes] weights_fn: Function that takes in labels and weighs examples (unused) Returns: cross-entropy (scalar), weights """ with tf.variable_scope("softmax_cross_entropy_one_hot", values=[logits, labels]): del weights_fn cross_entropy = tf.losses.softmax_cross_entropy( onehot_labels=labels, logits=logits) return cross_entropy, tf.constant(1.0)
Calculate accuracy for a set, given one-hot labels and logits. Args: logits: Tensor of size [batch-size, o=1, p=1, num-classes] labels: Tensor of size [batch-size, o=1, p=1, num-classes] weights_fn: Function that takes in labels and weighs examples (unused) Returns: accuracy (scalar), weights
def sigmoid_accuracy_one_hot(logits, labels, weights_fn=None): """Calculate accuracy for a set, given one-hot labels and logits. Args: logits: Tensor of size [batch-size, o=1, p=1, num-classes] labels: Tensor of size [batch-size, o=1, p=1, num-classes] weights_fn: Function that takes in labels and weighs examples (unused) Returns: accuracy (scalar), weights """ with tf.variable_scope("sigmoid_accuracy_one_hot", values=[logits, labels]): del weights_fn predictions = tf.nn.sigmoid(logits) labels = tf.argmax(labels, -1) predictions = tf.argmax(predictions, -1) _, accuracy = tf.metrics.accuracy(labels=labels, predictions=predictions) return accuracy, tf.constant(1.0)
Calculate recall for a set, given one-hot labels and logits. Predictions are converted to one-hot, as predictions[example][arg-max(example)] = 1 Args: logits: Tensor of size [batch-size, o=1, p=1, num-classes] labels: Tensor of size [batch-size, o=1, p=1, num-classes] weights_fn: Function that takes in labels and weighs examples (unused) Returns: recall (scalar), weights
def sigmoid_recall_one_hot(logits, labels, weights_fn=None): """Calculate recall for a set, given one-hot labels and logits. Predictions are converted to one-hot, as predictions[example][arg-max(example)] = 1 Args: logits: Tensor of size [batch-size, o=1, p=1, num-classes] labels: Tensor of size [batch-size, o=1, p=1, num-classes] weights_fn: Function that takes in labels and weighs examples (unused) Returns: recall (scalar), weights """ with tf.variable_scope("sigmoid_recall_one_hot", values=[logits, labels]): del weights_fn num_classes = logits.shape[-1] predictions = tf.nn.sigmoid(logits) predictions = tf.argmax(predictions, -1) predictions = tf.one_hot(predictions, num_classes) _, recall = tf.metrics.recall(labels=labels, predictions=predictions) return recall, tf.constant(1.0)
Calculate sigmoid cross entropy for one-hot labels and logits. Args: logits: Tensor of size [batch-size, o=1, p=1, num-classes] labels: Tensor of size [batch-size, o=1, p=1, num-classes] weights_fn: Function that takes in labels and weighs examples (unused) Returns: cross_entropy (scalar), weights
def sigmoid_cross_entropy_one_hot(logits, labels, weights_fn=None): """Calculate sigmoid cross entropy for one-hot labels and logits. Args: logits: Tensor of size [batch-size, o=1, p=1, num-classes] labels: Tensor of size [batch-size, o=1, p=1, num-classes] weights_fn: Function that takes in labels and weighs examples (unused) Returns: cross_entropy (scalar), weights """ with tf.variable_scope("sigmoid_cross_entropy_one_hot", values=[logits, labels]): del weights_fn cross_entropy = tf.losses.sigmoid_cross_entropy( multi_class_labels=labels, logits=logits) return cross_entropy, tf.constant(1.0)
Calculate ROC AUC. Requires binary classes. Args: logits: Tensor of size [batch_size, 1, 1, num_classes] labels: Tensor of size [batch_size, 1, 1, num_classes] weights_fn: Function that takes in labels and weighs examples (unused) Returns: ROC AUC (scalar), weights
def roc_auc(logits, labels, weights_fn=None): """Calculate ROC AUC. Requires binary classes. Args: logits: Tensor of size [batch_size, 1, 1, num_classes] labels: Tensor of size [batch_size, 1, 1, num_classes] weights_fn: Function that takes in labels and weighs examples (unused) Returns: ROC AUC (scalar), weights """ del weights_fn with tf.variable_scope("roc_auc", values=[logits, labels]): predictions = tf.argmax(logits, axis=-1) _, auc = tf.metrics.auc(labels, predictions, curve="ROC") return auc, tf.constant(1.0)
Creates the evaluation metrics for the model. Args: problems: List of Problem instances. model_hparams: a set of hparams. Returns: dict<metric name, metric function>. The metric functions have signature (Tensor predictions, features) -> (metric Tensor, update op), where features is a dict with keys {targets}. Raises: ValueError: if the metrics specified by a problem are not recognized (i.e. are not defined in the Metrics enum).
def create_evaluation_metrics(problems, model_hparams): """Creates the evaluation metrics for the model. Args: problems: List of Problem instances. model_hparams: a set of hparams. Returns: dict<metric name, metric function>. The metric functions have signature (Tensor predictions, features) -> (metric Tensor, update op), where features is a dict with keys {targets}. Raises: ValueError: if the metrics specified by a problem are not recognized (i.e. are not defined in the Metrics enum. """ def reduce_dimensions(predictions, labels): """Reduce dimensions for high-dimensional predictions and labels.""" # We will treat first dimensions as batch. One example are video frames. if len(predictions.get_shape()) > 5: predictions_shape = common_layers.shape_list(predictions) predictions = tf.reshape( predictions, [predictions_shape[0], predictions_shape[1], -1, predictions_shape[-1]]) labels_shape = common_layers.shape_list(labels) labels = tf.reshape( labels, [labels_shape[0], labels_shape[1], -1]) return predictions, labels def make_problem_specific_metric_fn(metric_fn, weights_fn): """Create a metric fn.""" def problem_metric_fn(predictions, features, labels): """Metric fn.""" # Send along the entire features dict if the metric fn has the kwarg # "features". kwargs = {} args, _, keywords, _ = inspect.getargspec(metric_fn) if ("features" in args) or keywords: kwargs["features"] = features predictions, labels = reduce_dimensions(predictions, labels) scores, weights = metric_fn(predictions, labels, weights_fn=weights_fn, **kwargs) return tf.metrics.mean(scores, weights) return problem_metric_fn def make_image_wrapped_metric_fn(metric_fn): """Metric fn without tf.metrics.mean.""" def image_wrapped_metric_fn(predictions, features, labels, weights_fn=common_layers.weights_all): del weights_fn del features predictions, labels = reduce_dimensions(predictions, labels) return metric_fn(predictions, labels, model_hparams) return image_wrapped_metric_fn def weights_fn_for_mp(problem_task_id): return lambda x: common_layers.weights_multi_problem(x, problem_task_id) eval_metrics = {} for problem_instance in problems: problem_name = problem_instance.name if problem_instance.was_reversed: problem_name += "_rev" metrics = problem_instance.eval_metric_fns(model_hparams) if hasattr(model_hparams.problem, "task_list"): metrics = model_hparams.problem.eval_metric_fns(model_hparams) tm = problem_instance.get_hparams(model_hparams).modality["targets"] if not isinstance(tm, dict): tm = {"targets": tm} for target_name, modality in six.iteritems(tm): weights_fn = model_hparams.weights_fn.get( "targets", modalities.get_weights_fn(modality)) if hasattr(model_hparams.problem, "task_list"): ptid = problem_instance.task_id # pylint: disable=cell-var-from-loop weights_fn = weights_fn_for_mp(ptid) for metric, metric_fn in six.iteritems(metrics): overload_eval_metric_name = getattr( model_hparams, "overload_eval_metric_name", None) if len(problems) == 1 and overload_eval_metric_name: metric_name = "metrics-%s/%s/%s" % ( overload_eval_metric_name, target_name, metric) else: metric_name = "metrics-%s/%s/%s" % (problem_name, target_name, metric) if metric == Metrics.IMAGE_SUMMARY: eval_metrics[metric_name] = make_image_wrapped_metric_fn(metric_fn) else: eval_metrics[metric_name] = make_problem_specific_metric_fn( metric_fn, weights_fn) return eval_metrics
See create_eager_metrics.
def create_eager_metrics_for_problem(problem, model_hparams): """See create_eager_metrics.""" metric_fns = problem.eval_metric_fns(model_hparams) problem_hparams = problem.get_hparams(model_hparams) target_modality = problem_hparams.modality["targets"] weights_fn = model_hparams.weights_fn.get( "targets", modalities.get_weights_fn(target_modality)) return create_eager_metrics_internal(metric_fns, weights_fn=weights_fn)
Create metrics accumulators and averager for Eager mode. Args: metric_names: list<str> from Metrics enum weights_fn: function that takes labels and returns a weights mask. Defaults to weights of all 1, i.e. common_layers.weights_all. Use common_layers.weights_nonzero if labels have 0-padding. Returns: (accum_fn(predictions, targets) => None, result_fn() => dict<str metric_name, float avg_val>)
def create_eager_metrics(metric_names, weights_fn=common_layers.weights_all): """Create metrics accumulators and averager for Eager mode. Args: metric_names: list<str> from Metrics enum weights_fn: function that takes labels and returns a weights mask. Defaults to weights of all 1, i.e. common_layers.weights_all. Use common_layers.weights_nonzero if labels have 0-padding. Returns: (accum_fn(predictions, targets) => None, result_fn() => dict<str metric_name, float avg_val>) """ metric_fns = dict( [(name, METRICS_FNS[name]) for name in metric_names]) return create_eager_metrics_internal(metric_fns, weights_fn)
Create metrics accumulators and averager for Eager mode. Args: metric_fns: dict<metric name, metric function> weights_fn: function that takes labels and returns a weights mask. Defaults to weights of all 1, i.e. common_layers.weights_all. Use common_layers.weights_nonzero if labels have 0-padding. Returns: (accum_fn(predictions, targets) => None, result_fn() => dict<str metric_name, float avg_val>)
def create_eager_metrics_internal(metric_fns, weights_fn=common_layers.weights_all): """Create metrics accumulators and averager for Eager mode. Args: metric_fns: dict<metric name, metric function> weights_fn: function that takes labels and returns a weights mask. Defaults to weights of all 1, i.e. common_layers.weights_all. Use common_layers.weights_nonzero if labels have 0-padding. Returns: (accum_fn(predictions, targets) => None, result_fn() => dict<str metric_name, float avg_val>) """ tfe_metrics = {} for name in metric_fns: tfe_metrics[name] = tfe.metrics.Mean(name=name) def metric_accum(predictions, targets): for name, metric_fn in metric_fns.items(): val, weight = metric_fn(predictions, targets, weights_fn=weights_fn) tfe_metrics[name](np.squeeze(val), np.squeeze(weight)) def metric_means(): avgs = {} for name in metric_fns: avgs[name] = tfe_metrics[name].result().numpy() return avgs return metric_accum, metric_means
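The accumulate-then-average pattern these helpers wrap can be mirrored in a few lines of plain Python (a sketch of the idea, roughly the contract of tfe.metrics.Mean, not the actual API):

class Mean(object):
  """Weighted running mean: accumulate (value, weight) pairs, then average."""

  def __init__(self):
    self.total = 0.0
    self.weight = 0.0

  def __call__(self, value, weight=1.0):
    self.total += value * weight
    self.weight += weight

  def result(self):
    return self.total / self.weight

acc = Mean()
acc(0.5, weight=2.0)
acc(1.0, weight=1.0)
print(acc.result())  # 0.666...: (0.5 * 2 + 1.0 * 1) / 3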
Calculate word error rate. Args: raw_predictions: The raw predictions. labels: The actual labels. lookup: A tf.constant mapping indices to output tokens. weights_fn: Weighting function. Returns: The word error rate.
def word_error_rate(raw_predictions, labels, lookup=None, weights_fn=common_layers.weights_nonzero): """Calculate word error rate. Args: raw_predictions: The raw predictions. labels: The actual labels. lookup: A tf.constant mapping indices to output tokens. weights_fn: Weighting function. Returns: The word error rate. """ def from_tokens(raw, lookup_): gathered = tf.gather(lookup_, tf.cast(raw, tf.int32)) joined = tf.regex_replace(tf.reduce_join(gathered, axis=1), b"<EOS>.*", b"") cleaned = tf.regex_replace(joined, b"_", b" ") tokens = tf.string_split(cleaned, " ") return tokens def from_characters(raw, lookup_): """Convert ascii+2 encoded codes to string-tokens.""" corrected = tf.bitcast( tf.clip_by_value(tf.subtract(raw, 2), 0, 255), tf.uint8) gathered = tf.gather(lookup_, tf.cast(corrected, tf.int32))[:, :, 0] joined = tf.reduce_join(gathered, axis=1) cleaned = tf.regex_replace(joined, b"\0", b"") tokens = tf.string_split(cleaned, " ") return tokens if lookup is None: lookup = tf.constant([chr(i) for i in range(256)]) convert_fn = from_characters else: convert_fn = from_tokens if weights_fn is not common_layers.weights_nonzero: raise ValueError("Only weights_nonzero can be used for this metric.") with tf.variable_scope("word_error_rate", values=[raw_predictions, labels]): raw_predictions = tf.squeeze( tf.argmax(raw_predictions, axis=-1), axis=(2, 3)) labels = tf.squeeze(labels, axis=(2, 3)) reference = convert_fn(labels, lookup) predictions = convert_fn(raw_predictions, lookup) distance = tf.reduce_sum( tf.edit_distance(predictions, reference, normalize=False)) reference_length = tf.cast( tf.size(reference.values, out_type=tf.int32), dtype=tf.float32) return distance / reference_length, reference_length
Calculate the Pearson correlation coefficient. Args: predictions: The raw predictions. labels: The actual labels. weights_fn: Weighting function. Returns: The Pearson correlation coefficient.
def pearson_correlation_coefficient(predictions, labels, weights_fn=None): """Calculate the Pearson correlation coefficient. Args: predictions: The raw predictions. labels: The actual labels. weights_fn: Weighting function. Returns: The Pearson correlation coefficient. """ del weights_fn _, pearson = tf.contrib.metrics.streaming_pearson_correlation(predictions, labels) return pearson, tf.constant(1.0)
Prepare one shard of the model for the decoder. Args: targets: a Tensor. hparams: run hyperparameters Returns: decoder_input: a Tensor, bottom of decoder stack decoder_self_attention_bias: a Tensor, containing large negative values to implement masked attention and possibly biases for diagonal alignments
def attention_lm_prepare_decoder(targets, hparams): """Prepare one shard of the model for the decoder. Args: targets: a Tensor. hparams: run hyperparameters Returns: decoder_input: a Tensor, bottom of decoder stack decoder_self_attention_bias: a Tensor, containing large negative values to implement masked attention and possibly biases for diagonal alignments """ if hparams.prepend_mode == "prepend_inputs_full_attention": decoder_self_attention_bias = ( common_attention.attention_bias_prepend_inputs_full_attention( common_attention.embedding_to_padding(targets))) else: decoder_self_attention_bias = ( common_attention.attention_bias_lower_triangle( common_layers.shape_list(targets)[1])) decoder_input = common_layers.shift_right_3d(targets) if hparams.pos == "timing": decoder_input = common_attention.add_timing_signal_1d(decoder_input) return (decoder_input, decoder_self_attention_bias)
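For intuition, here is a NumPy sketch (illustrative, not library code) of the lower-triangular self-attention bias described above: position i may attend only to positions <= i, and disallowed slots receive a large negative value that softmax turns into near-zero weight.

import numpy as np

length = 4
band = np.tril(np.ones((length, length)))
bias = -1e9 * (1.0 - band)
# Row i of `bias` has zeros in columns 0..i and -1e9 in columns i+1 and up,
# matching the masked-attention pattern used by the decoder.
print(bias)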
A stack of attention_lm layers. Args: decoder_input: a Tensor decoder_self_attention_bias: bias Tensor for self-attention (see common_attention.attention_bias()) hparams: hyperparameters for model name: a string Returns: y: a Tensor
def attention_lm_decoder(decoder_input, decoder_self_attention_bias, hparams, name="decoder"): """A stack of attention_lm layers. Args: decoder_input: a Tensor decoder_self_attention_bias: bias Tensor for self-attention (see common_attention.attention_bias()) hparams: hyperparameters for model name: a string Returns: y: a Tensor """ x = decoder_input with tf.variable_scope(name): for layer in range(hparams.num_hidden_layers): with tf.variable_scope("layer_%d" % layer): with tf.variable_scope("self_attention"): y = common_attention.multihead_attention( common_layers.layer_preprocess( x, hparams), None, decoder_self_attention_bias, hparams.attention_key_channels or hparams.hidden_size, hparams.attention_value_channels or hparams.hidden_size, hparams.hidden_size, hparams.num_heads, hparams.attention_dropout) x = common_layers.layer_postprocess(x, y, hparams) with tf.variable_scope("ffn"): y = common_layers.conv_hidden_relu( common_layers.layer_preprocess(x, hparams), hparams.filter_size, hparams.hidden_size, dropout=hparams.relu_dropout) x = common_layers.layer_postprocess(x, y, hparams) return common_layers.layer_preprocess(x, hparams)
Set of hyperparameters.
def attention_lm_base(): """Set of hyperparameters.""" hparams = common_hparams.basic_params1() hparams.hidden_size = 1024 hparams.batch_size = 8192 hparams.max_length = 256 hparams.dropout = 0.0 hparams.clip_grad_norm = 0. # i.e. no gradient clipping hparams.optimizer_adam_epsilon = 1e-9 hparams.learning_rate_decay_scheme = "noam" hparams.learning_rate = 0.1 hparams.learning_rate_warmup_steps = 2000 hparams.initializer_gain = 1.0 hparams.num_hidden_layers = 6 hparams.initializer = "uniform_unit_scaling" hparams.weight_decay = 0.0 hparams.optimizer_adam_beta1 = 0.9 hparams.optimizer_adam_beta2 = 0.98 hparams.label_smoothing = 0.0 hparams.shared_embedding_and_softmax_weights = False hparams.add_hparam("filter_size", 4096) # Add new ones like this. # attention-related flags hparams.add_hparam("num_heads", 8) hparams.add_hparam("attention_key_channels", 0) hparams.add_hparam("attention_value_channels", 0) # All hyperparameters ending in "dropout" are automatically set to 0.0 # when not in training mode. hparams.add_hparam("attention_dropout", 0.0) hparams.add_hparam("relu_dropout", 0.0) hparams.add_hparam("pos", "timing") # timing, none hparams.add_hparam("encoder_full_attention", False) return hparams
Cheap model. On lm1b_32k: 45M params, 2 steps/sec on [GeForce GTX TITAN X]. Returns: an hparams object.
def attention_lm_small(): """Cheap model. On lm1b_32k: 45M params, 2 steps/sec on [GeForce GTX TITAN X]. Returns: an hparams object. """ hparams = attention_lm_base() hparams.num_hidden_layers = 4 hparams.hidden_size = 512 hparams.filter_size = 2048 hparams.layer_prepostprocess_dropout = 0.5 return hparams
Version to use for seq2seq.
def attention_lm_translation(): """Version to use for seq2seq.""" hparams = attention_lm_base() hparams.layer_preprocess_sequence = "n" hparams.layer_postprocess_sequence = "da" hparams.learning_rate = 0.4 hparams.prepend_mode = "prepend_inputs_masked_attention" hparams.max_length = 512 hparams.label_smoothing = 0.1 hparams.shared_embedding_and_softmax_weights = True return hparams
BLEU score computation between labels and predictions. An approximate BLEU scoring method since we do not glue word pieces or decode the ids and tokenize the output. By default, we use ngram order of 4 and use brevity penalty. Also, this does not have beam search. Args: predictions: tensor, model predictions labels: tensor, gold output. Returns: bleu: float, approx bleu score
def bleu_score(predictions, labels, **unused_kwargs): """BLEU score computation between labels and predictions. An approximate BLEU scoring method since we do not glue word pieces or decode the ids and tokenize the output. By default, we use ngram order of 4 and use brevity penalty. Also, this does not have beam search. Args: predictions: tensor, model predictions labels: tensor, gold output. Returns: bleu: float, approx bleu score """ outputs = tf.to_int32(tf.argmax(predictions, axis=-1)) # Convert the outputs and labels to a [batch_size, input_length] tensor. outputs = tf.squeeze(outputs, axis=[-1, -2]) labels = tf.squeeze(labels, axis=[-1, -2]) bleu = tf.py_func(compute_bleu, (labels, outputs), tf.float32) return bleu, tf.constant(1.0)
Extracts all n-grams up to a given maximum order from an input segment. Args: segment: text segment from which n-grams will be extracted. max_order: maximum length in tokens of the n-grams returned by this method. Returns: The Counter containing all n-grams up to max_order in segment with a count of how many times each n-gram occurred.
def _get_ngrams(segment, max_order): """Extracts all n-grams up to a given maximum order from an input segment. Args: segment: text segment from which n-grams will be extracted. max_order: maximum length in tokens of the n-grams returned by this method. Returns: The Counter containing all n-grams up to max_order in segment with a count of how many times each n-gram occurred. """ ngram_counts = collections.Counter() for order in range(1, max_order + 1): for i in range(0, len(segment) - order + 1): ngram = tuple(segment[i:i + order]) ngram_counts[ngram] += 1 return ngram_counts
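Usage example for the helper above:

segment = "the cat sat".split()
ngrams = _get_ngrams(segment, max_order=2)
print(sorted(ngrams.items()))
# [(('cat',), 1), (('cat', 'sat'), 1), (('sat',), 1),
#  (('the',), 1), (('the', 'cat'), 1)]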
r"""Tokenize a string following the official BLEU implementation. See https://github.com/moses-smt/mosesdecoder/" "blob/master/scripts/generic/mteval-v14.pl#L954-L983 In our case, the input string is expected to be just one line and no HTML entities de-escaping is needed. So we just tokenize on punctuation and symbols, except when a punctuation is preceded and followed by a digit (e.g. a comma/dot as a thousand/decimal separator). Note that a number (e.g. a year) followed by a dot at the end of sentence is NOT tokenized, i.e. the dot stays with the number because `s/(\p{P})(\P{N})/ $1 $2/g` does not match this case (unless we add a space after each sentence). However, this error is already in the original mteval-v14.pl and we want to be consistent with it. Args: string: the input string Returns: a list of tokens
def bleu_tokenize(string): r"""Tokenize a string following the official BLEU implementation. See https://github.com/moses-smt/mosesdecoder/blob/master/scripts/generic/mteval-v14.pl#L954-L983 In our case, the input string is expected to be just one line and no HTML entities de-escaping is needed. So we just tokenize on punctuation and symbols, except when a punctuation is preceded and followed by a digit (e.g. a comma/dot as a thousand/decimal separator). Note that a number (e.g. a year) followed by a dot at the end of sentence is NOT tokenized, i.e. the dot stays with the number because `s/(\p{P})(\P{N})/ $1 $2/g` does not match this case (unless we add a space after each sentence). However, this error is already in the original mteval-v14.pl and we want to be consistent with it. Args: string: the input string Returns: a list of tokens """ string = uregex.nondigit_punct_re.sub(r"\1 \2 ", string) string = uregex.punct_nondigit_re.sub(r" \1 \2", string) string = uregex.symbol_re.sub(r" \1 ", string) return string.split()
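Expected behavior under the rules just described (assuming the uregex patterns implement the mteval-v14 punctuation classes): punctuation between digits stays attached, punctuation elsewhere is split off.

print(bleu_tokenize(u"It costs 1,000.5 dollars."))
# ['It', 'costs', '1,000.5', 'dollars', '.']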
Compute BLEU for two files (reference and hypothesis translation).
def bleu_wrapper(ref_filename, hyp_filename, case_sensitive=False): """Compute BLEU for two files (reference and hypothesis translation).""" ref_lines = text_encoder.native_to_unicode( tf.gfile.Open(ref_filename, "r").read()).split("\n") hyp_lines = text_encoder.native_to_unicode( tf.gfile.Open(hyp_filename, "r").read()).split("\n") assert len(ref_lines) == len(hyp_lines), ("{} != {}".format( len(ref_lines), len(hyp_lines))) if not case_sensitive: ref_lines = [x.lower() for x in ref_lines] hyp_lines = [x.lower() for x in hyp_lines] ref_tokens = [bleu_tokenize(x) for x in ref_lines] hyp_tokens = [bleu_tokenize(x) for x in hyp_lines] return compute_bleu(ref_tokens, hyp_tokens)
Glob twice, first time possibly catching `NotFoundError`. tf.gfile.Glob may crash with ``` tensorflow.python.framework.errors_impl.NotFoundError: xy/model.ckpt-1130761_temp_9cb4cb0b0f5f4382b5ea947aadfb7a40; No such file or directory ``` Standard glob.glob does not have this bug, but does not handle multiple filesystems (e.g. `gs://`), so we call tf.gfile.Glob, the first time possibly catching the `NotFoundError`. Args: pattern: str, glob pattern. Returns: list<str> matching filepaths.
def _try_twice_tf_glob(pattern): """Glob twice, first time possibly catching `NotFoundError`. tf.gfile.Glob may crash with ``` tensorflow.python.framework.errors_impl.NotFoundError: xy/model.ckpt-1130761_temp_9cb4cb0b0f5f4382b5ea947aadfb7a40; No such file or directory ``` Standard glob.glob does not have this bug, but does not handle multiple filesystems (e.g. `gs://`), so we call tf.gfile.Glob, the first time possibly catching the `NotFoundError`. Args: pattern: str, glob pattern. Returns: list<str> matching filepaths. """ try: return tf.gfile.Glob(pattern) except tf.errors.NotFoundError: return tf.gfile.Glob(pattern)
Return list of StepFiles sorted by step from files at path_prefix.
def _read_stepfiles_list(path_prefix, path_suffix=".index", min_steps=0): """Return list of StepFiles sorted by step from files at path_prefix.""" stepfiles = [] for filename in _try_twice_tf_glob(path_prefix + "*-[0-9]*" + path_suffix): basename = filename[:-len(path_suffix)] if path_suffix else filename try: steps = int(basename.rsplit("-")[-1]) except ValueError: # The -[0-9]* part is not an integer. continue if steps < min_steps: continue if not os.path.exists(filename): tf.logging.info(filename + " was deleted, so skipping it") continue stepfiles.append(StepFile(basename, os.path.getmtime(filename), os.path.getctime(filename), steps)) return sorted(stepfiles, key=lambda x: -x.steps)
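The step number is recovered from the trailing "-<digits>" of the checkpoint basename, e.g.:

basename = "model.ckpt-1130761"
print(int(basename.rsplit("-")[-1]))  # 1130761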
Continuously yield new files with steps in filename as they appear. This is useful for checkpoint files or other files whose names differ just in an integer marking the number of steps and match the wildcard path_prefix + "*-[0-9]*" + path_suffix. Unlike `tf.contrib.training.checkpoints_iterator`, this implementation always starts from the oldest files (and it cannot miss any file). Note that the oldest checkpoint may be deleted anytime by Tensorflow (if set up so). It is up to the user to check that the files returned by this generator actually exist. Args: path_prefix: The directory + possible common filename prefix to the files. wait_minutes: The maximum amount of minutes to wait between files. min_steps: Skip files with lower global step. path_suffix: Common filename suffix (after steps), including possible extension dot. sleep_sec: How often to check for new files. Yields: named tuples (filename, mtime, ctime, steps) of the files as they arrive.
def stepfiles_iterator(path_prefix, wait_minutes=0, min_steps=0, path_suffix=".index", sleep_sec=10): """Continuously yield new files with steps in filename as they appear. This is useful for checkpoint files or other files whose names differ just in an integer marking the number of steps and match the wildcard path_prefix + "*-[0-9]*" + path_suffix. Unlike `tf.contrib.training.checkpoints_iterator`, this implementation always starts from the oldest files (and it cannot miss any file). Note that the oldest checkpoint may be deleted anytime by Tensorflow (if set up so). It is up to the user to check that the files returned by this generator actually exist. Args: path_prefix: The directory + possible common filename prefix to the files. wait_minutes: The maximum amount of minutes to wait between files. min_steps: Skip files with lower global step. path_suffix: Common filename suffix (after steps), including possible extension dot. sleep_sec: How often to check for new files. Yields: named tuples (filename, mtime, ctime, steps) of the files as they arrive. """ # Wildcard D*-[0-9]* does not match D/x-1, so if D is a directory let # path_prefix="D/". if not path_prefix.endswith(os.sep) and os.path.isdir(path_prefix): path_prefix += os.sep stepfiles = _read_stepfiles_list(path_prefix, path_suffix, min_steps) tf.logging.info("Found %d files with steps: %s", len(stepfiles), ", ".join(str(x.steps) for x in reversed(stepfiles))) exit_time = time.time() + wait_minutes * 60 while True: if not stepfiles and wait_minutes: tf.logging.info( "Waiting till %s if a new file matching %s*-[0-9]*%s appears", time.asctime(time.localtime(exit_time)), path_prefix, path_suffix) while True: stepfiles = _read_stepfiles_list(path_prefix, path_suffix, min_steps) if stepfiles or time.time() > exit_time: break time.sleep(sleep_sec) if not stepfiles: return stepfile = stepfiles.pop() exit_time, min_steps = (stepfile.ctime + wait_minutes * 60, stepfile.steps + 1) yield stepfile
Extract the VQA V2 annotation files to directory unless it's there.
def _get_vqa_v2_annotations(directory, annotation_url, annotation_filename="vqa_v2.tar.gz"): """Extract the VQA V2 annotation files to directory unless it's there.""" annotation_file = generator_utils.maybe_download_from_drive( directory, annotation_filename, annotation_url) with tarfile.open(annotation_file, "r:gz") as annotation_tar: annotation_tar.extractall(directory)
Extract the VQA V2 image data set to directory unless it's there.
def _get_vqa_v2_image_raw_dataset(directory, image_root_url, image_urls): """Extract the VQA V2 image data set to directory unless it's there.""" for url in image_urls: filename = os.path.basename(url) download_url = os.path.join(image_root_url, url) path = generator_utils.maybe_download(directory, filename, download_url) # Drop the ".zip" suffix explicitly; str.strip removes a character set, # not a suffix, so filename.strip(".zip") would be fragile here. unzip_dir = os.path.join(directory, filename[:-len(".zip")]) if not tf.gfile.Exists(unzip_dir): zipfile.ZipFile(path, "r").extractall(directory)
Extract the VQA V2 feature data set to directory unless it's there.
def _get_vqa_v2_image_feature_dataset( directory, feature_url, feature_filename="mscoco_feat.tar.gz"): """Extract the VQA V2 feature data set to directory unless it's there.""" feature_file = generator_utils.maybe_download_from_drive( directory, feature_filename, feature_url) with tarfile.open(feature_file, "r:gz") as feature_tar: feature_tar.extractall(directory)
Helper function for raising a value error for bad assignment.
def _parse_fail(name, var_type, value, values): """Helper function for raising a value error for bad assignment.""" raise ValueError( 'Could not parse hparam \'%s\' of type \'%s\' with value \'%s\' in %s' % (name, var_type.__name__, value, values))
Update results_dictionary with a scalar value. Used to update the results_dictionary to be returned by parse_values when encountering a clause with a scalar RHS (e.g. "s=5" or "arr[0]=5".) Mutates results_dictionary. Args: name: Name of variable in assignment ("s" or "arr"). parse_fn: Function for parsing the actual value. var_type: Type of named variable. m_dict: Dictionary constructed from regex parsing. m_dict['val']: RHS value (scalar) m_dict['index']: List index value (or None) values: Full expression being parsed results_dictionary: The dictionary being updated for return by the parsing function. Raises: ValueError: If the name has already been used.
def _process_scalar_value(name, parse_fn, var_type, m_dict, values, results_dictionary): """Update results_dictionary with a scalar value. Used to update the results_dictionary to be returned by parse_values when encountering a clause with a scalar RHS (e.g. "s=5" or "arr[0]=5".) Mutates results_dictionary. Args: name: Name of variable in assignment ("s" or "arr"). parse_fn: Function for parsing the actual value. var_type: Type of named variable. m_dict: Dictionary constructed from regex parsing. m_dict['val']: RHS value (scalar) m_dict['index']: List index value (or None) values: Full expression being parsed results_dictionary: The dictionary being updated for return by the parsing function. Raises: ValueError: If the name has already been used. """ try: parsed_value = parse_fn(m_dict['val']) except ValueError: _parse_fail(name, var_type, m_dict['val'], values) # If no index is provided if not m_dict['index']: if name in results_dictionary: _reuse_fail(name, values) results_dictionary[name] = parsed_value else: if name in results_dictionary: # The name has already been used as a scalar, then it # will be in this dictionary and map to a non-dictionary. if not isinstance(results_dictionary.get(name), dict): _reuse_fail(name, values) else: results_dictionary[name] = {} index = int(m_dict['index']) # Make sure the index position hasn't already been assigned a value. if index in results_dictionary[name]: _reuse_fail('{}[{}]'.format(name, index), values) results_dictionary[name][index] = parsed_value
Update results_dictionary from a list of values. Used to update results_dictionary to be returned by parse_values when encountering a clause with a list RHS (e.g. "arr=[1,2,3]".) Mutates results_dictionary. Args: name: Name of variable in assignment ("arr"). parse_fn: Function for parsing individual values. var_type: Type of named variable. m_dict: Dictionary constructed from regex parsing. m_dict['val']: RHS value (scalar) values: Full expression being parsed results_dictionary: The dictionary being updated for return by the parsing function. Raises: ValueError: If the name has an index or the values cannot be parsed.
def _process_list_value(name, parse_fn, var_type, m_dict, values, results_dictionary): """Update results_dictionary from a list of values. Used to update results_dictionary to be returned by parse_values when encountering a clause with a list RHS (e.g. "arr=[1,2,3]".) Mutates results_dictionary. Args: name: Name of variable in assignment ("arr"). parse_fn: Function for parsing individual values. var_type: Type of named variable. m_dict: Dictionary constructed from regex parsing. m_dict['val']: RHS value (scalar) values: Full expression being parsed results_dictionary: The dictionary being updated for return by the parsing function. Raises: ValueError: If the name has an index or the values cannot be parsed. """ if m_dict['index'] is not None: raise ValueError('Assignment of a list to a list index.') elements = filter(None, re.split('[ ,]', m_dict['vals'])) # Make sure the name hasn't already been assigned a value if name in results_dictionary: raise _reuse_fail(name, values) try: results_dictionary[name] = [parse_fn(e) for e in elements] except ValueError: _parse_fail(name, var_type, m_dict['vals'], values)
Cast hparam to the provided type, if compatible. Args: name: Name of the hparam to be cast. param_type: The type of the hparam. value: The value to be cast, if compatible. Returns: The result of casting `value` to `param_type`. Raises: ValueError: If the type of `value` is not compatible with param_type. * If `param_type` is a string type, but `value` is not. * If `param_type` is a boolean, but `value` is not, or vice versa. * If `param_type` is an integer type, but `value` is not. * If `param_type` is a float type, but `value` is not a numeric type.
def _cast_to_type_if_compatible(name, param_type, value): """Cast hparam to the provided type, if compatible. Args: name: Name of the hparam to be cast. param_type: The type of the hparam. value: The value to be cast, if compatible. Returns: The result of casting `value` to `param_type`. Raises: ValueError: If the type of `value` is not compatible with param_type. * If `param_type` is a string type, but `value` is not. * If `param_type` is a boolean, but `value` is not, or vice versa. * If `param_type` is an integer type, but `value` is not. * If `param_type` is a float type, but `value` is not a numeric type. """ fail_msg = ( "Could not cast hparam '%s' of type '%s' from value %r" % (name, param_type, value)) # Some callers use None, for which we can't do any casting/checking. :( if issubclass(param_type, type(None)): return value # Avoid converting a non-string type to a string. if (issubclass(param_type, (six.string_types, six.binary_type)) and not isinstance(value, (six.string_types, six.binary_type))): raise ValueError(fail_msg) # Avoid converting a number or string type to a boolean or vice versa. if issubclass(param_type, bool) != isinstance(value, bool): raise ValueError(fail_msg) # Avoid converting float to an integer (the reverse is fine). if (issubclass(param_type, numbers.Integral) and not isinstance(value, numbers.Integral)): raise ValueError(fail_msg) # Avoid converting a non-numeric type to a numeric type. if (issubclass(param_type, numbers.Number) and not isinstance(value, numbers.Number)): raise ValueError(fail_msg) return param_type(value)
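A behavior sketch for these casting rules: an int may widen to float, but a float must not silently truncate to int, and strings are never coerced to numbers.

print(_cast_to_type_if_compatible("learning_rate", float, 1))  # 1.0
try:
  _cast_to_type_if_compatible("num_layers", int, 0.5)
except ValueError as err:
  print(err)  # Could not cast hparam 'num_layers' ... from value 0.5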
Parses hyperparameter values from a string into a python map. `values` is a string containing comma-separated `name=value` pairs. For each pair, the value of the hyperparameter named `name` is set to `value`. If a hyperparameter name appears multiple times in `values`, a ValueError is raised (e.g. 'a=1,a=2', 'a[1]=1,a[1]=2'). If a hyperparameter name appears in both an index assignment and a scalar assignment, a ValueError is raised (e.g. 'a=[1,2,3],a[0] = 1'). The hyperparameter name may contain '.' symbols, which will result in an attribute name that is only accessible through the getattr and setattr functions. (And must first be explicitly added through add_hparam.) WARNING: Use of '.' in your variable names is allowed, but is not well supported and not recommended. The `value` in `name=value` must follow the syntax according to the type of the parameter: * Scalar integer: A Python-parsable integer value. E.g.: 1, 100, -12. * Scalar float: A Python-parsable floating point value. E.g.: 1.0, -.54e89. * Boolean: Either true or false. * Scalar string: A non-empty sequence of characters, excluding comma, spaces, and square brackets. E.g.: foo, bar_1. * List: A comma separated list of scalar values of the parameter type enclosed in square brackets. E.g.: [1,2,3], [1.0,1e-12], [high,low]. When index assignment is used, the corresponding type_map key should be the list name. E.g. for "arr[1]=0" the type_map must have the key "arr" (not "arr[1]"). Args: values: String. Comma separated list of `name=value` pairs where 'value' must follow the syntax described above. type_map: A dictionary mapping hyperparameter names to types. Note every parameter name in values must be a key in type_map. The values must conform to the types indicated, where a value V is said to conform to a type T if either V has type T, or V is a list of elements of type T. Hence, for a multidimensional parameter 'x' taking float values, 'x=[0.1,0.2]' will parse successfully if type_map['x'] = float. ignore_unknown: Bool. Whether values that are missing a type in type_map should be ignored. If set to True, a ValueError will not be raised for unknown hyperparameter type. Returns: A python map mapping each name to either: * A scalar value. * A list of scalar values. * A dictionary mapping index numbers to scalar values. (e.g. "x=5,L=[1,2],arr[1]=3" results in {'x':5,'L':[1,2],'arr':{1:3}}") Raises: ValueError: If there is a problem with input. * If `values` cannot be parsed. * If a list is assigned to a list index (e.g. 'a[1] = [1,2,3]'). * If the same rvalue is assigned two different values (e.g. 'a=1,a=2', 'a[1]=1,a[1]=2', or 'a=1,a=[1]')
def parse_values(values, type_map, ignore_unknown=False): """Parses hyperparameter values from a string into a python map. `values` is a string containing comma-separated `name=value` pairs. For each pair, the value of the hyperparameter named `name` is set to `value`. If a hyperparameter name appears multiple times in `values`, a ValueError is raised (e.g. 'a=1,a=2', 'a[1]=1,a[1]=2'). If a hyperparameter name in both an index assignment and scalar assignment, a ValueError is raised. (e.g. 'a=[1,2,3],a[0] = 1'). The hyperparameter name may contain '.' symbols, which will result in an attribute name that is only accessible through the getattr and setattr functions. (And must be first explicit added through add_hparam.) WARNING: Use of '.' in your variable names is allowed, but is not well supported and not recommended. The `value` in `name=value` must follows the syntax according to the type of the parameter: * Scalar integer: A Python-parsable integer point value. E.g.: 1, 100, -12. * Scalar float: A Python-parsable floating point value. E.g.: 1.0, -.54e89. * Boolean: Either true or false. * Scalar string: A non-empty sequence of characters, excluding comma, spaces, and square brackets. E.g.: foo, bar_1. * List: A comma separated list of scalar values of the parameter type enclosed in square brackets. E.g.: [1,2,3], [1.0,1e-12], [high,low]. When index assignment is used, the corresponding type_map key should be the list name. E.g. for "arr[1]=0" the type_map must have the key "arr" (not "arr[1]"). Args: values: String. Comma separated list of `name=value` pairs where 'value' must follow the syntax described above. type_map: A dictionary mapping hyperparameter names to types. Note every parameter name in values must be a key in type_map. The values must conform to the types indicated, where a value V is said to conform to a type T if either V has type T, or V is a list of elements of type T. Hence, for a multidimensional parameter 'x' taking float values, 'x=[0.1,0.2]' will parse successfully if type_map['x'] = float. ignore_unknown: Bool. Whether values that are missing a type in type_map should be ignored. If set to True, a ValueError will not be raised for unknown hyperparameter type. Returns: A python map mapping each name to either: * A scalar value. * A list of scalar values. * A dictionary mapping index numbers to scalar values. (e.g. "x=5,L=[1,2],arr[1]=3" results in {'x':5,'L':[1,2],'arr':{1:3}}") Raises: ValueError: If there is a problem with input. * If `values` cannot be parsed. * If a list is assigned to a list index (e.g. 'a[1] = [1,2,3]'). * If the same rvalue is assigned two different values (e.g. 'a=1,a=2', 'a[1]=1,a[1]=2', or 'a=1,a=[1]') """ results_dictionary = {} pos = 0 while pos < len(values): m = PARAM_RE.match(values, pos) if not m: raise ValueError('Malformed hyperparameter value: %s' % values[pos:]) # Check that there is a comma between parameters and move past it. pos = m.end() # Parse the values. 
m_dict = m.groupdict() name = m_dict['name'] if name not in type_map: if ignore_unknown: continue raise ValueError('Unknown hyperparameter type for %s' % name) type_ = type_map[name] # Set up correct parsing function (depending on whether type_ is a bool) if type_ == bool: def parse_bool(value): if value in ['true', 'True']: return True elif value in ['false', 'False']: return False else: try: return bool(int(value)) except ValueError: _parse_fail(name, type_, value, values) parse = parse_bool else: parse = type_ # If a single value is provided if m_dict['val'] is not None: _process_scalar_value(name, parse, type_, m_dict, values, results_dictionary) # If the assigned value is a list: elif m_dict['vals'] is not None: _process_list_value(name, parse, type_, m_dict, values, results_dictionary) else: # Not assigned a list or value _parse_fail(name, type_, '', values) return results_dictionary
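Usage example matching the docstring above:

type_map = {"x": int, "L": float, "arr": int}
print(parse_values("x=5,L=[1,2],arr[1]=3", type_map))
# {'x': 5, 'L': [1.0, 2.0], 'arr': {1: 3}}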
Adds {name, value} pair to hyperparameters. Args: name: Name of the hyperparameter. value: Value of the hyperparameter. Can be one of the following types: int, float, string, int list, float list, or string list. Raises: ValueError: if one of the arguments is invalid.
def add_hparam(self, name, value): """Adds {name, value} pair to hyperparameters. Args: name: Name of the hyperparameter. value: Value of the hyperparameter. Can be one of the following types: int, float, string, int list, float list, or string list. Raises: ValueError: if one of the arguments is invalid. """ # Keys in kwargs are unique, but 'name' could be the name of a pre-existing # attribute of this object. In that case we refuse to use it as a # hyperparameter name. if getattr(self, name, None) is not None: raise ValueError('Hyperparameter name is reserved: %s' % name) if isinstance(value, (list, tuple)): if not value: raise ValueError( 'Multi-valued hyperparameters cannot be empty: %s' % name) self._hparam_types[name] = (type(value[0]), True) else: self._hparam_types[name] = (type(value), False) setattr(self, name, value)
Set the value of an existing hyperparameter. This function verifies that the type of the value matches the type of the existing hyperparameter. Args: name: Name of the hyperparameter. value: New value of the hyperparameter. Raises: KeyError: If the hyperparameter doesn't exist. ValueError: If there is a type mismatch.
def set_hparam(self, name, value): """Set the value of an existing hyperparameter. This function verifies that the type of the value matches the type of the existing hyperparameter. Args: name: Name of the hyperparameter. value: New value of the hyperparameter. Raises: KeyError: If the hyperparameter doesn't exist. ValueError: If there is a type mismatch. """ param_type, is_list = self._hparam_types[name] if isinstance(value, list): if not is_list: raise ValueError( 'Must not pass a list for single-valued parameter: %s' % name) setattr(self, name, [ _cast_to_type_if_compatible(name, param_type, v) for v in value]) else: if is_list: raise ValueError( 'Must pass a list for multi-valued parameter: %s.' % name) setattr(self, name, _cast_to_type_if_compatible(name, param_type, value))
Removes the hyperparameter with key 'name'. Does nothing if it isn't present. Args: name: Name of the hyperparameter.
def del_hparam(self, name): """Removes the hyperparameter with key 'name'. Does nothing if it isn't present. Args: name: Name of the hyperparameter. """ if hasattr(self, name): delattr(self, name) del self._hparam_types[name]
Override existing hyperparameter values, parsing new values from a string. See parse_values for more detail on the allowed format for values. Args: values: String. Comma separated list of `name=value` pairs where 'value' must follow the syntax described above. Returns: The `HParams` instance. Raises: ValueError: If `values` cannot be parsed or a hyperparameter in `values` doesn't exist.
def parse(self, values): """Override existing hyperparameter values, parsing new values from a string. See parse_values for more detail on the allowed format for values. Args: values: String. Comma separated list of `name=value` pairs where 'value' must follow the syntax described above. Returns: The `HParams` instance. Raises: ValueError: If `values` cannot be parsed or a hyperparameter in `values` doesn't exist. """ type_map = {} for name, t in self._hparam_types.items(): param_type, _ = t type_map[name] = param_type values_map = parse_values(values, type_map) return self.override_from_dict(values_map)
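A hedged end-to-end sketch (assuming the usual HParams constructor, which registers each keyword argument via add_hparam):

hp = HParams(learning_rate=0.1, hidden_size=[512, 512])
hp.parse("learning_rate=0.3,hidden_size=[256,256]")
print(hp.learning_rate, hp.hidden_size)  # 0.3 [256, 256]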
Override existing hyperparameter values, parsing new values from a dictionary. Args: values_dict: Dictionary of name:value pairs. Returns: The `HParams` instance. Raises: KeyError: If a hyperparameter in `values_dict` doesn't exist. ValueError: If `values_dict` cannot be parsed.
def override_from_dict(self, values_dict): """Override existing hyperparameter values, parsing new values from a dictionary. Args: values_dict: Dictionary of name:value pairs. Returns: The `HParams` instance. Raises: KeyError: If a hyperparameter in `values_dict` doesn't exist. ValueError: If `values_dict` cannot be parsed. """ for name, value in values_dict.items(): self.set_hparam(name, value) return self
Serializes the hyperparameters into JSON. Args: indent: If a non-negative integer, JSON array elements and object members will be pretty-printed with that indent level. An indent level of 0, or negative, will only insert newlines. `None` (the default) selects the most compact representation. separators: Optional `(item_separator, key_separator)` tuple. Default is `(', ', ': ')`. sort_keys: If `True`, the output dictionaries will be sorted by key. Returns: A JSON string.
def to_json(self, indent=None, separators=None, sort_keys=False): """Serializes the hyperparameters into JSON. Args: indent: If a non-negative integer, JSON array elements and object members will be pretty-printed with that indent level. An indent level of 0, or negative, will only insert newlines. `None` (the default) selects the most compact representation. separators: Optional `(item_separator, key_separator)` tuple. Default is `(', ', ': ')`. sort_keys: If `True`, the output dictionaries will be sorted by key. Returns: A JSON string. """ def remove_callables(x): """Omit callable elements from input with arbitrary nesting.""" if isinstance(x, dict): return {k: remove_callables(v) for k, v in six.iteritems(x) if not callable(v)} elif isinstance(x, list): return [remove_callables(i) for i in x if not callable(i)] return x return json.dumps( remove_callables(self.values()), indent=indent, separators=separators, sort_keys=sort_keys)
Override existing hyperparameter values, parsing new values from a json object. Args: values_json: String containing a json object of name:value pairs. Returns: The `HParams` instance. Raises: KeyError: If a hyperparameter in `values_json` doesn't exist. ValueError: If `values_json` cannot be parsed.
def parse_json(self, values_json): """Override existing hyperparameter values, parsing new values from a json object. Args: values_json: String containing a json object of name:value pairs. Returns: The `HParams` instance. Raises: KeyError: If a hyperparameter in `values_json` doesn't exist. ValueError: If `values_json` cannot be parsed. """ values_map = json.loads(values_json) return self.override_from_dict(values_map)
Return the hyperparameter values as a Python dictionary. Returns: A dictionary with hyperparameter names as keys. The values are the hyperparameter values.
def values(self): """Return the hyperparameter values as a Python dictionary. Returns: A dictionary with hyperparameter names as keys. The values are the hyperparameter values. """ return {n: getattr(self, n) for n in self._hparam_types.keys()}
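For example (illustrative names):

hparams = HParams(learning_rate=0.1, num_layers=2)
assert hparams.values() == {"learning_rate": 0.1, "num_layers": 2}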
Returns the value of `key` if it exists, else `default`.
def get(self, key, default=None): """Returns the value of `key` if it exists, else `default`.""" if key in self._hparam_types: # Ensure that default is compatible with the parameter type. if default is not None: param_type, is_param_list = self._hparam_types[key] type_str = 'list<%s>' % param_type if is_param_list else str(param_type) fail_msg = ("Hparam '%s' of type '%s' is incompatible with " 'default=%s' % (key, type_str, default)) is_default_list = isinstance(default, list) if is_param_list != is_default_list: raise ValueError(fail_msg) try: if is_default_list: for value in default: _cast_to_type_if_compatible(key, param_type, value) else: _cast_to_type_if_compatible(key, param_type, default) except ValueError as e: raise ValueError('%s. %s' % (fail_msg, e)) return getattr(self, key) return default
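A sketch of the lookup behavior, including the type check on `default` (illustrative names):

hparams = HParams(learning_rate=0.1, hidden_sizes=[32, 64])
assert hparams.get("learning_rate") == 0.1
# Unknown keys fall back to the default without any type checking.
assert hparams.get("momentum", default=0.9) == 0.9
# A default whose shape disagrees with the stored hyperparameter raises:
# hparams.get("hidden_sizes", default=32)  # ValueError: list vs. scalar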
Returns the field name given parameter type and is_list. Args: param_type: Data type of the hparam. is_list: Whether this is a list. Returns: A string representation of the field name. Raises: ValueError: If parameter type is not recognized.
def _get_kind_name(param_type, is_list): """Returns the field name given parameter type and is_list. Args: param_type: Data type of the hparam. is_list: Whether this is a list. Returns: A string representation of the field name. Raises: ValueError: If parameter type is not recognized. """ if issubclass(param_type, bool): # This check must happen before issubclass(param_type, six.integer_types), # since Python considers bool to be a subclass of int. typename = 'bool' elif issubclass(param_type, six.integer_types): # Setting 'int' and 'long' types to be 'int64' to ensure the type is # compatible with both Python2 and Python3. typename = 'int64' elif issubclass(param_type, (six.string_types, six.binary_type)): # Setting 'string' and 'bytes' types to be 'bytes' to ensure the type is # compatible with both Python2 and Python3. typename = 'bytes' elif issubclass(param_type, float): typename = 'float' else: raise ValueError('Unsupported parameter type: %s' % str(param_type)) suffix = 'list' if is_list else 'value' return '_'.join([typename, suffix])
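The mapping is easiest to see on a few concrete calls (note that bool is handled before int precisely because Python treats bool as a subclass of int):

assert _get_kind_name(bool, is_list=False) == "bool_value"
assert _get_kind_name(int, is_list=True) == "int64_list"
assert _get_kind_name(str, is_list=False) == "bytes_value"
assert _get_kind_name(float, is_list=True) == "float_list"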
Returns the visualizations for query. Args: query: The query to process. Returns: A dictionary of results with processing and graph visualizations.
def process(self, query):
  """Returns the visualizations for query.

  Args:
    query: The query to process.

  Returns:
    A dictionary of results with processing and graph visualizations.
  """
  tf.logging.info("Processing new query [%s]" % query)

  # Create the new TFDBG hook directory.
  hook_dir = "/tmp/t2t_server_dump/request_%d" % int(time.time())
  os.makedirs(hook_dir)
  hooks = [tfdbg.DumpingDebugHook(hook_dir, watch_fn=topk_watch_fn)]

  # TODO(kstevens): This is extremely hacky and slow for responding to
  # queries.  Figure out a reasonable way to pre-load the model weights before
  # forking and run queries through the estimator quickly.
  def server_input_fn():
    """Generator that returns just the current query."""
    for _ in range(1):
      input_ids = self.source_vocab.encode(query)
      input_ids.append(text_encoder.EOS_ID)
      x = [1, 100, len(input_ids)] + input_ids
      x += [0] * (self.const_array_size - len(x))
      d = {
          "inputs": np.array(x).astype(np.int32),
      }
      yield d

  def input_fn():
    """Generator that returns just the current query."""
    gen_fn = decoding.make_input_fn_from_generator(server_input_fn())
    example = gen_fn()
    # TODO(kstevens): Make this method public
    # pylint: disable=protected-access
    return decoding._interactive_input_tensor_to_features_dict(
        example, self.hparams)

  # Make the prediction for the current query.
  result_iter = self.estimator.predict(input_fn, hooks=hooks)
  result = None
  for result in result_iter:
    break

  # Extract the beam search information by reading the dumped TFDBG event
  # tensors.  We first read and record the per-step beam sequences, then
  # record the beam scores.  Afterwards we align the two sets of values to
  # create the full graph vertices and edges.
  decoding_graph = graph.Graph()
  run_dirs = sorted(glob.glob(os.path.join(hook_dir, "run_*")))
  for run_dir in run_dirs:
    # Record the different completed and active beam sequence ids.
    alive_sequences = deque()
    finished_sequences = deque()

    # Make the root vertex since it always needs to exist.
    decoding_graph.get_vertex(sequence_key([0]))

    # Create the initial vertices and edges for the active and finished
    # sequences.  We uniquely define each vertex using its full sequence path
    # as a string to ensure there are no collisions when the same step has
    # two instances of an output id.
    dump_dir = tfdbg.DebugDumpDir(run_dir, validate=False)
    seq_datums = dump_dir.find(predicate=seq_filter)
    for seq_datum in seq_datums:
      sequences = np.array(seq_datum.get_tensor()).astype(int)[0]
      if "alive" in seq_datum.node_name:
        alive_sequences.append(sequences)
      if "finished" in seq_datum.node_name:
        finished_sequences.append(sequences)

      for sequence in sequences:
        pieces = self.targets_vocab.decode_list(sequence)
        index = sequence[-1]
        if index == 0:
          continue

        parent = decoding_graph.get_vertex(sequence_key(sequence[:-1]))
        current = decoding_graph.get_vertex(sequence_key(sequence))

        edge = decoding_graph.add_edge(parent, current)
        edge.data["label"] = pieces[-1]
        edge.data["label_id"] = index
        # Coerce the type to be a python bool.  Numpy bools can't be easily
        # converted to JSON.
        edge.data["completed"] = bool(index == 1)

    # Examine the score results and store the scores with the associated
    # edges in the graph.  We fetch the vertices (and relevant edges) by
    # looking into the saved beam sequences stored above.
score_datums = dump_dir.find(predicate=scores_filter) for score_datum in score_datums: if "alive" in score_datum.node_name: sequences = alive_sequences.popleft() if "finished" in score_datum.node_name: sequences = finished_sequences.popleft() scores = np.array(score_datum.get_tensor()).astype(float)[0] for i, score in enumerate(scores): sequence = sequences[i] if sequence[-1] == 0: continue vertex = decoding_graph.get_vertex(sequence_key(sequence)) edge = decoding_graph.edges[vertex.in_edges[0]] edge.data["score"] = score edge.data["log_probability"] = score edge.data["total_log_probability"] = score # Delete the hook dir to save disk space shutil.rmtree(hook_dir) # Create the graph visualization data structure. graph_vis = { "visualization_name": "graph", "title": "Graph", "name": "graph", "search_graph": decoding_graph.to_dict(), } # Create the processing visualization data structure. # TODO(kstevens): Make this method public # pylint: disable=protected-access output_ids = decoding._save_until_eos(result["outputs"].flatten(), False) output_pieces = self.targets_vocab.decode_list(output_ids) output_token = [{"text": piece} for piece in output_pieces] output = self.targets_vocab.decode(output_ids) source_steps = [{ "step_name": "Initial", "segment": [{ "text": query }], }] target_steps = [{ "step_name": "Initial", "segment": output_token, }, { "step_name": "Final", "segment": [{ "text": output }], }] processing_vis = { "visualization_name": "processing", "title": "Processing", "name": "processing", "query_processing": { "source_processing": source_steps, "target_processing": target_steps, }, } return { "result": [processing_vis, graph_vis], }
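A hedged sketch of how a caller might drive this method (it assumes an instance of this class that has already been wired up with an estimator, source/target vocabularies, hparams, and const_array_size; the query string is illustrative):

# `processor` is an already-initialized instance of this class.
results = processor.process("Hello world")
for vis in results["result"]:
  print(vis["visualization_name"], vis["title"])
# -> processing Processing
# -> graph Graph
search_graph = results["result"][1]["search_graph"]  # beam-search DAG as a dict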