text_prompt (string, 100–17.7k chars, nullable) | code_prompt (string, 7–9.86k chars, nullable) |
---|---|
<SYSTEM_TASK:>
Set of hyperparameters for lm1b packed following tpu params.
<END_TASK>
<USER_TASK:>
Description:
def transformer_base_vq_ada_32ex_packed():
"""Set of hyperparameters for lm1b packed following tpu params.""" |
hparams = transformer_base_v2()
expert_utils.update_hparams_for_vq_gating(hparams)
hparams.moe_num_experts = 32
hparams.gating_type = "vq"
# batch_size is measured in tokens; at sequence length 256 this is roughly
# 20 sequences per batch
hparams.batch_size = 5072
hparams.ffn_layer = "local_moe"
hparams.shared_embedding_and_softmax_weights = False
hparams.learning_rate_warmup_steps = 10000
# one epoch for languagemodel_lm1b32k_packed = 27200 steps w/ bsize 128
hparams.learning_rate_decay_steps = 27200
hparams.num_heads = 4
hparams.num_blocks = 1
hparams.moe_k = 1
hparams.num_decoder_layers = 6
hparams.label_smoothing = 0.
hparams.layer_prepostprocess_dropout = 0.1
hparams.layer_postprocess_sequence = "dan"
hparams.layer_preprocess_sequence = "none"
hparams.weight_decay = 1e-06
hparams.attention_dropout = 0.1
hparams.optimizer = "Adafactor"
hparams.learning_rate_schedule = "linear_warmup*rsqrt_decay*linear_decay"
hparams.activation_dtype = "float32"
hparams.learning_rate = 0.1
hparams.learning_rate_constant = 1.0
return hparams |
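These hparams functions are registered by name in Tensor2Tensor and looked up at training time; a minimal usage sketch, assuming a standard tensor2tensor installation:

from tensor2tensor.utils import registry

hparams = registry.hparams("transformer_base_vq_ada_32ex_packed")
print(hparams.batch_size)  # 5072 (measured in tokens)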
<SYSTEM_TASK:>
Base parameters for Transformer model.
<END_TASK>
<USER_TASK:>
Description:
def transformer_base_v3():
"""Base parameters for Transformer model.""" |
# Update parameters here, then occasionally cut a versioned set, e.g.
# transformer_base_v2.
hparams = transformer_base_v2()
hparams.optimizer_adam_beta2 = 0.997
# New way of specifying learning rate schedule.
# Equivalent to previous version.
hparams.learning_rate_schedule = (
"constant*linear_warmup*rsqrt_decay*rsqrt_hidden_size")
hparams.learning_rate_constant = 2.0
return hparams |
<SYSTEM_TASK:>
HParams for transformer big model on WMT.
<END_TASK>
<USER_TASK:>
Description:
def transformer_big():
"""HParams for transformer big model on WMT.""" |
hparams = transformer_base()
hparams.hidden_size = 1024
hparams.filter_size = 4096
# Reduce batch size from 4096 to 2048 so the model can be trained on a GPU
# with 12 GB of memory, e.g. an NVIDIA TITAN V.
hparams.batch_size = 2048
hparams.num_heads = 16
hparams.layer_prepostprocess_dropout = 0.3
return hparams |
<SYSTEM_TASK:>
Hparams for transformer on LM for finetuning on text class problems.
<END_TASK>
<USER_TASK:>
Description:
def transformer_tall_finetune_textclass():
"""Hparams for transformer on LM for finetuning on text class problems.""" |
hparams = transformer_tall()
hparams.learning_rate_constant = 6.25e-5
hparams.learning_rate_schedule = ("linear_warmup*constant*linear_decay")
hparams.multiproblem_schedule_max_examples = 0
hparams.multiproblem_target_eval_only = True
hparams.learning_rate_warmup_steps = 50
# Set train steps to learning_rate_decay_steps or less
hparams.learning_rate_decay_steps = 25000
hparams.multiproblem_reweight_label_loss = True
hparams.multiproblem_label_weight = 0.95
return hparams |
<SYSTEM_TASK:>
Hparams for transformer on LM pretraining on TPU, large model.
<END_TASK>
<USER_TASK:>
Description:
def transformer_tall_pretrain_lm_tpu_adafactor_large():
"""Hparams for transformer on LM pretraining on TPU, large model.""" |
hparams = transformer_tall_pretrain_lm_tpu_adafactor()
hparams.hidden_size = 1024
hparams.num_heads = 16
hparams.filter_size = 32768 # max fitting in 16G memory is 49152, batch 2
hparams.batch_size = 4
hparams.multiproblem_mixing_schedule = "constant"
# Task order: lm/en-de/en-fr/en-ro/de-en/fr-en/ro-en/cnndm/mnli/squad.
hparams.multiproblem_per_task_threshold = "320,80,160,1,80,160,2,20,10,5"
return hparams |
<SYSTEM_TASK:>
Hparams for transformer on LM pretraining on TPU with AdamW.
<END_TASK>
<USER_TASK:>
Description:
def transformer_tall_pretrain_lm_tpu():
"""Hparams for transformer on LM pretraining on TPU with AdamW.""" |
hparams = transformer_tall_pretrain_lm_tpu_adafactor()
# Optimizer gets reset in update_hparams_for_tpu so we set it again here.
hparams.learning_rate_constant = 2e-4
hparams.learning_rate_schedule = ("linear_warmup * constant * cosdecay")
hparams.optimizer = "adam_w"
return hparams |
<SYSTEM_TASK:>
HParams for transformer base model for single GPU.
<END_TASK>
<USER_TASK:>
Description:
def transformer_base_single_gpu():
"""HParams for transformer base model for single GPU.""" |
hparams = transformer_base()
hparams.batch_size = 1024
hparams.learning_rate_schedule = "constant*linear_warmup*rsqrt_decay"
hparams.learning_rate_constant = 0.1
hparams.learning_rate_warmup_steps = 16000
return hparams |
<SYSTEM_TASK:>
Use relative position embeddings instead of absolute position encodings.
<END_TASK>
<USER_TASK:>
Description:
def transformer_relative():
"""Use relative position embeddings instead of absolute position encodings.""" |
hparams = transformer_base()
hparams.pos = None
hparams.self_attention_type = "dot_product_relative"
hparams.max_relative_position = 20
return hparams |
<SYSTEM_TASK:>
Change hparams to be compatible with TPU training.
<END_TASK>
<USER_TASK:>
Description:
def update_hparams_for_tpu(hparams):
"""Change hparams to be compatible with TPU training.""" |
# Adafactor uses less memory than Adam.
# Switch to Adafactor with its recommended learning rate scheme.
hparams.optimizer = "Adafactor"
hparams.learning_rate_schedule = "rsqrt_decay"
hparams.learning_rate_warmup_steps = 10000
# Avoid an expensive concat on TPU.
# >1 shards helps with faster parameter distribution on multi-GPU machines
hparams.symbol_modality_num_shards = 1
# Adaptive batch sizes and sequence lengths are not supported on TPU.
# Instead, every batch has the same sequence length and the same batch size.
# Longer sequences are dropped and shorter ones are padded.
#
# It is therefore suggested to use a problem where examples have been combined
# to a longer length, e.g. the "_packed" problems.
#
# For problems with variable sequence lengths, this parameter controls the
# maximum sequence length. Shorter sequences are padded and longer ones
# are dropped.
#
# For problems with fixed sequence lengths - e.g. the "_packed" problems,
# this hyperparameter is ignored.
hparams.max_length = 64
# TPUs have less memory than GPUs, so decrease the batch size
hparams.batch_size = 2048
# Using noise broadcast in the dropout layers saves memory during training.
hparams.attention_dropout_broadcast_dims = "0,1" # batch, heads
hparams.relu_dropout_broadcast_dims = "1" # length
hparams.layer_prepostprocess_dropout_broadcast_dims = "1" # length
return hparams |
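A minimal sketch of how this helper is typically composed with a base set (this mirrors T2T's transformer_tpu pattern; the wrapper name below is illustrative):

def transformer_base_tpu_sketch():  # hypothetical name, for illustration
  hparams = transformer_base()
  update_hparams_for_tpu(hparams)
  return hparams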
<SYSTEM_TASK:>
No dropout, no label smoothing, and no max_length limit.
<END_TASK>
<USER_TASK:>
Description:
def transformer_clean():
"""No dropout, label smoothing, max_length.""" |
hparams = transformer_base_v2()
hparams.label_smoothing = 0.0
hparams.layer_prepostprocess_dropout = 0.0
hparams.attention_dropout = 0.0
hparams.relu_dropout = 0.0
hparams.max_length = 0
return hparams |
<SYSTEM_TASK:>
Hparams for machine translation with ~1.1B parameters.
<END_TASK>
<USER_TASK:>
Description:
def transformer_tpu_1b():
"""Hparams for machine translation with ~1.1B parameters.""" |
hparams = transformer_tpu()
hparams.hidden_size = 2048
hparams.filter_size = 8192
hparams.num_hidden_layers = 8
# smaller batch size to avoid OOM
hparams.batch_size = 1024
hparams.activation_dtype = "bfloat16"
hparams.weight_dtype = "bfloat16"
# maximize number of parameters relative to computation by not sharing.
hparams.shared_embedding_and_softmax_weights = False
return hparams |
<SYSTEM_TASK:>
HParams for training languagemodel_wikitext103_l4k with memory.
<END_TASK>
<USER_TASK:>
Description:
def transformer_wikitext103_l4k_memory_v0():
"""HParams for training languagemodel_wikitext103_l4k with memory.""" |
hparams = transformer_wikitext103_l4k_v0()
hparams.split_targets_chunk_length = 64
hparams.split_targets_max_chunks = 64
hparams.split_targets_strided_training = True
hparams.add_hparam("memory_type", "transformer_xl")
# The hparams specify batch size *before* chunking, but we want to have a
# consistent 4K batch size *after* chunking to fully utilize the hardware.
target_tokens_per_batch = 4096
hparams.batch_size = int(target_tokens_per_batch * (
hparams.max_length / hparams.split_targets_chunk_length)) # 262144
hparams.pos = None
hparams.self_attention_type = "dot_product_relative"
hparams.max_relative_position = 2 * hparams.split_targets_chunk_length
hparams.add_hparam("unconditional", True)
hparams.add_hparam("recurrent_memory_batch_size", 0) # 0 = try to guess
# By default, cache one chunk only (like Transformer-XL)
hparams.add_hparam("num_memory_items", hparams.split_targets_chunk_length)
return hparams |
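The batch-size arithmetic above can be checked in pure Python; max_length = 4096 here is an assumption for the l4k problem, consistent with the inline # 262144 note:

target_tokens_per_batch = 4096
max_length = 4096
split_targets_chunk_length = 64
batch_size = int(target_tokens_per_batch *
                 (max_length / split_targets_chunk_length))
assert batch_size == 262144  # tokens before chunking; 4096 per batch after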
<SYSTEM_TASK:>
HParams for training languagemodel_wikitext103_l16k with memory.
<END_TASK>
<USER_TASK:>
Description:
def transformer_wikitext103_l16k_memory_v0():
"""HParams for training languagemodel_wikitext103_l16k with memory.""" |
hparams = transformer_wikitext103_l4k_memory_v0()
hparams.max_length = 16384
hparams.split_targets_chunk_length = 64
hparams.split_targets_max_chunks = int(
hparams.max_length / hparams.split_targets_chunk_length)
# The hparams specify batch size *before* chunking, but we want to have a
# consistent 4K batch size *after* chunking to fully utilize the hardware.
target_tokens_per_batch = 4096
hparams.batch_size = int(target_tokens_per_batch * (
hparams.max_length / hparams.split_targets_chunk_length))
hparams.max_relative_position = 2 * hparams.split_targets_chunk_length
return hparams |
<SYSTEM_TASK:>
HParams for training image_cifar10_plain_gen_flat_rev with memory.
<END_TASK>
<USER_TASK:>
Description:
def transformer_cifar10_memory_v0():
"""HParams for training image_cifar10_plain_gen_flat_rev with memory.""" |
hparams = transformer_wikitext103_l4k_memory_v0()
hparams.num_hidden_layers = 6
hparams.max_length = 32 * 32 * 3
hparams.split_targets_chunk_length = 64 * 3
hparams.split_targets_max_chunks = int(
hparams.max_length / hparams.split_targets_chunk_length)
hparams.num_memory_items = 128 * 3
# Since this is an image problem, batch size refers to examples (not tokens)
target_images_per_batch = 4
hparams.batch_size = int(target_images_per_batch * (
hparams.max_length / hparams.split_targets_chunk_length))
# The recurrent memory needs to know the actual batch size (in sequences)
hparams.recurrent_memory_batch_size = hparams.batch_size
hparams.max_relative_position = (
hparams.num_memory_items + hparams.split_targets_chunk_length)
return hparams |
<SYSTEM_TASK:>
Local within block self attention.
<END_TASK>
<USER_TASK:>
Description:
def local_within_block_attention(x,
self_attention_bias,
hparams,
attention_type="local_within_block_mask_right",
q_padding="VALID",
kv_padding="VALID"):
"""Local within block self attention.""" |
x_new, x_shape, is_4d = maybe_reshape_4d_to_3d(x)
with tf.variable_scope("local_within_block"):
y = common_attention.multihead_attention(
common_layers.layer_preprocess(x_new, hparams),
None,
self_attention_bias,
hparams.attention_key_channels or hparams.hidden_size,
hparams.attention_value_channels or hparams.hidden_size,
hparams.hidden_size,
hparams.num_heads,
hparams.attention_dropout,
attention_type=attention_type,
block_width=hparams.block_width,
block_length=hparams.block_length,
q_padding=q_padding,
kv_padding=kv_padding,
q_filter_width=hparams.q_filter_width,
kv_filter_width=hparams.kv_filter_width,
name="local_within_block")
if is_4d:
y = tf.reshape(y, x_shape)
return y |
<SYSTEM_TASK:>
Build the attention mask for dilated 1-D attention.
<END_TASK>
<USER_TASK:>
Description:
def get_dilated_1d_attention_mask(
num_heads, block_size,
num_blocks, memory_size, gap_size,
name="dilated_mask"):
"""Dilated attention with a masking strategy.""" |
mask = np.ones((num_heads, block_size, 2*block_size), dtype=bool)
# Now go over every row to assign the right memory blocks.
for i in range(block_size):
visible = 2*block_size - (block_size-i)
# You always attend to yourself, set the mask for that
mask[:, i, -(block_size - i)] = 0
# Maybe num_blocks can be automatically calculated?
for j in range(num_blocks):
for k in range(memory_size):
index = ((gap_size + memory_size)*j) + k
if index >= visible:
break
mask[:, i, -(index + block_size - i + 1)] = 0 # Verify
# adding a num blocks dimension
mask = np.expand_dims(mask, axis=1)
return tf.constant(mask, dtype=tf.int32, name=name) |
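A NumPy-only rendering of the same masking loop, useful for eyeballing which positions each query row can attend to (toy sizes chosen for illustration):

import numpy as np

num_heads, block_size = 1, 4
num_blocks, memory_size, gap_size = 2, 2, 1
mask = np.ones((num_heads, block_size, 2 * block_size), dtype=bool)
for i in range(block_size):
  visible = 2 * block_size - (block_size - i)
  mask[:, i, -(block_size - i)] = 0  # always attend to yourself
  for j in range(num_blocks):
    for k in range(memory_size):
      index = (gap_size + memory_size) * j + k
      if index >= visible:
        break
      mask[:, i, -(index + block_size - i + 1)] = 0
print(mask[0].astype(int))  # 0 marks positions a query may attend to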
<SYSTEM_TASK:>
Multi layer transformer.
<END_TASK>
<USER_TASK:>
Description:
def transformer_decoder_layers(inputs,
encoder_output,
num_layers,
hparams,
self_attention_bias=None,
encoder_decoder_attention_bias=None,
attention_type=AttentionType.LOCAL_2D,
losses=None,
name="transformer"):
"""Multi layer transformer.""" |
x = inputs
x = tf.nn.dropout(x, 1.0 - hparams.layer_prepostprocess_dropout)
if attention_type == AttentionType.DILATED:
assert len(hparams.gap_sizes) == num_layers
for layer in range(num_layers):
with tf.variable_scope("%s_layer_%d" % (name, layer)):
# self-attention + skip connections
if attention_type == AttentionType.LOCAL_2D:
y = local_attention_2d(common_layers.layer_preprocess(x, hparams),
hparams,
attention_type="masked_local_attention_2d")
elif attention_type == AttentionType.LOCAL_1D:
y = local_attention_1d(common_layers.layer_preprocess(x, hparams),
hparams,
attention_type="local_mask_right",
q_padding="LEFT", kv_padding="LEFT")
elif attention_type == AttentionType.RELATIVE_LOCAL_1D:
y = local_attention_1d(
common_layers.layer_preprocess(x, hparams),
hparams,
attention_type="local_relative_mask_right",
q_padding="LEFT",
kv_padding="LEFT")
elif attention_type == AttentionType.NON_CAUSAL_1D:
y = local_attention_1d(common_layers.layer_preprocess(x, hparams),
hparams,
attention_type="local_unmasked",
q_padding="VALID", kv_padding="VALID")
elif attention_type == AttentionType.LOCAL_BLOCK:
y = local_within_block_attention(
common_layers.layer_preprocess(x, hparams),
self_attention_bias, hparams,
attention_type="local_within_block_mask_right",
q_padding="LEFT", kv_padding="LEFT")
elif attention_type == AttentionType.GLOCAL:
y = local_global_attention(common_layers.layer_preprocess(x, hparams),
self_attention_bias, hparams,
q_padding="LEFT", kv_padding="LEFT")
elif attention_type == AttentionType.DILATED:
y = dilated_attention_1d(common_layers.layer_preprocess(x, hparams),
hparams, q_padding="LEFT",
kv_padding="LEFT",
gap_size=hparams.gap_sizes[layer])
elif attention_type == AttentionType.GLOBAL:
y = full_self_attention(common_layers.layer_preprocess(x, hparams),
self_attention_bias, hparams,
q_padding="LEFT", kv_padding="LEFT")
x = common_layers.layer_postprocess(x, y, hparams)
# enc-dec attention + skip connections
if encoder_output is not None:
y = encdec_attention_1d(common_layers.layer_preprocess(x, hparams),
encoder_output,
encoder_decoder_attention_bias,
hparams)
x = common_layers.layer_postprocess(x, y, hparams)
# feed-fwd layers + skip connections
y = ffn_layer(common_layers.layer_preprocess(x, hparams), hparams,
losses=losses)
x = common_layers.layer_postprocess(x, y, hparams)
return common_layers.layer_preprocess(x, hparams) |
<SYSTEM_TASK:>
Multi layer transformer encoder.
<END_TASK>
<USER_TASK:>
Description:
def transformer_encoder_layers(inputs,
num_layers,
hparams,
attention_type=AttentionType.GLOBAL,
self_attention_bias=None,
q_padding="VALID",
kv_padding="VALID",
name="transformer"):
"""Multi layer transformer encoder.""" |
x = inputs
x = tf.nn.dropout(x, 1.0 - hparams.layer_prepostprocess_dropout)
for layer in range(num_layers):
# attention layers + skip connections
with tf.variable_scope("%s_layer_%d" % (name, layer)):
if attention_type == AttentionType.LOCAL_2D:
y = local_attention_2d(common_layers.layer_preprocess(x, hparams),
hparams,
attention_type="local_attention_2d")
elif attention_type == AttentionType.LOCAL_1D:
y = local_attention_1d(common_layers.layer_preprocess(x, hparams),
hparams,
attention_type="local_unmasked",
q_padding=q_padding, kv_padding=kv_padding)
elif attention_type == AttentionType.GLOBAL:
y = full_self_attention(common_layers.layer_preprocess(x, hparams),
self_attention_bias, hparams,
q_padding=q_padding, kv_padding=kv_padding)
x = common_layers.layer_postprocess(x, y, hparams)
# feed-fwd layer + skip connections
y = ffn_layer(common_layers.layer_preprocess(x, hparams), hparams)
x = common_layers.layer_postprocess(x, y, hparams)
return common_layers.layer_preprocess(x, hparams) |
<SYSTEM_TASK:>
Feed-forward (ffn) layer for the transformer.
<END_TASK>
<USER_TASK:>
Description:
def ffn_layer(x, hparams, losses=None):
"""ffn layer transformer.""" |
with tf.variable_scope("ffn"):
if hparams.ffn_layer == "none":
return x
if hparams.ffn_layer == "conv_hidden_relu":
y = common_layers.dense_relu_dense(
x,
hparams.filter_size,
hparams.hidden_size,
dropout=hparams.relu_dropout)
elif hparams.ffn_layer == "normed_conv_hidden_relu":
y = common_layers.normed_conv_hidden_relu(
x,
hparams.norm_type,
hparams.layer_norm_epsilon,
hparams.filter_size,
hparams.hidden_size,
dropout=hparams.relu_dropout,
norm_name="convnorm")
elif hparams.ffn_layer == "self_attention_ffn":
x_shape = tf.shape(x)
x = tf.reshape(x, [x_shape[0], -1, hparams.hidden_size])
y = common_attention.ffn_self_attention_layer(
x, hparams.filter_size, hparams.hidden_size, hparams.num_parts,
hparams.attention_dropout, hparams.share_kv)
y = tf.reshape(y, x_shape)
elif hparams.ffn_layer == "local_moe_tpu":
overhead = (hparams.moe_overhead_train
if hparams.mode == tf.estimator.ModeKeys.TRAIN
else hparams.moe_overhead_eval)
x, x_shape, is_4d = maybe_reshape_4d_to_3d(x)
y, loss = expert_utils.local_moe_tpu(
x, hparams.filter_size // 2,
hparams.hidden_size,
hparams.moe_num_experts, overhead=overhead,
loss_coef=hparams.moe_loss_coef)
if is_4d:
y = tf.reshape(y, x_shape)
if losses is None:
raise ValueError(
"transformer_ffn_layer with type local_moe_tpu must pass in "
"a losses list")
losses.append(loss)
else:
assert hparams.ffn_layer == "glu_ffn"
y = common_layers.gated_linear_unit_layer(x)
return y |
<SYSTEM_TASK:>
Postprocessing after decoding.
<END_TASK>
<USER_TASK:>
Description:
def postprocess_image(x, rows, cols, hparams):
"""Postprocessing after decoding.
Args:
x: Tensor of shape [batch, ...], where ... can be any rank such that the
number of elements in x is batch * rows * cols * hparams.hidden_size.
rows: Integer representing number of rows in a 2-D data point.
cols: Integer representing number of columns in a 2-D data point.
hparams: HParams set.
Returns:
Tensor of shape [batch, rows, cols, depth], where depth is
hparams.num_mixtures * 10 if hparams.likelihood is DMOL, otherwise 256. In
the special case of inference and block raster scan order, it is a Tensor
of shape [batch, num_blocks_rows, num_block_cols, block_length, block_width,
depth].
""" |
batch = common_layers.shape_list(x)[0]
x = tf.reshape(x, [batch, rows, cols, hparams.hidden_size])
likelihood = getattr(hparams, "likelihood", DistributionType.CAT)
if likelihood == DistributionType.DMOL:
depth = hparams.num_mixtures * 10
targets = tf.layers.dense(x,
depth,
use_bias=False,
activation=None,
name="output_conv")
else:
depth = 256
targets = tf.layers.dense(x,
depth,
use_bias=True,
activation=None,
name="output_conv")
if (hparams.mode == tf.estimator.ModeKeys.PREDICT and
hparams.block_raster_scan):
y = targets
yshape = common_layers.shape_list(y)
block_length = hparams.query_shape[0]
block_width = hparams.query_shape[1]
# Break into block row wise.
y = tf.reshape(y,
[batch, yshape[1] // block_length, block_length,
yshape[2], depth])
yshape = common_layers.shape_list(y)
# Break into blocks width wise.
y_blocks = tf.reshape(y,
[batch, yshape[1], yshape[2],
yshape[3] // block_width, block_width, depth])
# Reshape targets as [batch, num_blocks_rows, num_block_cols, block_length,
# block_width, depth].
targets = tf.transpose(y_blocks, [0, 1, 3, 2, 4, 5])
return targets |
<SYSTEM_TASK:>
Creates output from decoder output and vars.
<END_TASK>
<USER_TASK:>
Description:
def create_output(decoder_output, rows, cols, targets, hparams):
"""Creates output from decoder output and vars.
Args:
decoder_output: Tensor of shape [batch, ...], where ... can be any rank such
that the number of elements is batch * rows * cols * hparams.hidden_size.
rows: Integer representing number of rows in a 2-D data point.
cols: Integer representing number of columns in a 2-D data point.
targets: Tensor of shape [batch, hparams.img_len, hparams.img_len,
hparams.num_channels].
hparams: HParams set.
Returns:
Tensor of shape [batch, hparams.img_len, hparams.img_len,
hparams.num_mixtures * 10] if hparams.likelihood is DMOL, otherwise
[batch, hparams.img_len, hparams.img_len, hparams.num_channels, 256].
In the special case of predict mode, it is a Tensor of rank 5.
""" |
del targets # unused arg
decoded_image = postprocess_image(decoder_output, rows, cols, hparams)
batch = common_layers.shape_list(decoded_image)[0]
depth = common_layers.shape_list(decoded_image)[-1]
likelihood = getattr(hparams, "likelihood", DistributionType.CAT)
if hparams.mode == tf.estimator.ModeKeys.PREDICT:
y = tf.reshape(decoded_image, [batch, -1, 1, 1, depth])
output = y[:, :rows, :, :, :]
elif likelihood == DistributionType.CAT:
# Unpack the cols dimension of the Categorical.
channels = hparams.num_channels
output = tf.reshape(decoded_image,
[batch, rows, cols // channels, channels, depth])
else:
output = decoded_image
return output |
<SYSTEM_TASK:>
Get separate embedding for each of the channels.
<END_TASK>
<USER_TASK:>
Description:
def get_channel_embeddings(io_depth, targets, hidden_size, name="channel"):
"""Get separate embedding for each of the channels.""" |
targets_split = tf.split(targets, io_depth, axis=3)
rgb_embedding_var = tf.get_variable("rgb_target_emb_%s" % name,
[256 * io_depth, hidden_size])
rgb_embedding_var = tf.identity(rgb_embedding_var)
rgb_embedding_var *= float(hidden_size)**0.5
channel_target_embs = []
for i in range(io_depth):
# Adding the channel offsets to get the right embedding since the
# embedding tensor has shape 256 * io_depth, hidden_size
target_ids = tf.squeeze(targets_split[i], axis=3) + i * 256
target_embs = common_layers.gather(rgb_embedding_var, target_ids)
channel_target_embs.append(target_embs)
return tf.concat(channel_target_embs, axis=-1) |
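The channel-offset indexing above is easy to misread; a worked example for RGB (io_depth = 3), pure arithmetic:

io_depth = 3                      # RGB
intensity, channel = 10, 1        # a green value of 10
row = intensity + channel * 256   # row 266 of the [256 * 3, hidden_size] table
assert row == 266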
<SYSTEM_TASK:>
Decide whether to include a revision.
<END_TASK>
<USER_TASK:>
Description:
def include_revision(revision_num, skip_factor=1.1):
"""Decide whether to include a revision.
If the number of revisions is large, we exclude some revisions to avoid
a quadratic blowup in runtime, since the article is likely also large.
We make the ratio between consecutive included revision numbers
approximately equal to skip_factor.
Args:
revision_num: an integer
skip_factor: a floating point number >= 1.0
Returns:
a boolean
""" |
if skip_factor <= 1.0:
return True
return (int(math.log1p(revision_num) / math.log(skip_factor)) != int(
math.log(revision_num + 2.0) / math.log(skip_factor))) |
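A quick self-contained check of the skip logic (assuming include_revision and math are in scope as above); with skip_factor=2.0 the kept revision numbers thin out roughly geometrically:

kept = [n for n in range(40) if include_revision(n, skip_factor=2.0)]
print(kept)  # [0, 2, 6, 14, 30]: spacing roughly doubles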
<SYSTEM_TASK:>
Read wikipedia pages from a history dump.
<END_TASK>
<USER_TASK:>
Description:
def file_page_generator(my_file, max_page_size=2**28):
"""Read wikipedia pages from a history dump.
Since some pages can be terabytes in size (with all the revisions),
we limit page size to max_page_size bytes.
Args:
my_file: an open file object.
max_page_size: an integer
Yields:
strings
""" |
page_start = " <page>\n"
page_end = " </page>\n"
chunk_size = max_page_size
page_start = " <page>\n"
page_end = " </page>\n"
leftovers = ""
while True:
chunk = my_file.read(chunk_size)
if not chunk:
break
chunk = leftovers + chunk
current_pos = 0
while True:
start_pos = chunk.find(page_start, current_pos)
if start_pos == -1:
break
end_pos = chunk.find(page_end, start_pos)
if end_pos == -1:
if len(chunk) - start_pos > max_page_size:
leftovers = ""
else:
leftovers = chunk[start_pos:]
break
raw_page = chunk[start_pos + len(page_start):end_pos]
if len(raw_page) < max_page_size:
ret = parse_page(raw_page)
if ret:
yield ret
current_pos = end_pos + len(page_end) |
<SYSTEM_TASK:>
Extract the id from a page.
<END_TASK>
<USER_TASK:>
Description:
def get_id(page):
"""Extract the id from a page.
Args:
page: a string
Returns:
an integer
""" |
start_pos = page.find("<id>")
end_pos = page.find("</id>")
assert start_pos != -1
assert end_pos != -1
start_pos += len("<id>")
return int(page[start_pos:end_pos]) |
<SYSTEM_TASK:>
Extract the revisions of a page.
<END_TASK>
<USER_TASK:>
Description:
def get_revisions(page):
"""Extract the revisions of a page.
Args:
page: a string
Returns:
a list of strings
""" |
start_string = " <revision>\n"
end_string = " </revision>\n"
ret = []
current_pos = 0
while True:
start_pos = page.find(start_string, current_pos)
if start_pos == -1:
break
end_pos = page.find(end_string, start_pos)
assert end_pos != -1
ret.append(page[start_pos + len(start_string):end_pos])
current_pos = end_pos + len(end_string)
return ret |
<SYSTEM_TASK:>
Create a dictionary with title, id, and list of revisions.
<END_TASK>
<USER_TASK:>
Description:
def parse_page(raw_page):
"""Create a dictionary with title, id, and list of revisions.
The dictionary contains:
"title": a string
"id": an integer
"revisions": a list of strings
Args:
raw_page: a string
Returns:
a dictionary, or None in the case of an error.
""" |
ret = {"title": get_title(raw_page), "id": get_id(raw_page)}
if ":" in ret["title"]:
return None
ret["revisions"] = get_revisions(raw_page)
return ret |
<SYSTEM_TASK:>
Copy a file to a directory if it is not already there.
<END_TASK>
<USER_TASK:>
Description:
def maybe_copy_file_to_directory(source_filepath, target_directory):
"""Copy a file to a directory if it is not already there.
Returns the target filepath.
Args:
source_filepath: a string
target_directory: a string
Returns:
a string
""" |
if not tf.gfile.Exists(target_directory):
tf.logging.info("Creating directory %s" % target_directory)
os.mkdir(target_directory)
target_filepath = os.path.join(target_directory,
os.path.basename(source_filepath))
if not tf.gfile.Exists(target_filepath):
tf.logging.info("Copying %s to %s" % (source_filepath, target_filepath))
tf.gfile.Copy(source_filepath, target_filepath)
statinfo = os.stat(target_filepath)
tf.logging.info("Successfully copied %s, %s bytes." % (target_filepath,
statinfo.st_size))
else:
tf.logging.info("Not copying, file already found: %s" % target_filepath)
return target_filepath |
<SYSTEM_TASK:>
Generate pages from a list of .7z encoded history dumps.
<END_TASK>
<USER_TASK:>
Description:
def corpus_page_generator(corpus_files, tmp_dir, max_page_size_exp):
"""Generate pages from a list of .7z encoded history dumps.
Args:
corpus_files: a list of strings
tmp_dir: a string
max_page_size_exp: an integer
Yields:
strings
""" |
for remote_filepath in corpus_files:
filepath = maybe_copy_file_to_directory(remote_filepath, tmp_dir)
tf.logging.info("Reading from " + filepath)
command = ["7z", "x", "-so", filepath]
tf.logging.info("Running command: %s", command)
p = subprocess.Popen(command, stdout=subprocess.PIPE, bufsize=-1)
for page in file_page_generator(p.stdout, 2**max_page_size_exp):
yield page |
<SYSTEM_TASK:>
Extract the text from a revision.
<END_TASK>
<USER_TASK:>
Description:
def get_text(revision, strip=True):
"""Extract the text from a revision.
Args:
revision: a string
strip: a boolean
Returns:
a string
""" |
# text start tag looks like "<text ..otherstuff>"
start_pos = revision.find("<text")
assert start_pos != -1
end_tag_pos = revision.find(">", start_pos)
assert end_tag_pos != -1
end_tag_pos += len(">")
end_pos = revision.find("</text>")
if end_pos == -1:
ret = ""
else:
ret = revision[end_tag_pos:end_pos]
if strip:
ret = strip_text(ret)
ret = text_encoder.to_unicode_utf8(ret)
return ret |
<SYSTEM_TASK:>
Remove everything in curly braces.
<END_TASK>
<USER_TASK:>
Description:
def _remove_curly_braces(text):
"""Remove everything in curly braces.
Curly braces may be nested, so we keep track of depth.
Args:
text: a string
Returns:
a string
""" |
current_pos = 0
depth = 0
ret = ""
for match in re.finditer("[{}]", text):
if depth == 0:
ret += text[current_pos:match.start()]
depth += 1 if text[match.start()] == "{" else -1
current_pos = match.end()
if depth != 0:
# Many articles have mismatched braces, but it still seems better to remove
# them than not.
pass
else:
ret += text[current_pos:]
return ret |
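For instance (assuming the helper above is in scope), nested braces are tracked by depth, so the whole templated span disappears:

print(_remove_curly_braces("a {{cite {nested}}} b"))  # -> "a  b"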
<SYSTEM_TASK:>
Remove double brackets, but leave the viewable text.
<END_TASK>
<USER_TASK:>
Description:
def _remove_double_brackets(text):
"""Remove double brackets, but leave the viewable text.
Args:
text: a string
Returns:
a string
""" |
def replacement_fn(s):
if ":" in s:
# this is probably a category or something like that.
return ""
# keep the part after the bar.
bar_pos = s.find("|")
if bar_pos == -1:
return s
return s[bar_pos + 1:]
return _find_and_replace(text, "[[", "]]", replacement_fn) |
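Behavior sketch of the replacement rule (assumes _find_and_replace from the same module is in scope):

print(_remove_double_brackets("see [[Paris|the city]]"))  # -> "see the city"
# "[[Paris]]"          -> "Paris"    (no bar: keep the text as-is)
# "[[Category:Towns]]" -> ""         (colon: treated as a meta link, dropped)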
<SYSTEM_TASK:>
Remove lines that do not start with a letter or a quote.
<END_TASK>
<USER_TASK:>
Description:
def _remove_boring_lines(text):
"""Remove lines that do not start with a letter or a quote.
From inspecting the data, this seems to leave in most prose and remove
most weird stuff.
Args:
text: a string
Returns:
a string
""" |
lines = text.split("\n")
filtered = [line for line in lines if re.match("[a-zA-Z\"']", line)]
return "\n".join(filtered) |
<SYSTEM_TASK:>
Get or generate the vocabulary.
<END_TASK>
<USER_TASK:>
Description:
def get_or_generate_vocabulary(data_dir,
tmp_dir,
data_prefix,
max_page_size_exp,
approx_vocab_size=32768,
strip=True):
"""Get or generate the vocabulary.
Args:
data_dir: a string
tmp_dir: a string
data_prefix: a string
max_page_size_exp: an integer
approx_vocab_size: an integer
strip: a boolean
Returns:
a TextEncoder
""" |
num_pages_for_vocab_generation = approx_vocab_size // 3
vocab_file = vocab_filename(approx_vocab_size, strip)
def my_generator(data_prefix):
"""Line generator for vocab."""
count = 0
for page in corpus_page_generator(
all_corpus_files(data_prefix)[::-1], tmp_dir, max_page_size_exp):
revisions = page["revisions"]
if revisions:
text = get_text(revisions[-1], strip=strip)
yield text
count += 1
if count % 100 == 0:
tf.logging.info("reading pages for vocab %d" % count)
if count > num_pages_for_vocab_generation:
break
return generator_utils.get_or_generate_vocab_inner(data_dir, vocab_file,
approx_vocab_size,
my_generator(data_prefix)) |
<SYSTEM_TASK:>
Get encoder from vocab file.
<END_TASK>
<USER_TASK:>
Description:
def get_encoder_from_vocab(vocab_filepath):
"""Get encoder from vocab file.
If vocab is not found in output dir, it will be copied there by
copy_vocab_to_output_dir to clarify the vocab used to generate the data.
Args:
vocab_filepath: path to vocab, either local or cns
Returns:
A SubwordTextEncoder vocabulary object. None if the output_parallel_text
is set.
""" |
if not tf.gfile.Exists(vocab_filepath):
raise ValueError("Vocab file does not exist: {}.".format(vocab_filepath))
tf.logging.info("Found vocab file: %s", vocab_filepath)
encoder = text_encoder.SubwordTextEncoder(vocab_filepath)
return encoder |
<SYSTEM_TASK:>
Filter out examples that exceed max_edit_ratio between source and target.
<END_TASK>
<USER_TASK:>
Description:
def edit_distance_filter(source_target_input, max_equal_to_diff_ratio=0):
"""Filter out examples that exceed max_edit_ratio between source and target.
Args:
source_target_input: a list of [source, target] pairs
max_equal_to_diff_ratio: cutoff for ratio of equal chars / diff chars
between source and target
Returns:
source_target_output: filtered subset of [source, target] input pairs
thrown_out_count: number of examples filtered out
""" |
thrown_out_count = 0
source_target_output = []
if not max_equal_to_diff_ratio:
return source_target_input, thrown_out_count
for src_tgt in source_target_input:
opcodes = fast_match_sequences(*src_tgt)
diff_char_count = 0
equal_char_count = 0
for tag, i1, i2, j1, j2 in opcodes:
if tag == "diff":
# max() prevents double-counting substitutions.
diff_char_count += max(i2 - i1, j2 - j1)
else:
equal_char_count += i2 - i1
if diff_char_count <= max_equal_to_diff_ratio * equal_char_count:
source_target_output.append(src_tgt)
else:
thrown_out_count += 1
return source_target_output, thrown_out_count |
<SYSTEM_TASK:>
Artificially add spelling errors and infill markers.
<END_TASK>
<USER_TASK:>
Description:
def introduce_errors(s,
corruption_rate=3e-3,
infill_marker="|?|",
max_infill_len=8):
"""Artificially add spelling errors and infill markers.
This function should be applied to the inputs of a correction model.
The artificial errors are particularly useful to train a network to
correct spelling when the training data does not contain many
natural errors.
Also replaces some substrings with an "infill" marker. e.g.
"the fat cat sat on the mat" -> "the fat ca??? the mat"
This causes the trained model to learn infilling (predicting what text
to insert at the current cursor position).
Args:
s: a string (the uncorrupted text)
corruption_rate: a floating point value. Probability of introducing an
error/infill at each character.
infill_marker: a string
max_infill_len: an optional integer - maximum number of characters to remove
and replace by an infill marker. None means no infilling.
Returns:
a string
""" |
num_errors = 0
ret = []
operations = [
"delete", # delete a character
"insert", # insert a random character from the input string
"replace", # replace a character with a random character from
# the input string
"transpose", # transpose two adjacent characters
]
if max_infill_len:
operations.append("infill")
pos = 0
while pos < len(s):
if random.random() >= corruption_rate:
ret.append(s[pos])
pos += 1
continue
num_errors += 1
operation = operations[random.randint(0, len(operations) - 1)]
if operation == "delete":
pos += 1
elif operation == "insert":
ret.append(s[random.randint(0, len(s) - 1)])
elif operation == "replace":
ret.append(s[random.randint(0, len(s) - 1)])
pos += 1
elif operation == "transpose":
ret.append(s[pos + 1] if pos + 1 < len(s) else "")
ret.append(s[pos])
pos += 2
else:
assert operation == "infill"
ret.append(infill_marker)
pos += random.randint(0, max_infill_len)
return "".join(ret), num_errors |
<SYSTEM_TASK:>
Compute diffs between two sequences.
<END_TASK>
<USER_TASK:>
Description:
def fast_match_sequences(a,
b,
a_start=0,
a_end=None,
b_start=0,
b_end=None,
min_match_length=3,
max_recursion_depth=128):
"""Compute diffs between two sequences.
This function is similar in functionality and spirit to
difflib.SequenceMatcher.get_opcodes, but it seems to run faster.
if a_start, a_end, b_start, b_end are specified, then we compute diffs of
the segments a[a_start:a_end] and b[b_start:b_end]. Returned indices
are relative to the full sequence.
We try to match the longest matching segments first, but due to heuristics
in finding the matches, this is not guaranteed.
Matching segments shorter than min_match_length are counted as part of the
surrounding differing segments, unless they are at the beginning or end of
both sequences. This helps eliminate junk matches.
Args:
a: a sequence
b: a sequence
a_start: an optional integer
a_end: an optional integer
b_start: an optional integer
b_end: an optional integer
min_match_length: an integer
max_recursion_depth: an integer - avoids crashes in weird corner cases
involving pairs of long repetitive sequences.
Returns:
a list of 5-tuples (tag, i1, i2, j1, j2).
Each tuple represents the alignment of segment a[i1:i2] with b[j1:j2].
tag is either "equal" or "diff". Note that the tags differ from those
returned by difflib.SequenceMatcher.get_opcodes.
""" |
if a_end is None:
a_end = len(a)
if b_end is None:
b_end = len(b)
if a_start == a_end and b_start == b_end:
return []
if a_start == a_end or b_start == b_end:
return [("diff", a_start, a_end, b_start, b_end)]
# Compute an index from value to first occurrence in the b segment.
# Technically, we should index and explore all occurrences of a value,
# but that might be much slower.
b_index = {}
for j in range(b_end - 1, b_start - 1, -1):
b_index[b[j]] = j
# we will look for the longest match we can find.
max_match_length = 0
a_pos = a_start
while a_pos < a_end:
val = a[a_pos]
b_pos = b_index.get(val)
if b_pos is None:
a_pos += 1
continue
else:
a_match_start = a_pos
a_match_end = a_pos + 1
b_match_start = b_pos
b_match_end = b_pos + 1
while (a_match_start > a_start and b_match_start > b_start and
a[a_match_start - 1] == b[b_match_start - 1]):
a_match_start -= 1
b_match_start -= 1
while (a_match_end < a_end and b_match_end < b_end and
a[a_match_end] == b[b_match_end]):
a_match_end += 1
b_match_end += 1
# Compute the length of the matching segment. We prefer the longest.
match_length = a_match_end - a_match_start
# Extra credit for matching at the beginning or end of the sequence.
if a_match_start == 0 and b_match_start == 0:
match_length += min_match_length
if a_match_end == len(a) and b_match_end == len(b):
match_length += min_match_length
if match_length > max_match_length:
max_match_length = match_length
best_match = (a_match_start, a_match_end, b_match_start, b_match_end)
# advance a_pos to the end of this match to avoid wasting time
# rediscovering this match.
a_pos = a_match_end
if max_match_length < min_match_length or max_recursion_depth == 0:
return [("diff", a_start, a_end, b_start, b_end)]
a_match_start, a_match_end, b_match_start, b_match_end = best_match
return (fast_match_sequences(
a, b, a_start, a_match_start, b_start, b_match_start, min_match_length,
max_recursion_depth - 1) + [
("equal", a_match_start, a_match_end, b_match_start, b_match_end)
] + fast_match_sequences(a, b, a_match_end, a_end, b_match_end, b_end,
min_match_length, max_recursion_depth - 1)) |
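A sanity check on a simple pair (note the tags are "equal"/"diff", unlike difflib's):

a, b = "the quick fox", "the quack fox"
for tag, i1, i2, j1, j2 in fast_match_sequences(a, b):
  print(tag, repr(a[i1:i2]), repr(b[j1:j2]))
# equal 'the qu' 'the qu'
# diff  'i' 'a'
# equal 'ck fox' 'ck fox'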
<SYSTEM_TASK:>
Load variables from checkpoint.
<END_TASK>
<USER_TASK:>
Description:
def begin(self):
"""Load variables from checkpoint.
New model variables have the following name format:
new_model_scope/old_model_scope/xxx/xxx:0 To find the map of
name to variable, need to strip the new_model_scope and then
match the old_model_scope and remove the suffix :0.
""" |
variables_to_restore = tf.contrib.framework.get_variables_to_restore(
include=self._include, exclude=self._exclude)
# remove new_model_scope from variable name prefix
assignment_map = {variable.name[len(self._new_model_scope):]: variable
for variable in variables_to_restore
if variable.name.startswith(self._new_model_scope)}
# remove :0 from variable name suffix
assignment_map = {name.split(":")[0]: variable
for name, variable in six.iteritems(assignment_map)
if name.startswith(self._old_model_scope)}
self._assignment_map = assignment_map
tf.logging.info("restoring %d variables from checkpoint %s"%(
len(assignment_map), self._checkpoint_path))
tf.train.init_from_checkpoint(self._checkpoint_path, self._assignment_map) |
<SYSTEM_TASK:>
Creates a TimeStep with both rewards and actions as optional.
<END_TASK>
<USER_TASK:>
Description:
def create_time_step(cls,
observation=None,
done=False,
raw_reward=None,
processed_reward=None,
action=None):
"""Creates a TimeStep with both rewards and actions as optional.""" |
return cls(observation, done, raw_reward, processed_reward, action) |
<SYSTEM_TASK:>
Complete attention layer with preprocessing.
<END_TASK>
<USER_TASK:>
Description:
def attention(targets_shifted, inputs_encoded, norm_fn, hparams, bias=None):
"""Complete attention layer with preprocessing.""" |
separabilities = [hparams.separability, hparams.separability]
if hparams.separability < 0:
separabilities = [hparams.separability - 1, hparams.separability]
targets_timed = common_layers.subseparable_conv_block(
common_layers.add_timing_signal(targets_shifted),
hparams.hidden_size, [((1, 1), (5, 1)), ((4, 1), (5, 1))],
normalizer_fn=norm_fn,
padding="LEFT",
separabilities=separabilities,
name="targets_time")
if hparams.attention_type == "transformer":
targets_timed = tf.squeeze(targets_timed, 2)
target_shape = tf.shape(targets_timed)
targets_segment = tf.zeros([target_shape[0], target_shape[1]])
target_attention_bias = common_attention.attention_bias(
targets_segment, targets_segment, lower_triangular=True)
inputs_attention_bias = tf.zeros([
tf.shape(inputs_encoded)[0], hparams.num_heads,
tf.shape(targets_segment)[1],
tf.shape(inputs_encoded)[1]
])
qv = common_attention.multihead_attention(
targets_timed,
None,
target_attention_bias,
hparams.hidden_size,
hparams.hidden_size,
hparams.hidden_size,
hparams.num_heads,
hparams.attention_dropout,
name="self_attention")
qv = common_attention.multihead_attention(
qv,
inputs_encoded,
inputs_attention_bias,
hparams.hidden_size,
hparams.hidden_size,
hparams.hidden_size,
hparams.num_heads,
hparams.attention_dropout,
name="encdec_attention")
return tf.expand_dims(qv, 2)
elif hparams.attention_type == "simple":
targets_with_attention = common_layers.simple_attention(
targets_timed, inputs_encoded, bias=bias)
return norm_fn(targets_shifted + targets_with_attention, name="attn_norm") |
<SYSTEM_TASK:>
A stack of separable convolution blocks with residual connections.
<END_TASK>
<USER_TASK:>
Description:
def multi_conv_res(x, padding, name, layers, hparams, mask=None, source=None):
"""A stack of separable convolution blocks with residual connections.""" |
with tf.variable_scope(name):
padding_bias = None
if mask is not None:
padding_bias = (1.0 - mask) * -1e9 # Bias to not attend to padding.
if padding == "LEFT": # Do not mask anything when left-padding.
mask = None
if (hparams.kernel_scheme in _KERNEL_SCHEMES and
hparams.dilation_scheme in _DILATION_SCHEMES):
kernels = _KERNEL_SCHEMES[hparams.kernel_scheme]
dilations = _DILATION_SCHEMES[hparams.dilation_scheme]
dilations_and_kernels = list(zip(dilations, kernels))
dilations_and_kernels1 = dilations_and_kernels[:2]
dilations_and_kernels2 = dilations_and_kernels[2:]
else:
k = (hparams.kernel_height, hparams.kernel_width)
k2 = (hparams.large_kernel_size, 1)
dilations_and_kernels1 = [((1, 1), k), ((1, 1), k)]
dilations_and_kernels2 = [((1, 1), k2), ((4, 4), k2)]
separabilities1 = [hparams.separability, hparams.separability]
separabilities2 = [hparams.separability] * len(dilations_and_kernels2)
if hparams.separability < 0:
separabilities1 = [hparams.separability - 1, hparams.separability]
separabilities2 = [
hparams.separability - i
for i in reversed(range(len(dilations_and_kernels2)))
]
def norm_fn(x, name):
with tf.variable_scope(name, default_name="norm"):
return common_layers.apply_norm(
x, hparams.norm_type, hparams.hidden_size, hparams.norm_epsilon)
for layer in range(layers):
with tf.variable_scope("layer_%d" % layer):
y = common_layers.subseparable_conv_block(
x,
hparams.hidden_size,
dilations_and_kernels1,
normalizer_fn=norm_fn,
padding=padding,
mask=mask,
separabilities=separabilities1,
name="residual1")
x += common_layers.subseparable_conv_block(
x + y,
hparams.hidden_size,
dilations_and_kernels2,
normalizer_fn=norm_fn,
padding=padding,
mask=mask,
separabilities=separabilities2,
name="residual2") + y
if source is not None and hparams.attention_type != "none":
x += attention(x, source, norm_fn, hparams, bias=padding_bias)
if mask is not None:
x *= mask
return tf.nn.dropout(x, 1.0 - hparams.dropout) |
<SYSTEM_TASK:>
Experimental rank loss, thanks to kkurach@ for the code.
<END_TASK>
<USER_TASK:>
Description:
def rank_loss(sentence_emb, image_emb, margin=0.2):
"""Experimental rank loss, thanks to kkurach@ for the code.""" |
with tf.name_scope("rank_loss"):
# Normalize first as this is assumed in cosine similarity later.
sentence_emb = tf.nn.l2_normalize(sentence_emb, 1)
image_emb = tf.nn.l2_normalize(image_emb, 1)
# Both sentence_emb and image_emb have size [batch, depth].
scores = tf.matmul(image_emb, tf.transpose(sentence_emb)) # [batch, batch]
diagonal = tf.diag_part(scores) # [batch]
cost_s = tf.maximum(0.0, margin - diagonal + scores) # [batch, batch]
cost_im = tf.maximum(
0.0, margin - tf.reshape(diagonal, [-1, 1]) + scores) # [batch, batch]
# Clear diagonals.
batch_size = tf.shape(sentence_emb)[0]
empty_diagonal_mat = tf.ones_like(cost_s) - tf.eye(batch_size)
cost_s *= empty_diagonal_mat
cost_im *= empty_diagonal_mat
return tf.reduce_mean(cost_s) + tf.reduce_mean(cost_im) |
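The two cost terms mirror each other: cost_s applies the margin per sentence (each column compares candidate images against the true one), cost_im per image (each row compares candidate sentences). A tiny NumPy rendering of the margin arithmetic, with illustrative numbers:

import numpy as np

scores = np.array([[0.9, 0.3],
                   [0.2, 0.8]])  # image x sentence cosine similarities
diag = np.diag(scores)           # matched-pair scores
cost_s = np.maximum(0.0, 0.2 - diag + scores)            # broadcast per column
cost_im = np.maximum(0.0, 0.2 - diag[:, None] + scores)  # broadcast per row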
<SYSTEM_TASK:>
Loss encouraging embeddings to be closer to their own targets than to others.
<END_TASK>
<USER_TASK:>
Description:
def similarity_cost(inputs_encoded, targets_encoded):
"""Loss telling to be more similar to your own targets than to others.""" |
# This is a first very simple version: handle variable-length by padding
# to same length and putting everything into batch. In need of a better way.
x, y = common_layers.pad_to_same_length(inputs_encoded, targets_encoded)
depth = tf.shape(inputs_encoded)[3]
x, y = tf.reshape(x, [-1, depth]), tf.reshape(y, [-1, depth])
return rank_loss(x, y) |
<SYSTEM_TASK:>
Middle part of slicenet, connecting encoder and decoder.
<END_TASK>
<USER_TASK:>
Description:
def slicenet_middle(inputs_encoded, targets, target_space_emb, mask, hparams):
"""Middle part of slicenet, connecting encoder and decoder.""" |
def norm_fn(x, name):
with tf.variable_scope(name, default_name="norm"):
return common_layers.apply_norm(x, hparams.norm_type, hparams.hidden_size,
hparams.norm_epsilon)
# Flatten targets and embed target_space_id.
targets_flat = tf.expand_dims(common_layers.flatten4d3d(targets), axis=2)
target_space_emb = tf.tile(target_space_emb,
[tf.shape(targets_flat)[0], 1, 1, 1])
# Use attention from each target to look at input and retrieve.
targets_shifted = common_layers.shift_right(
targets_flat, pad_value=target_space_emb)
if hparams.attention_type == "none":
targets_with_attention = tf.zeros_like(targets_shifted)
else:
inputs_padding_bias = (1.0 - mask) * -1e9 # Bias to not attend to padding.
targets_with_attention = attention(
targets_shifted,
inputs_encoded,
norm_fn,
hparams,
bias=inputs_padding_bias)
# Positional targets: merge attention and raw.
kernel = (hparams.kernel_height, hparams.kernel_width)
targets_merged = common_layers.subseparable_conv_block(
tf.concat([targets_with_attention, targets_shifted], axis=3),
hparams.hidden_size, [((1, 1), kernel)],
normalizer_fn=norm_fn,
padding="LEFT",
separability=4,
name="targets_merge")
return targets_merged, 0.0 |
<SYSTEM_TASK:>
The slicenet model, main step used for training.
<END_TASK>
<USER_TASK:>
Description:
def slicenet_internal(inputs, targets, target_space, hparams, run_decoder=True):
"""The slicenet model, main step used for training.""" |
with tf.variable_scope("slicenet"):
# Project to hidden size if necessary
if inputs.get_shape().as_list()[-1] != hparams.hidden_size:
inputs = common_layers.conv_block(
inputs,
hparams.hidden_size, [((1, 1), (3, 3))],
first_relu=False,
padding="SAME",
force2d=True)
# Flatten inputs and encode.
inputs = tf.expand_dims(common_layers.flatten4d3d(inputs), axis=2)
inputs_mask = 1.0 - embedding_to_padding(inputs)
inputs = common_layers.add_timing_signal(inputs) # Add position info.
target_space_emb = embed_target_space(target_space, hparams.hidden_size)
extra_layers = int(hparams.num_hidden_layers * 1.5)
inputs_encoded = multi_conv_res(
inputs, "SAME", "encoder", extra_layers, hparams, mask=inputs_mask)
if not run_decoder:
return inputs_encoded
# Do the middle part.
decoder_start, similarity_loss = slicenet_middle(
inputs_encoded, targets, target_space_emb, inputs_mask, hparams)
# Decode.
decoder_final = multi_conv_res(
decoder_start,
"LEFT",
"decoder",
hparams.num_hidden_layers,
hparams,
mask=inputs_mask,
source=inputs_encoded)
return decoder_final, tf.reduce_mean(similarity_loss) |
<SYSTEM_TASK:>
Version with Noam's decay scheme.
<END_TASK>
<USER_TASK:>
Description:
def slicenet_params1_noam():
"""Version with Noam's decay scheme.""" |
hparams = slicenet_params1()
hparams.learning_rate_decay_scheme = "noam"
hparams.learning_rate = 1.0
hparams.learning_rate_warmup_steps = 4000
hparams.initializer = "uniform_unit_scaling"
hparams.optimizer_adam_epsilon = 1e-9
hparams.optimizer_adam_beta1 = 0.9
hparams.optimizer_adam_beta2 = 0.98
return hparams |
<SYSTEM_TASK:>
Version for fast local runs.
<END_TASK>
<USER_TASK:>
Description:
def slicenet_params1_tiny():
"""Version for fast local runs.""" |
hparams = slicenet_params1()
hparams.attention_type = "simple"
hparams.separability = 0
hparams.hidden_size = 128
hparams.num_hidden_layers = 2
hparams.batch_size = 512
hparams.learning_rate_warmup_steps = 200
return hparams |
<SYSTEM_TASK:>
Decode ids back to tokens, resolving temporary OOV IDs.
<END_TASK>
<USER_TASK:>
Description:
def decode_list_oov(self, ids, source_oov_id_to_token):
"""decode ids back to tokens, considering OOVs temporary IDs.
Args:
ids: vocab ids. Could possibly include source temporary OOV ID starting
from vocab_size.
source_oov_id_to_token: a list of source OOV tokens, with the order the
same as they appear in the source.
Returns:
decoded tokens, possibly including source OOV tokens.
""" |
seq = reversed(ids) if self._reverse else ids
tokens = []
for cur_id in seq:
if cur_id in self._id_to_token:
tokens.append(self._id_to_token[cur_id])
else:
tokens.append(source_oov_id_to_token[cur_id - self.vocab_size])
return tokens |
<SYSTEM_TASK:>
Distort the color of a Tensor image.
<END_TASK>
<USER_TASK:>
Description:
def _distort_color(image, color_ordering=0, scope=None):
"""Distort the color of a Tensor image.
Each color distortion is non-commutative and thus ordering of the color ops
matters. Ideally we would randomly permute the ordering of the color ops.
Rather then adding that level of complication, we select a distinct ordering
of color ops for each preprocessing thread.
Args:
image: 3-D Tensor containing single image in [0, 1].
color_ordering: Python int, a type of distortion (valid values: 0-3).
scope: Optional scope for name_scope.
Returns:
3-D Tensor color-distorted image on range [0, 1]
Raises:
ValueError: if color_ordering not in [0, 3]
""" |
with tf.name_scope(scope, "distort_color", [image]):
if color_ordering == 0:
image = tf.image.random_brightness(image, max_delta=32. / 255.)
image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
image = tf.image.random_hue(image, max_delta=0.2)
image = tf.image.random_contrast(image, lower=0.5, upper=1.5)
elif color_ordering == 1:
image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
image = tf.image.random_brightness(image, max_delta=32. / 255.)
image = tf.image.random_contrast(image, lower=0.5, upper=1.5)
image = tf.image.random_hue(image, max_delta=0.2)
elif color_ordering == 2:
image = tf.image.random_contrast(image, lower=0.5, upper=1.5)
image = tf.image.random_hue(image, max_delta=0.2)
image = tf.image.random_brightness(image, max_delta=32. / 255.)
image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
elif color_ordering == 3:
image = tf.image.random_hue(image, max_delta=0.2)
image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
image = tf.image.random_contrast(image, lower=0.5, upper=1.5)
image = tf.image.random_brightness(image, max_delta=32. / 255.)
else:
raise ValueError("color_ordering must be in [0, 3]")
# The random_* ops do not necessarily clamp.
return tf.clip_by_value(image, 0.0, 1.0) |
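Usage sketch in the same TF1 graph style (the ordering is normally randomized per preprocessing thread; color_ordering=2 below is an arbitrary illustrative choice):

image = tf.random_uniform([224, 224, 3])  # a float image in [0, 1]
distorted = _distort_color(image, color_ordering=2)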
<SYSTEM_TASK:>
Prepare one shard of the model for the encoder.
<END_TASK>
<USER_TASK:>
Description:
def transformer_prepare_encoder(inputs, target_space, hparams, features=None):
"""Prepare one shard of the model for the encoder.
Args:
inputs: a Tensor.
target_space: a Tensor.
hparams: run hyperparameters
features: optionally pass the entire features dictionary as well.
This is needed now for "packed" datasets.
Returns:
encoder_input: a Tensor, bottom of encoder stack
encoder_self_attention_bias: a bias tensor for use in encoder self-attention
encoder_decoder_attention_bias: a bias tensor for use in encoder-decoder
attention
""" |
ishape_static = inputs.shape.as_list()
encoder_input = inputs
if features and "inputs_segmentation" in features:
# Packed dataset. Keep the examples from seeing each other.
inputs_segmentation = features["inputs_segmentation"]
inputs_position = features["inputs_position"]
targets_segmentation = features["targets_segmentation"]
if (hasattr(hparams, "unidirectional_encoder") and
hparams.unidirectional_encoder):
tf.logging.info("Using unidirectional encoder")
encoder_self_attention_bias = (
common_attention.attention_bias_lower_triangle(
common_layers.shape_list(inputs)[1]))
else:
encoder_self_attention_bias = (
common_attention.attention_bias_same_segment(
inputs_segmentation, inputs_segmentation))
encoder_decoder_attention_bias = (
common_attention.attention_bias_same_segment(targets_segmentation,
inputs_segmentation))
else:
encoder_padding = common_attention.embedding_to_padding(encoder_input)
ignore_padding = common_attention.attention_bias_ignore_padding(
encoder_padding)
if (hasattr(hparams, "unidirectional_encoder") and
hparams.unidirectional_encoder):
tf.logging.info("Using unidirectional encoder")
encoder_self_attention_bias = (
common_attention.attention_bias_lower_triangle(
common_layers.shape_list(inputs)[1]))
else:
# Usual case - not a packed dataset.
encoder_self_attention_bias = ignore_padding
encoder_decoder_attention_bias = ignore_padding
inputs_position = None
if hparams.proximity_bias:
encoder_self_attention_bias += common_attention.attention_bias_proximal(
common_layers.shape_list(inputs)[1])
if target_space is not None and hparams.get("use_target_space_embedding",
True):
# Append target_space_id embedding to inputs.
emb_target_space = common_layers.embedding(
target_space,
32,
ishape_static[-1],
name="target_space_embedding",
dtype=hparams.get("activation_dtype", "float32"))
emb_target_space = tf.reshape(emb_target_space, [1, 1, -1])
encoder_input += emb_target_space
if hparams.pos == "timing":
if inputs_position is not None:
encoder_input = common_attention.add_timing_signal_1d_given_position(
encoder_input, inputs_position)
else:
encoder_input = common_attention.add_timing_signal_1d(encoder_input)
elif hparams.pos == "emb":
encoder_input = common_attention.add_positional_embedding(
encoder_input, hparams.max_length, "inputs_positional_embedding",
inputs_position)
encoder_self_attention_bias = common_layers.cast_like(
encoder_self_attention_bias, encoder_input)
encoder_decoder_attention_bias = common_layers.cast_like(
encoder_decoder_attention_bias, encoder_input)
return (encoder_input, encoder_self_attention_bias,
encoder_decoder_attention_bias) |
<SYSTEM_TASK:>
Uncertainty reward based on logits.
<END_TASK>
<USER_TASK:>
Description:
def compute_uncertainty_reward(logits, predictions):
"""Uncertainty reward based on logits.""" |
# TODO(rsepassi): Add support for L1/L2 loss models. Current code only
# works for softmax models.
vocab_size = logits.shape[-1]
assert vocab_size > 1
log_probs = common_layers.log_prob_from_logits(logits)
max_log_probs = common_layers.index_last_dim_with_indices(log_probs,
predictions)
# Threshold
neg_log_prob = tf.nn.relu(-max_log_probs - 0.02)
# Sum across all but the batch dimension
reduce_dims = list(range(len(neg_log_prob.shape)))[1:]
summed = tf.reduce_sum(neg_log_prob, axis=reduce_dims)
return summed / 10 |
<SYSTEM_TASK:>
Set the random seed from flag everywhere.
<END_TASK>
<USER_TASK:>
Description:
def set_random_seed():
"""Set the random seed from flag everywhere.""" |
tf.set_random_seed(FLAGS.random_seed)
random.seed(FLAGS.random_seed)
np.random.seed(FLAGS.random_seed) |
<SYSTEM_TASK:>
Generate data for a registered problem.
<END_TASK>
<USER_TASK:>
Description:
def generate_data_for_registered_problem(problem_name):
"""Generate data for a registered problem.""" |
tf.logging.info("Generating data for %s.", problem_name)
if FLAGS.num_shards:
raise ValueError("--num_shards should not be set for registered Problem.")
problem = registry.problem(problem_name)
task_id = None if FLAGS.task_id < 0 else FLAGS.task_id
data_dir = os.path.expanduser(FLAGS.data_dir)
tmp_dir = os.path.expanduser(FLAGS.tmp_dir)
if task_id is None and problem.multiprocess_generate:
if FLAGS.task_id_start != -1:
assert FLAGS.task_id_end != -1
task_id_start = FLAGS.task_id_start
task_id_end = FLAGS.task_id_end
else:
task_id_start = 0
task_id_end = problem.num_generate_tasks
pool = multiprocessing.Pool(processes=FLAGS.num_concurrent_processes)
problem.prepare_to_generate(data_dir, tmp_dir)
args = [(problem_name, data_dir, tmp_dir, task_id)
for task_id in range(task_id_start, task_id_end)]
pool.map(generate_data_in_process, args)
else:
problem.generate_data(data_dir, tmp_dir, task_id) |
<SYSTEM_TASK:>
Checks if the filename exists under the path.
<END_TASK>
<USER_TASK:>
Description:
def _file_exists(path, filename):
"""Checks if the filename exists under the path.""" |
return os.path.isfile(os.path.join(path, filename)) |
<SYSTEM_TASK:>
Checks that the filename stays under the given path (relative, no traversal).
<END_TASK>
<USER_TASK:>
Description:
def _is_relative(path, filename):
"""Checks if the filename is relative, not absolute.""" |
return os.path.abspath(os.path.join(path, filename)).startswith(path) |
<SYSTEM_TASK:>
Generalized advantage estimator.
<END_TASK>
<USER_TASK:>
Description:
def calculate_generalized_advantage_estimator(
reward, value, done, gae_gamma, gae_lambda):
# pylint: disable=g-doc-args
"""Generalized advantage estimator.
Returns:
GAE estimator. It will be one element shorter than the input; this is
because to compute GAE for [0, ..., N-1] one needs V for [1, ..., N].
""" |
# pylint: enable=g-doc-args
next_value = value[1:, :]
next_not_done = 1 - tf.cast(done[1:, :], tf.float32)
delta = (reward[:-1, :] + gae_gamma * next_value * next_not_done
- value[:-1, :])
return_ = tf.reverse(tf.scan(
lambda agg, cur: cur[0] + cur[1] * gae_gamma * gae_lambda * agg,
[tf.reverse(delta, [0]), tf.reverse(next_not_done, [0])],
tf.zeros_like(delta[0, :]),
parallel_iterations=1), [0])
return tf.check_numerics(return_, "return") |
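The reversed tf.scan above implements the standard backward GAE recursion A_t = delta_t + gamma * lambda * (1 - done_{t+1}) * A_{t+1}. A hedged NumPy reference (assuming time-major [time, batch] arrays, as in the function) that can be used to unit-test it:

import numpy as np

def gae_np(reward, value, done, gamma, lam):
  """NumPy reference for the reversed tf.scan above."""
  next_value = value[1:]
  next_not_done = 1.0 - done[1:].astype(np.float32)
  delta = reward[:-1] + gamma * next_value * next_not_done - value[:-1]
  advantage = np.zeros_like(delta)
  agg = np.zeros_like(delta[0])
  # Backward pass: A_t = delta_t + gamma * lambda * (1 - done_{t+1}) * A_{t+1}.
  for t in reversed(range(delta.shape[0])):
    agg = delta[t] + gamma * lam * next_not_done[t] * agg
    advantage[t] = agg
  return advantage

# Tiny smoke test: [time=4, batch=1] inputs; output is one step shorter.
reward = np.ones((4, 1), np.float32)
value = np.zeros((4, 1), np.float32)
done = np.zeros((4, 1), np.bool_)
print(gae_np(reward, value, done, gamma=0.99, lam=0.95))  # shape [3, 1]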
<SYSTEM_TASK:>
Returns a reading spec of a gym space.
<END_TASK>
<USER_TASK:>
Description:
def gym_space_spec(gym_space):
"""Returns a reading spec of a gym space.
NOTE: Only implemented currently for Box and Discrete.
Args:
gym_space: instance of gym.spaces whose spec we want.
Returns:
Reading spec for that space.
Raises:
NotImplementedError: For spaces whose reading spec we haven't implemented.
""" |
# First try to determine the type.
try:
tf_dtype = tf.as_dtype(gym_space.dtype)
except TypeError as e:
tf.logging.error("Cannot convert space's type [%s] to tf.dtype",
gym_space.dtype)
raise e
# Now hand it over to the specialized functions.
if isinstance(gym_space, Box):
return box_space_spec(gym_space, tf_dtype)
elif isinstance(gym_space, Discrete):
return discrete_space_spec(gym_space, tf_dtype)
else:
raise NotImplementedError |
<SYSTEM_TASK:>
Number of elements that can be represented by the space.
<END_TASK>
<USER_TASK:>
Description:
def cardinality(gym_space):
"""Number of elements that can be represented by the space.
Makes the most sense for Discrete or Box type with integral dtype, ex: number
of actions in an action space.
Args:
gym_space: The gym space.
Returns:
np.int64 number of observations that can be represented by this space, or
returns None when this doesn't make sense, i.e. float boxes etc.
Raises:
NotImplementedError when a space's cardinality makes sense but we haven't
implemented it.
""" |
if (gym_space.dtype == np.float32) or (gym_space.dtype == np.float64):
    tf.logging.error("Returning None for a float gym space's cardinality: %s",
                     gym_space)
return None
if isinstance(gym_space, Discrete):
return gym_space.n
if isinstance(gym_space, Box):
# Construct a box with all possible values in this box and take a product.
return np.prod(gym_space.high - gym_space.low + 1)
raise NotImplementedError |
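A hedged illustration of the rules above (assumes the gym package is installed):

import numpy as np
from gym.spaces import Box, Discrete

print(cardinality(Discrete(6)))  # 6
# Integral Box: (high - low + 1) per dimension, multiplied together.
print(cardinality(Box(low=0, high=9, shape=(2,), dtype=np.int64)))  # 100
# Float boxes have no meaningful cardinality.
print(cardinality(Box(low=0.0, high=1.0, shape=(2,), dtype=np.float32)))  # None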
<SYSTEM_TASK:>
RMSE but will argmax if last dim is not 1.
<END_TASK>
<USER_TASK:>
Description:
def image_rmse(predictions, labels, weights_fn=common_layers.weights_all):
"""RMSE but will argmax if last dim is not 1.""" |
if common_layers.shape_list(predictions)[-1] == 1:
predictions = tf.squeeze(predictions, axis=[-1])
else:
predictions = tf.argmax(predictions, axis=-1)
return padded_rmse(predictions, labels, weights_fn) |
<SYSTEM_TASK:>
Explained variance, also known as R^2.
<END_TASK>
<USER_TASK:>
Description:
def padded_variance_explained(predictions,
labels,
weights_fn=common_layers.weights_all):
"""Explained variance, also known as R^2.""" |
predictions, labels = common_layers.pad_with_zeros(predictions, labels)
targets = labels
weights = weights_fn(targets)
y_bar = tf.reduce_mean(weights * targets)
tot_ss = tf.reduce_sum(weights * tf.pow(targets - y_bar, 2))
res_ss = tf.reduce_sum(weights * tf.pow(targets - predictions, 2))
r2 = 1. - res_ss / tot_ss
return r2, tf.reduce_sum(weights) |
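A hedged NumPy check of the formula with all-ones weights (in that case y_bar reduces to the plain mean):

import numpy as np

targets = np.array([1.0, 2.0, 3.0, 4.0])
predictions = np.array([1.1, 1.9, 3.2, 3.8])
y_bar = targets.mean()                         # plain mean under unit weights
tot_ss = np.sum((targets - y_bar) ** 2)        # total sum of squares: 5.0
res_ss = np.sum((targets - predictions) ** 2)  # residual sum of squares: 0.1
print(1.0 - res_ss / tot_ss)                   # 0.98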
<SYSTEM_TASK:>
Average edit distance, ignoring padding 0s.
<END_TASK>
<USER_TASK:>
Description:
def sequence_edit_distance(predictions,
labels,
weights_fn=common_layers.weights_nonzero):
"""Average edit distance, ignoring padding 0s.
The score returned is the edit distance divided by the total length of
reference truth and the weight returned is the total length of the truth.
Args:
predictions: Tensor of shape [`batch_size`, `length`, 1, `num_classes`] and
type tf.float32 representing the logits, 0-padded.
labels: Tensor of shape [`batch_size`, `length`, 1, 1] and type tf.int32
representing the labels of same length as logits and 0-padded.
weights_fn: ignored. The weights returned are the total length of the ground
truth labels, excluding 0-paddings.
Returns:
(edit distance / reference length, reference length)
Raises:
ValueError: if weights_fn is not common_layers.weights_nonzero.
""" |
if weights_fn is not common_layers.weights_nonzero:
raise ValueError("Only weights_nonzero can be used for this metric.")
with tf.variable_scope("edit_distance", values=[predictions, labels]):
# Transform logits into sequence classes by taking max at every step.
predictions = tf.to_int32(
tf.squeeze(tf.argmax(predictions, axis=-1), axis=(2, 3)))
nonzero_idx = tf.where(tf.not_equal(predictions, 0))
sparse_outputs = tf.SparseTensor(nonzero_idx,
tf.gather_nd(predictions, nonzero_idx),
tf.shape(predictions, out_type=tf.int64))
labels = tf.squeeze(labels, axis=(2, 3))
nonzero_idx = tf.where(tf.not_equal(labels, 0))
label_sparse_outputs = tf.SparseTensor(nonzero_idx,
tf.gather_nd(labels, nonzero_idx),
tf.shape(labels, out_type=tf.int64))
distance = tf.reduce_sum(
tf.edit_distance(sparse_outputs, label_sparse_outputs, normalize=False))
reference_length = tf.to_float(common_layers.shape_list(nonzero_idx)[0])
return distance / reference_length, reference_length |
<SYSTEM_TASK:>
Average log-perplexity excluding padding 0s. No smoothing.
<END_TASK>
<USER_TASK:>
Description:
def padded_neg_log_perplexity(predictions,
labels,
weights_fn=common_layers.weights_nonzero):
"""Average log-perplexity exluding padding 0s. No smoothing.""" |
num, den = common_layers.padded_cross_entropy(
predictions, labels, 0.0, weights_fn=weights_fn, reduce_sum=False)
return (-num, den) |
<SYSTEM_TASK:>
Average log-perplexity with custom targets_mask.
<END_TASK>
<USER_TASK:>
Description:
def padded_neg_log_perplexity_with_masking(
predictions,
labels,
features,
weights_fn=None):
"""Average log-perplexity with custom targets_mask.""" |
del weights_fn
if "targets_mask" not in features:
raise ValueError("masked_neg_log_perplexity requires targets_mask feature")
# Features are 4 dimensional, so we need to reshape the targets_mask to match
# the shape of the labels. A lot of models rely on these features being 4D,
# so it's best to update the shape of the mask.
extended_targets_mask_shape = common_layers.shape_list(
features["targets_mask"])
extended_targets_mask_shape.extend([1, 1])
features["targets_mask"] = tf.reshape(features["targets_mask"],
shape=extended_targets_mask_shape)
mask_fn = lambda labels: features["targets_mask"]
return padded_neg_log_perplexity(predictions, labels, mask_fn) |
<SYSTEM_TASK:>
Used to evaluate the VQA accuracy.
<END_TASK>
<USER_TASK:>
Description:
def multilabel_accuracy_matchk(predictions,
labels,
k,
weights_fn=common_layers.weights_nonzero):
"""Used to evaluate the VQA accuracy.
  Let n be the number of times the prediction appears in the labels; the
  final score is min(n/k, 1).
Refer to https://arxiv.org/pdf/1505.00468.pdf.
Args:
predictions: A tensor with shape [batch_size, 1, 1, 1, vocab_size].
labels: A tensor with shape [batch_size, length, 1, 1].
k: A tensor constant.
weights_fn: weight function.
Returns:
scores: min(n/k, 1).
weights: returns all ones.
""" |
predictions = tf.to_int32(tf.argmax(predictions, axis=-1))
scores = tf.to_float(tf.equal(predictions, labels))
# those label == 0 do not count
weights = weights_fn(labels)
scores *= weights
scores = tf.reduce_sum(scores, axis=[1, 2, 3])
scores = tf.minimum(scores / tf.to_float(k), 1)
# every sample count
weights = tf.ones(tf.shape(scores), dtype=tf.float32)
return scores, weights |
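Worked example of the min(n/k, 1) rule: with k=3, a prediction matching 2 of the annotator labels scores 2/3, and matching 3 or more saturates at 1. A hedged NumPy sketch of the per-example arithmetic:

import numpy as np

k = 3
prediction = 7                   # predicted answer id for one example
labels = np.array([7, 7, 0, 5])  # annotator answers; 0 is padding, not counted
n = np.sum((labels == prediction) & (labels != 0))
print(min(n / float(k), 1.0))    # 2/3, since the prediction matched twice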
<SYSTEM_TASK:>
Precision of set predictions.
<END_TASK>
<USER_TASK:>
Description:
def set_precision(predictions, labels,
weights_fn=common_layers.weights_nonzero):
"""Precision of set predictions.
Args:
predictions : A Tensor of scores of shape [batch, nlabels].
labels: A Tensor of int32s giving true set elements,
of shape [batch, seq_length].
weights_fn: A function to weight the elements.
Returns:
hits: A Tensor of shape [batch, nlabels].
weights: A Tensor of shape [batch, nlabels].
""" |
with tf.variable_scope("set_precision", values=[predictions, labels]):
labels = tf.squeeze(labels, [2, 3])
weights = weights_fn(labels)
labels = tf.one_hot(labels, predictions.shape[-1])
labels = tf.reduce_max(labels, axis=1)
labels = tf.cast(labels, tf.bool)
return tf.to_float(tf.equal(labels, predictions)), weights |
<SYSTEM_TASK:>
Reshapes predictions and passes it to tensorboard.
<END_TASK>
<USER_TASK:>
Description:
def image_summary(predictions, targets, hparams):
"""Reshapes predictions and passes it to tensorboard.
Args:
predictions : The predicted image (logits).
targets : The ground truth.
hparams: model hparams.
Returns:
summary_proto: containing the summary images.
weights: A Tensor of zeros of the same shape as predictions.
""" |
del hparams
results = tf.cast(tf.argmax(predictions, axis=-1), tf.uint8)
gold = tf.cast(targets, tf.uint8)
summary1 = tf.summary.image("prediction", results, max_outputs=2)
summary2 = tf.summary.image("data", gold, max_outputs=2)
summary = tf.summary.merge([summary1, summary2])
return summary, tf.zeros_like(predictions) |
<SYSTEM_TASK:>
Calculate softmax cross entropy given one-hot labels and logits.
<END_TASK>
<USER_TASK:>
Description:
def softmax_cross_entropy_one_hot(logits, labels, weights_fn=None):
"""Calculate softmax cross entropy given one-hot labels and logits.
Args:
logits: Tensor of size [batch-size, o=1, p=1, num-classes]
labels: Tensor of size [batch-size, o=1, p=1, num-classes]
weights_fn: Function that takes in labels and weighs examples (unused)
Returns:
cross-entropy (scalar), weights
""" |
with tf.variable_scope("softmax_cross_entropy_one_hot",
values=[logits, labels]):
del weights_fn
cross_entropy = tf.losses.softmax_cross_entropy(
onehot_labels=labels, logits=logits)
return cross_entropy, tf.constant(1.0) |
<SYSTEM_TASK:>
Calculate accuracy for a set, given one-hot labels and logits.
<END_TASK>
<USER_TASK:>
Description:
def sigmoid_accuracy_one_hot(logits, labels, weights_fn=None):
"""Calculate accuracy for a set, given one-hot labels and logits.
Args:
logits: Tensor of size [batch-size, o=1, p=1, num-classes]
labels: Tensor of size [batch-size, o=1, p=1, num-classes]
weights_fn: Function that takes in labels and weighs examples (unused)
Returns:
accuracy (scalar), weights
""" |
with tf.variable_scope("sigmoid_accuracy_one_hot", values=[logits, labels]):
del weights_fn
predictions = tf.nn.sigmoid(logits)
labels = tf.argmax(labels, -1)
predictions = tf.argmax(predictions, -1)
_, accuracy = tf.metrics.accuracy(labels=labels, predictions=predictions)
return accuracy, tf.constant(1.0) |
<SYSTEM_TASK:>
Calculate recall for a set, given one-hot labels and logits.
<END_TASK>
<USER_TASK:>
Description:
def sigmoid_recall_one_hot(logits, labels, weights_fn=None):
"""Calculate recall for a set, given one-hot labels and logits.
Predictions are converted to one-hot,
as predictions[example][arg-max(example)] = 1
Args:
logits: Tensor of size [batch-size, o=1, p=1, num-classes]
labels: Tensor of size [batch-size, o=1, p=1, num-classes]
weights_fn: Function that takes in labels and weighs examples (unused)
Returns:
recall (scalar), weights
""" |
with tf.variable_scope("sigmoid_recall_one_hot", values=[logits, labels]):
del weights_fn
num_classes = logits.shape[-1]
predictions = tf.nn.sigmoid(logits)
predictions = tf.argmax(predictions, -1)
predictions = tf.one_hot(predictions, num_classes)
_, recall = tf.metrics.recall(labels=labels, predictions=predictions)
return recall, tf.constant(1.0) |
<SYSTEM_TASK:>
Calculate sigmoid cross entropy for one-hot labels and logits.
<END_TASK>
<USER_TASK:>
Description:
def sigmoid_cross_entropy_one_hot(logits, labels, weights_fn=None):
"""Calculate sigmoid cross entropy for one-hot lanels and logits.
Args:
logits: Tensor of size [batch-size, o=1, p=1, num-classes]
labels: Tensor of size [batch-size, o=1, p=1, num-classes]
weights_fn: Function that takes in labels and weighs examples (unused)
Returns:
cross_entropy (scalar), weights
""" |
with tf.variable_scope("sigmoid_cross_entropy_one_hot",
values=[logits, labels]):
del weights_fn
cross_entropy = tf.losses.sigmoid_cross_entropy(
multi_class_labels=labels, logits=logits)
return cross_entropy, tf.constant(1.0) |
<SYSTEM_TASK:>
Calculate ROC AUC.
<END_TASK>
<USER_TASK:>
Description:
def roc_auc(logits, labels, weights_fn=None):
"""Calculate ROC AUC.
Requires binary classes.
Args:
logits: Tensor of size [batch_size, 1, 1, num_classes]
labels: Tensor of size [batch_size, 1, 1, num_classes]
weights_fn: Function that takes in labels and weighs examples (unused)
Returns:
ROC AUC (scalar), weights
""" |
del weights_fn
with tf.variable_scope("roc_auc", values=[logits, labels]):
predictions = tf.argmax(logits, axis=-1)
_, auc = tf.metrics.auc(labels, predictions, curve="ROC")
return auc, tf.constant(1.0) |
<SYSTEM_TASK:>
Creates the evaluation metrics for the model.
<END_TASK>
<USER_TASK:>
Description:
def create_evaluation_metrics(problems, model_hparams):
"""Creates the evaluation metrics for the model.
Args:
problems: List of Problem instances.
model_hparams: a set of hparams.
Returns:
dict<metric name, metric function>. The metric functions have signature
(Tensor predictions, features) -> (metric Tensor, update op), where features
is a dict with keys {targets}.
Raises:
ValueError: if the metrics specified by a problem are not recognized (i.e.
      are not defined in the Metrics enum).
""" |
def reduce_dimensions(predictions, labels):
"""Reduce dimensions for high-dimensional predictions and labels."""
# We will treat first dimensions as batch. One example are video frames.
if len(predictions.get_shape()) > 5:
predictions_shape = common_layers.shape_list(predictions)
predictions = tf.reshape(
predictions, [predictions_shape[0], predictions_shape[1], -1,
predictions_shape[-1]])
labels_shape = common_layers.shape_list(labels)
labels = tf.reshape(
labels, [labels_shape[0], labels_shape[1], -1])
return predictions, labels
def make_problem_specific_metric_fn(metric_fn, weights_fn):
"""Create a metric fn."""
def problem_metric_fn(predictions, features, labels):
"""Metric fn."""
# Send along the entire features dict if the metric fn has the kwarg
# "features".
kwargs = {}
args, _, keywords, _ = inspect.getargspec(metric_fn)
if ("features" in args) or keywords:
kwargs["features"] = features
predictions, labels = reduce_dimensions(predictions, labels)
scores, weights = metric_fn(predictions, labels,
weights_fn=weights_fn, **kwargs)
return tf.metrics.mean(scores, weights)
return problem_metric_fn
def make_image_wrapped_metric_fn(metric_fn):
"""Metric fn without tf.metrics.mean."""
def image_wrapped_metric_fn(predictions,
features,
labels,
weights_fn=common_layers.weights_all):
del weights_fn
del features
predictions, labels = reduce_dimensions(predictions, labels)
return metric_fn(predictions, labels, model_hparams)
return image_wrapped_metric_fn
def weights_fn_for_mp(problem_task_id):
return lambda x: common_layers.weights_multi_problem(x, problem_task_id)
eval_metrics = {}
for problem_instance in problems:
problem_name = problem_instance.name
if problem_instance.was_reversed:
problem_name += "_rev"
metrics = problem_instance.eval_metric_fns(model_hparams)
if hasattr(model_hparams.problem, "task_list"):
metrics = model_hparams.problem.eval_metric_fns(model_hparams)
tm = problem_instance.get_hparams(model_hparams).modality["targets"]
if not isinstance(tm, dict):
tm = {"targets": tm}
for target_name, modality in six.iteritems(tm):
weights_fn = model_hparams.weights_fn.get(
"targets",
modalities.get_weights_fn(modality))
if hasattr(model_hparams.problem, "task_list"):
ptid = problem_instance.task_id # pylint: disable=cell-var-from-loop
weights_fn = weights_fn_for_mp(ptid)
for metric, metric_fn in six.iteritems(metrics):
overload_eval_metric_name = getattr(
model_hparams, "overload_eval_metric_name", None)
if len(problems) == 1 and overload_eval_metric_name:
metric_name = "metrics-%s/%s/%s" % (
overload_eval_metric_name, target_name, metric)
else:
metric_name = "metrics-%s/%s/%s" % (problem_name, target_name, metric)
if metric == Metrics.IMAGE_SUMMARY:
eval_metrics[metric_name] = make_image_wrapped_metric_fn(metric_fn)
else:
eval_metrics[metric_name] = make_problem_specific_metric_fn(
metric_fn, weights_fn)
return eval_metrics |
<SYSTEM_TASK:>
Calculate word error rate.
<END_TASK>
<USER_TASK:>
Description:
def word_error_rate(raw_predictions,
labels,
lookup=None,
weights_fn=common_layers.weights_nonzero):
"""Calculate word error rate.
Args:
raw_predictions: The raw predictions.
labels: The actual labels.
lookup: A tf.constant mapping indices to output tokens.
weights_fn: Weighting function.
Returns:
The word error rate.
""" |
def from_tokens(raw, lookup_):
gathered = tf.gather(lookup_, tf.cast(raw, tf.int32))
joined = tf.regex_replace(tf.reduce_join(gathered, axis=1), b"<EOS>.*", b"")
cleaned = tf.regex_replace(joined, b"_", b" ")
tokens = tf.string_split(cleaned, " ")
return tokens
def from_characters(raw, lookup_):
"""Convert ascii+2 encoded codes to string-tokens."""
corrected = tf.bitcast(
tf.clip_by_value(tf.subtract(raw, 2), 0, 255), tf.uint8)
gathered = tf.gather(lookup_, tf.cast(corrected, tf.int32))[:, :, 0]
joined = tf.reduce_join(gathered, axis=1)
cleaned = tf.regex_replace(joined, b"\0", b"")
tokens = tf.string_split(cleaned, " ")
return tokens
if lookup is None:
lookup = tf.constant([chr(i) for i in range(256)])
convert_fn = from_characters
else:
convert_fn = from_tokens
if weights_fn is not common_layers.weights_nonzero:
raise ValueError("Only weights_nonzero can be used for this metric.")
with tf.variable_scope("word_error_rate", values=[raw_predictions, labels]):
raw_predictions = tf.squeeze(
tf.argmax(raw_predictions, axis=-1), axis=(2, 3))
labels = tf.squeeze(labels, axis=(2, 3))
reference = convert_fn(labels, lookup)
predictions = convert_fn(raw_predictions, lookup)
distance = tf.reduce_sum(
tf.edit_distance(predictions, reference, normalize=False))
reference_length = tf.cast(
tf.size(reference.values, out_type=tf.int32), dtype=tf.float32)
return distance / reference_length, reference_length |
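For reference, word error rate is token-level edit distance divided by the reference length; a hedged pure-Python check (illustrative, not the TF metric above):

def wer(ref_tokens, hyp_tokens):
  # Classic dynamic-programming (Levenshtein) edit distance over tokens.
  d = [[0] * (len(hyp_tokens) + 1) for _ in range(len(ref_tokens) + 1)]
  for i in range(len(ref_tokens) + 1):
    d[i][0] = i
  for j in range(len(hyp_tokens) + 1):
    d[0][j] = j
  for i in range(1, len(ref_tokens) + 1):
    for j in range(1, len(hyp_tokens) + 1):
      cost = 0 if ref_tokens[i - 1] == hyp_tokens[j - 1] else 1
      d[i][j] = min(d[i - 1][j] + 1,        # deletion
                    d[i][j - 1] + 1,        # insertion
                    d[i - 1][j - 1] + cost) # substitution
  return d[-1][-1] / float(len(ref_tokens))

print(wer("the cat sat".split(), "the cat sit".split()))  # 1/3 ~ 0.333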
<SYSTEM_TASK:>
Calculate pearson correlation coefficient.
<END_TASK>
<USER_TASK:>
Description:
def pearson_correlation_coefficient(predictions, labels, weights_fn=None):
"""Calculate pearson correlation coefficient.
Args:
predictions: The raw predictions.
labels: The actual labels.
weights_fn: Weighting function.
Returns:
The pearson correlation coefficient.
""" |
del weights_fn
_, pearson = tf.contrib.metrics.streaming_pearson_correlation(predictions,
labels)
return pearson, tf.constant(1.0) |
<SYSTEM_TASK:>
A stack of attention_lm layers.
<END_TASK>
<USER_TASK:>
Description:
def attention_lm_decoder(decoder_input,
decoder_self_attention_bias,
hparams,
name="decoder"):
"""A stack of attention_lm layers.
Args:
decoder_input: a Tensor
decoder_self_attention_bias: bias Tensor for self-attention
(see common_attention.attention_bias())
hparams: hyperparameters for model
name: a string
Returns:
y: a Tensors
""" |
x = decoder_input
with tf.variable_scope(name):
for layer in range(hparams.num_hidden_layers):
with tf.variable_scope("layer_%d" % layer):
with tf.variable_scope("self_attention"):
y = common_attention.multihead_attention(
common_layers.layer_preprocess(
x, hparams), None, decoder_self_attention_bias,
hparams.attention_key_channels or hparams.hidden_size,
hparams.attention_value_channels or hparams.hidden_size,
hparams.hidden_size, hparams.num_heads, hparams.attention_dropout)
x = common_layers.layer_postprocess(x, y, hparams)
with tf.variable_scope("ffn"):
y = common_layers.conv_hidden_relu(
common_layers.layer_preprocess(x, hparams),
hparams.filter_size,
hparams.hidden_size,
dropout=hparams.relu_dropout)
x = common_layers.layer_postprocess(x, y, hparams)
return common_layers.layer_preprocess(x, hparams) |
<SYSTEM_TASK:>
BLEU score computation between labels and predictions.
<END_TASK>
<USER_TASK:>
Description:
def bleu_score(predictions, labels, **unused_kwargs):
"""BLEU score computation between labels and predictions.
An approximate BLEU scoring method since we do not glue word pieces or
decode the ids and tokenize the output. By default, we use ngram order of 4
and use brevity penalty. Also, this does not have beam search.
Args:
predictions: tensor, model predictions
labels: tensor, gold output.
Returns:
    bleu: float, approximate BLEU score
""" |
outputs = tf.to_int32(tf.argmax(predictions, axis=-1))
# Convert the outputs and labels to a [batch_size, input_length] tensor.
outputs = tf.squeeze(outputs, axis=[-1, -2])
labels = tf.squeeze(labels, axis=[-1, -2])
bleu = tf.py_func(compute_bleu, (labels, outputs), tf.float32)
return bleu, tf.constant(1.0) |
<SYSTEM_TASK:>
r"""Tokenize a string following the official BLEU implementation.
<END_TASK>
<USER_TASK:>
Description:
def bleu_tokenize(string):
r"""Tokenize a string following the official BLEU implementation.
  See https://github.com/moses-smt/mosesdecoder/blob/master/scripts/generic/mteval-v14.pl#L954-L983
In our case, the input string is expected to be just one line
and no HTML entities de-escaping is needed.
So we just tokenize on punctuation and symbols,
except when a punctuation is preceded and followed by a digit
(e.g. a comma/dot as a thousand/decimal separator).
Note that a number (e.g. a year) followed by a dot at the end of sentence
is NOT tokenized,
i.e. the dot stays with the number because `s/(\p{P})(\P{N})/ $1 $2/g`
does not match this case (unless we add a space after each sentence).
However, this error is already in the original mteval-v14.pl
and we want to be consistent with it.
Args:
string: the input string
Returns:
a list of tokens
""" |
string = uregex.nondigit_punct_re.sub(r"\1 \2 ", string)
string = uregex.punct_nondigit_re.sub(r" \1 \2", string)
string = uregex.symbol_re.sub(r" \1 ", string)
return string.split() |
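A hedged standalone sketch of the same tokenization using the third-party `regex` package (the library itself uses precompiled patterns from the module-level `uregex`); the output shown is what the mteval rules imply:

import regex  # pip install regex; supports \p{...} classes

def bleu_tokenize_sketch(s):
  s = regex.sub(r"([^\d])(\p{P})", r"\1 \2 ", s)  # punct after a non-digit
  s = regex.sub(r"(\p{P})([^\d])", r" \1 \2", s)  # punct before a non-digit
  s = regex.sub(r"(\p{S})", r" \1 ", s)           # symbols
  return s.split()

print(bleu_tokenize_sketch(u"Hello, World! It costs 1,000.50 dollars."))
# ['Hello', ',', 'World', '!', 'It', 'costs', '1,000.50', 'dollars', '.']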
<SYSTEM_TASK:>
Glob twice, first time possibly catching `NotFoundError`.
<END_TASK>
<USER_TASK:>
Description:
def _try_twice_tf_glob(pattern):
"""Glob twice, first time possibly catching `NotFoundError`.
tf.gfile.Glob may crash with
```
tensorflow.python.framework.errors_impl.NotFoundError:
xy/model.ckpt-1130761_temp_9cb4cb0b0f5f4382b5ea947aadfb7a40;
No such file or directory
```
Standard glob.glob does not have this bug, but does not handle multiple
filesystems (e.g. `gs://`), so we call tf.gfile.Glob, the first time possibly
catching the `NotFoundError`.
Args:
pattern: str, glob pattern.
Returns:
list<str> matching filepaths.
""" |
try:
return tf.gfile.Glob(pattern)
except tf.errors.NotFoundError:
return tf.gfile.Glob(pattern) |
<SYSTEM_TASK:>
Return list of StepFiles sorted by step from files at path_prefix.
<END_TASK>
<USER_TASK:>
Description:
def _read_stepfiles_list(path_prefix, path_suffix=".index", min_steps=0):
"""Return list of StepFiles sorted by step from files at path_prefix.""" |
stepfiles = []
for filename in _try_twice_tf_glob(path_prefix + "*-[0-9]*" + path_suffix):
basename = filename[:-len(path_suffix)] if path_suffix else filename
try:
steps = int(basename.rsplit("-")[-1])
except ValueError: # The -[0-9]* part is not an integer.
continue
if steps < min_steps:
continue
if not os.path.exists(filename):
tf.logging.info(filename + " was deleted, so skipping it")
continue
stepfiles.append(StepFile(basename, os.path.getmtime(filename),
os.path.getctime(filename), steps))
return sorted(stepfiles, key=lambda x: -x.steps) |
<SYSTEM_TASK:>
Continuously yield new files with steps in filename as they appear.
<END_TASK>
<USER_TASK:>
Description:
def stepfiles_iterator(path_prefix, wait_minutes=0, min_steps=0,
path_suffix=".index", sleep_sec=10):
"""Continuously yield new files with steps in filename as they appear.
This is useful for checkpoint files or other files whose names differ just in
an integer marking the number of steps and match the wildcard path_prefix +
"*-[0-9]*" + path_suffix.
Unlike `tf.contrib.training.checkpoints_iterator`, this implementation always
starts from the oldest files (and it cannot miss any file). Note that the
oldest checkpoint may be deleted anytime by Tensorflow (if set up so). It is
up to the user to check that the files returned by this generator actually
exist.
Args:
path_prefix: The directory + possible common filename prefix to the files.
wait_minutes: The maximum amount of minutes to wait between files.
min_steps: Skip files with lower global step.
path_suffix: Common filename suffix (after steps), including possible
extension dot.
sleep_sec: How often to check for new files.
Yields:
named tuples (filename, mtime, ctime, steps) of the files as they arrive.
""" |
# Wildcard D*-[0-9]* does not match D/x-1, so if D is a directory let
# path_prefix="D/".
if not path_prefix.endswith(os.sep) and os.path.isdir(path_prefix):
path_prefix += os.sep
stepfiles = _read_stepfiles_list(path_prefix, path_suffix, min_steps)
tf.logging.info("Found %d files with steps: %s",
len(stepfiles),
", ".join(str(x.steps) for x in reversed(stepfiles)))
exit_time = time.time() + wait_minutes * 60
while True:
if not stepfiles and wait_minutes:
tf.logging.info(
"Waiting till %s if a new file matching %s*-[0-9]*%s appears",
time.asctime(time.localtime(exit_time)), path_prefix, path_suffix)
while True:
stepfiles = _read_stepfiles_list(path_prefix, path_suffix, min_steps)
if stepfiles or time.time() > exit_time:
break
time.sleep(sleep_sec)
if not stepfiles:
return
stepfile = stepfiles.pop()
exit_time, min_steps = (stepfile.ctime + wait_minutes * 60,
stepfile.steps + 1)
yield stepfile |
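A hedged usage sketch ("/tmp/run/model.ckpt" is an illustrative prefix): process every checkpoint of a run as it appears, waiting up to 60 minutes for new ones before giving up.

for stepfile in stepfiles_iterator("/tmp/run/model.ckpt", wait_minutes=60):
  print("step %d -> %s" % (stepfile.steps, stepfile.filename))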
<SYSTEM_TASK:>
Extract the VQA V2 annotation files to directory unless it's there.
<END_TASK>
<USER_TASK:>
Description:
def _get_vqa_v2_annotations(directory,
annotation_url,
annotation_filename="vqa_v2.tar.gz"):
"""Extract the VQA V2 annotation files to directory unless it's there.""" |
annotation_file = generator_utils.maybe_download_from_drive(
directory, annotation_filename, annotation_url)
with tarfile.open(annotation_file, "r:gz") as annotation_tar:
annotation_tar.extractall(directory) |
<SYSTEM_TASK:>
Extract the VQA V2 image data set to directory unless it's there.
<END_TASK>
<USER_TASK:>
Description:
def _get_vqa_v2_image_raw_dataset(directory, image_root_url, image_urls):
"""Extract the VQA V2 image data set to directory unless it's there.""" |
for url in image_urls:
filename = os.path.basename(url)
download_url = os.path.join(image_root_url, url)
path = generator_utils.maybe_download(directory, filename, download_url)
    # Note: str.strip removes a character set, not a suffix, so slice instead.
    unzip_dir = os.path.join(directory, filename[:-len(".zip")])
if not tf.gfile.Exists(unzip_dir):
zipfile.ZipFile(path, "r").extractall(directory) |
<SYSTEM_TASK:>
Extract the VQA V2 feature data set to directory unless it's there.
<END_TASK>
<USER_TASK:>
Description:
def _get_vqa_v2_image_feature_dataset(
directory, feature_url, feature_filename="mscoco_feat.tar.gz"):
"""Extract the VQA V2 feature data set to directory unless it's there.""" |
feature_file = generator_utils.maybe_download_from_drive(
directory, feature_filename, feature_url)
with tarfile.open(feature_file, "r:gz") as feature_tar:
feature_tar.extractall(directory) |
<SYSTEM_TASK:>
Helper function for raising a value error for bad assignment.
<END_TASK>
<USER_TASK:>
Description:
def _parse_fail(name, var_type, value, values):
"""Helper function for raising a value error for bad assignment.""" |
raise ValueError(
'Could not parse hparam \'%s\' of type \'%s\' with value \'%s\' in %s' %
(name, var_type.__name__, value, values)) |
<SYSTEM_TASK:>
Update results_dictionary with a scalar value.
<END_TASK>
<USER_TASK:>
Description:
def _process_scalar_value(name, parse_fn, var_type, m_dict, values,
results_dictionary):
"""Update results_dictionary with a scalar value.
Used to update the results_dictionary to be returned by parse_values when
encountering a clause with a scalar RHS (e.g. "s=5" or "arr[0]=5".)
Mutates results_dictionary.
Args:
name: Name of variable in assignment ("s" or "arr").
parse_fn: Function for parsing the actual value.
var_type: Type of named variable.
m_dict: Dictionary constructed from regex parsing.
m_dict['val']: RHS value (scalar)
m_dict['index']: List index value (or None)
values: Full expression being parsed
results_dictionary: The dictionary being updated for return by the parsing
function.
Raises:
ValueError: If the name has already been used.
""" |
try:
parsed_value = parse_fn(m_dict['val'])
except ValueError:
_parse_fail(name, var_type, m_dict['val'], values)
# If no index is provided
if not m_dict['index']:
if name in results_dictionary:
_reuse_fail(name, values)
results_dictionary[name] = parsed_value
else:
if name in results_dictionary:
      # If the name has already been used as a scalar, it will appear here
      # mapped to a non-dictionary value, which is a reuse error.
if not isinstance(results_dictionary.get(name), dict):
_reuse_fail(name, values)
else:
results_dictionary[name] = {}
index = int(m_dict['index'])
# Make sure the index position hasn't already been assigned a value.
if index in results_dictionary[name]:
_reuse_fail('{}[{}]'.format(name, index), values)
results_dictionary[name][index] = parsed_value |
<SYSTEM_TASK:>
Update results_dictionary from a list of values.
<END_TASK>
<USER_TASK:>
Description:
def _process_list_value(name, parse_fn, var_type, m_dict, values,
results_dictionary):
"""Update results_dictionary from a list of values.
Used to update results_dictionary to be returned by parse_values when
encountering a clause with a list RHS (e.g. "arr=[1,2,3]".)
Mutates results_dictionary.
Args:
name: Name of variable in assignment ("arr").
parse_fn: Function for parsing individual values.
var_type: Type of named variable.
m_dict: Dictionary constructed from regex parsing.
m_dict['val']: RHS value (scalar)
values: Full expression being parsed
results_dictionary: The dictionary being updated for return by the parsing
function.
Raises:
ValueError: If the name has an index or the values cannot be parsed.
""" |
if m_dict['index'] is not None:
raise ValueError('Assignment of a list to a list index.')
elements = filter(None, re.split('[ ,]', m_dict['vals']))
# Make sure the name hasn't already been assigned a value
if name in results_dictionary:
    _reuse_fail(name, values)
try:
results_dictionary[name] = [parse_fn(e) for e in elements]
except ValueError:
_parse_fail(name, var_type, m_dict['vals'], values) |
<SYSTEM_TASK:>
Cast hparam to the provided type, if compatible.
<END_TASK>
<USER_TASK:>
Description:
def _cast_to_type_if_compatible(name, param_type, value):
"""Cast hparam to the provided type, if compatible.
Args:
name: Name of the hparam to be cast.
param_type: The type of the hparam.
value: The value to be cast, if compatible.
Returns:
The result of casting `value` to `param_type`.
Raises:
ValueError: If the type of `value` is not compatible with param_type.
* If `param_type` is a string type, but `value` is not.
* If `param_type` is a boolean, but `value` is not, or vice versa.
* If `param_type` is an integer type, but `value` is not.
* If `param_type` is a float type, but `value` is not a numeric type.
""" |
fail_msg = (
"Could not cast hparam '%s' of type '%s' from value %r" %
(name, param_type, value))
# Some callers use None, for which we can't do any casting/checking. :(
if issubclass(param_type, type(None)):
return value
# Avoid converting a non-string type to a string.
if (issubclass(param_type, (six.string_types, six.binary_type)) and
not isinstance(value, (six.string_types, six.binary_type))):
raise ValueError(fail_msg)
# Avoid converting a number or string type to a boolean or vice versa.
if issubclass(param_type, bool) != isinstance(value, bool):
raise ValueError(fail_msg)
# Avoid converting float to an integer (the reverse is fine).
if (issubclass(param_type, numbers.Integral) and
not isinstance(value, numbers.Integral)):
raise ValueError(fail_msg)
# Avoid converting a non-numeric type to a numeric type.
if (issubclass(param_type, numbers.Number) and
not isinstance(value, numbers.Number)):
raise ValueError(fail_msg)
return param_type(value) |
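Hedged examples of the compatibility rules above:

print(_cast_to_type_if_compatible("lr", float, 1))   # 1.0 (int -> float is fine)
print(_cast_to_type_if_compatible("steps", int, 7))  # 7
# Both of the following raise ValueError:
# _cast_to_type_if_compatible("steps", int, 7.5)     # float -> int is lossy
# _cast_to_type_if_compatible("name", str, 3)        # non-string -> string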
<SYSTEM_TASK:>
Parses hyperparameter values from a string into a python map.
<END_TASK>
<USER_TASK:>
Description:
def parse_values(values, type_map, ignore_unknown=False):
"""Parses hyperparameter values from a string into a python map.
`values` is a string containing comma-separated `name=value` pairs.
For each pair, the value of the hyperparameter named `name` is set to
`value`.
If a hyperparameter name appears multiple times in `values`, a ValueError
is raised (e.g. 'a=1,a=2', 'a[1]=1,a[1]=2').
  If a hyperparameter name appears in both an index assignment and a scalar
  assignment, a ValueError is raised (e.g. 'a=[1,2,3],a[0]=1').
The hyperparameter name may contain '.' symbols, which will result in an
attribute name that is only accessible through the getattr and setattr
  functions. (Such names must first be explicitly added through add_hparam.)
WARNING: Use of '.' in your variable names is allowed, but is not well
supported and not recommended.
  The `value` in `name=value` must follow the syntax according to the
type of the parameter:
  * Scalar integer: A Python-parsable integer value. E.g.: 1,
100, -12.
* Scalar float: A Python-parsable floating point value. E.g.: 1.0,
-.54e89.
* Boolean: Either true or false.
* Scalar string: A non-empty sequence of characters, excluding comma,
spaces, and square brackets. E.g.: foo, bar_1.
* List: A comma separated list of scalar values of the parameter type
enclosed in square brackets. E.g.: [1,2,3], [1.0,1e-12], [high,low].
When index assignment is used, the corresponding type_map key should be the
list name. E.g. for "arr[1]=0" the type_map must have the key "arr" (not
"arr[1]").
Args:
values: String. Comma separated list of `name=value` pairs where
'value' must follow the syntax described above.
type_map: A dictionary mapping hyperparameter names to types. Note every
parameter name in values must be a key in type_map. The values must
conform to the types indicated, where a value V is said to conform to a
type T if either V has type T, or V is a list of elements of type T.
Hence, for a multidimensional parameter 'x' taking float values,
'x=[0.1,0.2]' will parse successfully if type_map['x'] = float.
ignore_unknown: Bool. Whether values that are missing a type in type_map
      should be ignored. If set to True, no ValueError will be raised for an
      unknown hyperparameter type.
Returns:
A python map mapping each name to either:
* A scalar value.
* A list of scalar values.
* A dictionary mapping index numbers to scalar values.
(e.g. "x=5,L=[1,2],arr[1]=3" results in {'x':5,'L':[1,2],'arr':{1:3}}")
Raises:
ValueError: If there is a problem with input.
* If `values` cannot be parsed.
* If a list is assigned to a list index (e.g. 'a[1] = [1,2,3]').
    * If the same name is assigned two different values (e.g. 'a=1,a=2',
'a[1]=1,a[1]=2', or 'a=1,a=[1]')
""" |
results_dictionary = {}
pos = 0
while pos < len(values):
m = PARAM_RE.match(values, pos)
if not m:
raise ValueError('Malformed hyperparameter value: %s' % values[pos:])
# Check that there is a comma between parameters and move past it.
pos = m.end()
# Parse the values.
m_dict = m.groupdict()
name = m_dict['name']
if name not in type_map:
if ignore_unknown:
continue
raise ValueError('Unknown hyperparameter type for %s' % name)
type_ = type_map[name]
# Set up correct parsing function (depending on whether type_ is a bool)
if type_ == bool:
def parse_bool(value):
if value in ['true', 'True']:
return True
elif value in ['false', 'False']:
return False
else:
try:
return bool(int(value))
except ValueError:
_parse_fail(name, type_, value, values)
parse = parse_bool
else:
parse = type_
    # If a single value is provided:
if m_dict['val'] is not None:
_process_scalar_value(name, parse, type_, m_dict, values,
results_dictionary)
# If the assigned value is a list:
elif m_dict['vals'] is not None:
_process_list_value(name, parse, type_, m_dict, values,
results_dictionary)
else: # Not assigned a list or value
_parse_fail(name, type_, '', values)
return results_dictionary |
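A hedged usage example mirroring the docstring:

type_map = {"x": int, "L": float, "arr": int}
print(parse_values("x=5,L=[1,2],arr[1]=3", type_map))
# -> {'x': 5, 'L': [1.0, 2.0], 'arr': {1: 3}}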
<SYSTEM_TASK:>
Set the value of an existing hyperparameter.
<END_TASK>
<USER_TASK:>
Description:
def set_hparam(self, name, value):
"""Set the value of an existing hyperparameter.
This function verifies that the type of the value matches the type of the
existing hyperparameter.
Args:
name: Name of the hyperparameter.
value: New value of the hyperparameter.
Raises:
KeyError: If the hyperparameter doesn't exist.
ValueError: If there is a type mismatch.
""" |
param_type, is_list = self._hparam_types[name]
if isinstance(value, list):
if not is_list:
raise ValueError(
'Must not pass a list for single-valued parameter: %s' % name)
setattr(self, name, [
_cast_to_type_if_compatible(name, param_type, v) for v in value])
else:
if is_list:
raise ValueError(
'Must pass a list for multi-valued parameter: %s.' % name)
setattr(self, name, _cast_to_type_if_compatible(name, param_type, value)) |
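A hedged sketch of the type check; `hparams` here is assumed to have been created with learning_rate=0.1 (a float) and hidden_sizes=[512, 512] (a list).

hparams.set_hparam("learning_rate", 0.05)       # ok: float for float param
hparams.set_hparam("hidden_sizes", [256, 256])  # ok: list for list param
# hparams.set_hparam("learning_rate", [0.05])   # ValueError: list for scalar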
<SYSTEM_TASK:>
Removes the hyperparameter with key 'name'.
<END_TASK>
<USER_TASK:>
Description:
def del_hparam(self, name):
"""Removes the hyperparameter with key 'name'.
Does nothing if it isn't present.
Args:
name: Name of the hyperparameter.
""" |
if hasattr(self, name):
delattr(self, name)
del self._hparam_types[name] |
<SYSTEM_TASK:>
Override existing hyperparameter values, parsing new values from a string.
<END_TASK>
<USER_TASK:>
Description:
def parse(self, values):
"""Override existing hyperparameter values, parsing new values from a string.
See parse_values for more detail on the allowed format for values.
Args:
values: String. Comma separated list of `name=value` pairs where 'value'
must follow the syntax described above.
Returns:
The `HParams` instance.
Raises:
ValueError: If `values` cannot be parsed or a hyperparameter in `values`
doesn't exist.
""" |
type_map = {}
for name, t in self._hparam_types.items():
param_type, _ = t
type_map[name] = param_type
values_map = parse_values(values, type_map)
return self.override_from_dict(values_map) |
<SYSTEM_TASK:>
Override existing hyperparameter values, parsing new values from a dictionary.
<END_TASK>
<USER_TASK:>
Description:
def override_from_dict(self, values_dict):
"""Override existing hyperparameter values, parsing new values from a dictionary.
Args:
values_dict: Dictionary of name:value pairs.
Returns:
The `HParams` instance.
Raises:
KeyError: If a hyperparameter in `values_dict` doesn't exist.
ValueError: If `values_dict` cannot be parsed.
""" |
for name, value in values_dict.items():
self.set_hparam(name, value)
return self |
<SYSTEM_TASK:>
Serializes the hyperparameters into JSON.
<END_TASK>
<USER_TASK:>
Description:
def to_json(self, indent=None, separators=None, sort_keys=False):
"""Serializes the hyperparameters into JSON.
Args:
indent: If a non-negative integer, JSON array elements and object members
will be pretty-printed with that indent level. An indent level of 0, or
negative, will only insert newlines. `None` (the default) selects the
most compact representation.
separators: Optional `(item_separator, key_separator)` tuple. Default is
`(', ', ': ')`.
sort_keys: If `True`, the output dictionaries will be sorted by key.
Returns:
A JSON string.
""" |
def remove_callables(x):
"""Omit callable elements from input with arbitrary nesting."""
if isinstance(x, dict):
return {k: remove_callables(v) for k, v in six.iteritems(x)
if not callable(v)}
elif isinstance(x, list):
return [remove_callables(i) for i in x if not callable(i)]
return x
return json.dumps(
remove_callables(self.values()),
indent=indent,
separators=separators,
sort_keys=sort_keys) |
<SYSTEM_TASK:>
Override existing hyperparameter values, parsing new values from a json object.
<END_TASK>
<USER_TASK:>
Description:
def parse_json(self, values_json):
"""Override existing hyperparameter values, parsing new values from a json object.
Args:
values_json: String containing a json object of name:value pairs.
Returns:
The `HParams` instance.
Raises:
KeyError: If a hyperparameter in `values_json` doesn't exist.
ValueError: If `values_json` cannot be parsed.
""" |
values_map = json.loads(values_json)
return self.override_from_dict(values_map) |
<SYSTEM_TASK:>
Return the hyperparameter values as a Python dictionary.
<END_TASK>
<USER_TASK:>
Description:
def values(self):
"""Return the hyperparameter values as a Python dictionary.
Returns:
A dictionary with hyperparameter names as keys. The values are the
hyperparameter values.
""" |
return {n: getattr(self, n) for n in self._hparam_types.keys()} |
<SYSTEM_TASK:>
Returns the value of `key` if it exists, else `default`.
<END_TASK>
<USER_TASK:>
Description:
def get(self, key, default=None):
"""Returns the value of `key` if it exists, else `default`.""" |
if key in self._hparam_types:
# Ensure that default is compatible with the parameter type.
if default is not None:
param_type, is_param_list = self._hparam_types[key]
type_str = 'list<%s>' % param_type if is_param_list else str(param_type)
fail_msg = ("Hparam '%s' of type '%s' is incompatible with "
'default=%s' % (key, type_str, default))
is_default_list = isinstance(default, list)
if is_param_list != is_default_list:
raise ValueError(fail_msg)
try:
if is_default_list:
for value in default:
_cast_to_type_if_compatible(key, param_type, value)
else:
_cast_to_type_if_compatible(key, param_type, default)
except ValueError as e:
raise ValueError('%s. %s' % (fail_msg, e))
return getattr(self, key)
return default |
<SYSTEM_TASK:>
Returns the field name given parameter type and is_list.
<END_TASK>
<USER_TASK:>
Description:
def _get_kind_name(param_type, is_list):
"""Returns the field name given parameter type and is_list.
Args:
param_type: Data type of the hparam.
is_list: Whether this is a list.
Returns:
A string representation of the field name.
Raises:
ValueError: If parameter type is not recognized.
""" |
if issubclass(param_type, bool):
# This check must happen before issubclass(param_type, six.integer_types),
# since Python considers bool to be a subclass of int.
typename = 'bool'
elif issubclass(param_type, six.integer_types):
# Setting 'int' and 'long' types to be 'int64' to ensure the type is
# compatible with both Python2 and Python3.
typename = 'int64'
elif issubclass(param_type, (six.string_types, six.binary_type)):
# Setting 'string' and 'bytes' types to be 'bytes' to ensure the type is
# compatible with both Python2 and Python3.
typename = 'bytes'
elif issubclass(param_type, float):
typename = 'float'
else:
raise ValueError('Unsupported parameter type: %s' % str(param_type))
suffix = 'list' if is_list else 'value'
return '_'.join([typename, suffix]) |
<SYSTEM_TASK:>
Create an info-like tuple for feature given some shapes and vocab size.
<END_TASK>
<USER_TASK:>
Description:
def _make_info(shape_list, num_classes):
"""Create an info-like tuple for feature given some shapes and vocab size.""" |
feature_info = collections.namedtuple("FeatureInfo", ["shape", "num_classes"])
cur_shape = list(shape_list[0])
# We need to merge the provided shapes, put None where they disagree.
for shape in shape_list:
if len(shape) != len(cur_shape):
raise ValueError("Shapes need to have the same number of dimensions.")
for i in range(len(shape)):
if cur_shape[i] is not None:
if shape[i] != cur_shape[i]:
cur_shape[i] = None
return feature_info(cur_shape, num_classes) |
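A hedged example of the merge rule above: dimensions that disagree across the provided shapes become None.

info = _make_info([[8, 32, 512], [8, 16, 512]], num_classes=1000)
print(info.shape)        # [8, None, 512]
print(info.num_classes)  # 1000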
<SYSTEM_TASK:>
Select a subset of features from the example dict.
<END_TASK>
<USER_TASK:>
Description:
def _select_features(example, feature_list=None):
"""Select a subset of features from the example dict.""" |
feature_list = feature_list or ["inputs", "targets"]
return {f: example[f] for f in feature_list} |