<SYSTEM_TASK:>
Essentially same as reset, but we don't have observations.
<END_TASK>
<USER_TASK:>
Description:
def complete_all_trajectories(self):
"""Essentially same as reset, but we don't have observations.""" |
for index in range(self.batch_size):
trajectory = self._trajectories[index]
assert trajectory.is_active
self._complete_trajectory(trajectory, index) |
<SYSTEM_TASK:>
Record the information obtained from taking a step in all envs.
<END_TASK>
<USER_TASK:>
Description:
def step(self, observations, raw_rewards, processed_rewards, dones, actions):
"""Record the information obtained from taking a step in all envs.
Records (observation, rewards, done) in a new time-step and actions in the
current time-step.
If any trajectory gets done, we move that trajectory to
completed_trajectories.
Args:
observations: ndarray of first dimension self.batch_size, which has the
observations after we've stepped, i.e. s_{t+1} where t is the current
time-step.
raw_rewards: ndarray of first dimension self.batch_size containing raw
rewards, i.e. r_{t+1}.
processed_rewards: ndarray of first dimension self.batch_size containing
processed rewards, i.e. r_{t+1}.
dones: ndarray of first dimension self.batch_size, containing True at an
index if that env is done, i.e. d_{t+1}.
actions: ndarray of first dimension self.batch_size, containing actions
applied at the current time-step, which lead to the observations,
rewards and dones at the next time-step, i.e. a_t.
""" |
# Pre-conditions
assert isinstance(observations, np.ndarray)
assert isinstance(raw_rewards, np.ndarray)
assert isinstance(processed_rewards, np.ndarray)
assert isinstance(dones, np.ndarray)
assert isinstance(actions, np.ndarray)
# We assume that we step in all envs, i.e. not like reset where we can reset
# some envs and not others.
assert self.batch_size == observations.shape[0]
assert self.batch_size == raw_rewards.shape[0]
assert self.batch_size == processed_rewards.shape[0]
assert self.batch_size == dones.shape[0]
assert self.batch_size == actions.shape[0]
for index in range(self.batch_size):
trajectory = self._trajectories[index]
# NOTE: If the trajectory isn't active, that means it doesn't have any
# time-steps in it, but we are in step, so the assumption is that it has
# a prior observation from which we are stepping away.
# TODO(afrozm): Let's re-visit this if it becomes too restrictive.
assert trajectory.is_active
# To this trajectory's last time-step, set actions.
trajectory.change_last_time_step(action=actions[index])
# Create a new time-step to add observation, done & rewards (no actions).
trajectory.add_time_step(
observation=observations[index],
done=dones[index],
raw_reward=raw_rewards[index],
processed_reward=processed_rewards[index])
# If the trajectory is completed, i.e. dones[index] == True, then we
# account for it right away.
if dones[index]:
self._complete_trajectory(trajectory, index)
# NOTE: The new trajectory at `index` is going to be in-active and
# `reset` should be called on it.
assert not self._trajectories[index].is_active |
<SYSTEM_TASK:>
Returns the number of time-steps in completed and incomplete trajectories.
<END_TASK>
<USER_TASK:>
Description:
def num_time_steps(self):
"""Returns the number of time-steps in completed and incomplete trajectories.""" |
num_time_steps = sum(t.num_time_steps for t in self.trajectories)
return num_time_steps + self.num_completed_time_steps |
<SYSTEM_TASK:>
Pads the observations in all the trajectories and returns them.
<END_TASK>
<USER_TASK:>
Description:
def observations_np(self, boundary=20):
"""Pads the observations in all the trajectories and returns them.
Args:
boundary: integer, Observations will be padded to (n * boundary) + 1 where
n is an integer.
Returns:
a tuple(padded_observations, time_steps), with shapes:
padded_observations: (self.batch_size, n * boundary + 1) + OBS
time_steps: integer list of length = self.batch_size
""" |
list_observations_np_ts = [t.observations_np for t in self.trajectories]
# Every element in `list_observations_np_ts` is shaped (t,) + OBS
OBS = list_observations_np_ts[0].shape[1:] # pylint: disable=invalid-name
num_time_steps = [t.num_time_steps for t in self.trajectories]
t_max = max(num_time_steps)
# t_max is rounded to the next multiple of `boundary`
boundary = int(boundary)
bucket_length = boundary * int(np.ceil(float(t_max) / boundary))
def padding_config(obs):
# We're padding the first axis only, since that is the time-step.
num_to_pad = bucket_length + 1 - obs.shape[0]
return [(0, num_to_pad)] + [(0, 0)] * len(OBS)
return np.stack([
np.pad(obs, padding_config(obs), "constant")
for obs in list_observations_np_ts]), num_time_steps |
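As a sanity check on the padding arithmetic above, here is a minimal standalone NumPy sketch; the trajectory lengths and OBS shape are made up for the example:

import numpy as np

boundary = 20
obs_list = [np.ones((7, 4)), np.ones((13, 4))]  # each is (t,) + OBS, OBS = (4,)
t_max = max(o.shape[0] for o in obs_list)  # 13
bucket_length = boundary * int(np.ceil(float(t_max) / boundary))  # 20
padded = np.stack([
    np.pad(o, [(0, bucket_length + 1 - o.shape[0])] + [(0, 0)] * (o.ndim - 1),
           "constant")
    for o in obs_list])
assert padded.shape == (2, bucket_length + 1, 4)  # (batch, n * boundary + 1) + OBS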
<SYSTEM_TASK:>
Generate SQuAD examples.
<END_TASK>
<USER_TASK:>
Description:
def _generate_examples(tmp_dir, dataset_split):
"""Generate squad examples.
Args:
tmp_dir: a string
dataset_split: problem.DatasetSplit.TRAIN or problem.DatasetSplit.EVAL
Yields:
dictionaries representing examples
""" |
if dataset_split == problem.DatasetSplit.TRAIN:
file_name = _TRAINING_SET
else:
file_name = _DEV_SET
squad_file = generator_utils.maybe_download(tmp_dir,
file_name,
os.path.join(_URL, file_name))
with tf.gfile.GFile(squad_file, mode="r") as fp:
squad = json.load(fp)
version = squad["version"]
for article in squad["data"]:
if "title" in article:
title = article["title"].strip()
else:
title = "no title"
for paragraph in article["paragraphs"]:
context = paragraph["context"].strip()
for qa in paragraph["qas"]:
question = qa["question"].strip()
id_ = qa["id"]
answer_starts = [answer["answer_start"] for answer in qa["answers"]]
answers = [answer["text"].strip() for answer in qa["answers"]]
# Features currently used are "context", "question", and "answers".
# Others are extracted here for ease of future expansion.
example = {
"version": version,
"title": title,
"context": context,
"question": question,
"id": id_,
"answer_starts": answer_starts,
"answers": answers,
"num_answers": len(answers),
"is_supervised": True,
}
yield example |
<SYSTEM_TASK:>
Create a layer stack based on the hyperparameter values.
<END_TASK>
<USER_TASK:>
Description:
def layer_stack_from_hparams(hparams, prefix):
"""Create a layer stack based on the hyperparameter values.""" |
layers = hparams.get(prefix + "layers")
return transformer.LayerStack(
[layers_registry[l](hparams, prefix) for l in layers],
dropout_rate=hparams.layer_prepostprocess_dropout,
norm_epsilon=hparams.norm_epsilon) |
<SYSTEM_TASK:>
Hyperparameters for single-stack Transformer.
<END_TASK>
<USER_TASK:>
Description:
def mtf_unitransformer_base():
"""Hyperparameters for single-stack Transformer.""" |
hparams = mtf_transformer2_base()
hparams.add_hparam("autoregressive", True)
# HYPERPARAMETERS FOR THE SINGLE LAYER STACK
hparams.add_hparam("layers", ["self_att", "drd"] * 6)
# number of heads in multihead attention
hparams.add_hparam("num_heads", 8)
# default of 0 for standard transformer behavior
# 1 means a single set of keys and values that are read by all query heads
hparams.add_hparam("num_memory_heads", 0)
# share attention keys and values
hparams.add_hparam("shared_kv", False)
# if nonzero then use local attention
hparams.add_hparam("local_attention_radius", 128)
return hparams |
<SYSTEM_TASK:>
Model incorporating mixture-of-experts, local and global attention.
<END_TASK>
<USER_TASK:>
Description:
def mtr_lm_v1():
"""Model incorporating mixture-of-experts, local and global attention.
~6B parameters.
32 experts in 3 hierarchical MoE layers.
Returns:
a hparams
""" |
hparams = mtr_lm_dense(0)
hparams.layers = (["local_self_att", "local_self_att", "drd",
"self_att", "drd", "local_self_att",
"local_self_att", "moe_2d"] * 4)[:-1]
hparams.d_kv = 128
hparams.moe_expert_x = 8
hparams.moe_expert_y = 4
hparams.moe_hidden_size = 32768
hparams.d_ff = 2048
hparams.num_memory_heads = 0
hparams.mesh_shape = "b0:4;b1:8"
hparams.layout = "outer_batch:b0;inner_batch:b1,expert_x:b1,expert_y:b0"
hparams.outer_batch_size = 4
return hparams |
<SYSTEM_TASK:>
Series of machine translation models.
<END_TASK>
<USER_TASK:>
Description:
def mtr_tr_dense(sz):
"""Series of machine translation models.
All models are trained on sequences of 256 tokens.
You can use the dataset translate_enfr_wmt32k_packed.
154000 steps = 3 epochs.
Args:
sz: an integer
Returns:
a hparams
""" |
n = 2 ** sz
hparams = mtf_bitransformer_base()
hparams.d_model = 1024
hparams.max_length = 256
hparams.batch_size = 128
hparams.d_ff = int(4096 * n)
hparams.d_kv = 128
hparams.encoder_num_heads = int(8 * n)
hparams.decoder_num_heads = int(8 * n)
# one epoch for translate_enfr_wmt32k_packed = 51400 steps
hparams.learning_rate_decay_steps = 51400
hparams.layout = "batch:batch;vocab:model;d_ff:model;heads:model"
hparams.mesh_shape = "batch:32"
hparams.label_smoothing = 0.1
hparams.layer_prepostprocess_dropout = 0.1
hparams.attention_dropout = 0.1
hparams.relu_dropout = 0.1
return hparams |
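For reference, a quick sketch of how `sz` scales this family of models, derived directly from the assignments above (no new hyperparameters assumed):

# n = 2 ** sz doubles d_ff and both head counts at each size step.
for sz in (0, 1, 2):
    n = 2 ** sz
    print(sz, {"d_ff": int(4096 * n), "num_heads": int(8 * n)})
# sz=0 -> d_ff=4096, heads=8; sz=1 -> 8192, 16; sz=2 -> 16384, 32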
<SYSTEM_TASK:>
With local self-attention in the decoder.
<END_TASK>
<USER_TASK:>
Description:
def mtr_tr_dense_local(sz):
"""With local self-attention in the decoder.""" |
hparams = mtr_tr_dense(sz)
hparams.decoder_layers = ["local_self_att", "enc_att", "drd"] * 6
hparams.local_attention_radius = 32
return hparams |
<SYSTEM_TASK:>
Recurrent decoder function.
<END_TASK>
<USER_TASK:>
Description:
def recurrent_transformer_decoder(
decoder_input,
encoder_output,
decoder_self_attention_bias,
encoder_decoder_attention_bias,
hparams,
name="decoder",
nonpadding=None,
save_weights_to=None,
make_image_summary=True):
"""Recurrent decoder function.""" |
x = decoder_input
attention_dropout_broadcast_dims = (
common_layers.comma_separated_string_to_integer_list(
getattr(hparams, "attention_dropout_broadcast_dims", "")))
with tf.variable_scope(name):
ffn_unit = functools.partial(
# use encoder ffn, since decoder ffn uses left padding
universal_transformer_util.transformer_encoder_ffn_unit,
hparams=hparams,
nonpadding_mask=nonpadding)
attention_unit = functools.partial(
universal_transformer_util.transformer_decoder_attention_unit,
hparams=hparams,
encoder_output=encoder_output,
decoder_self_attention_bias=decoder_self_attention_bias,
encoder_decoder_attention_bias=encoder_decoder_attention_bias,
attention_dropout_broadcast_dims=attention_dropout_broadcast_dims,
save_weights_to=save_weights_to,
make_image_summary=make_image_summary)
x, extra_output = universal_transformer_util.universal_transformer_layer(
x, hparams, ffn_unit, attention_unit)
return common_layers.layer_preprocess(x, hparams), extra_output |
<SYSTEM_TASK:>
Block of batch norm and relu.
<END_TASK>
<USER_TASK:>
Description:
def batch_norm_relu(inputs, is_training, relu=True):
"""Block of batch norm and relu.""" |
inputs = mtf.layers.batch_norm(
inputs,
is_training,
BATCH_NORM_DECAY,
epsilon=BATCH_NORM_EPSILON,
init_zero=(not relu))
if relu:
inputs = mtf.relu(inputs)
return inputs |
<SYSTEM_TASK:>
Universal Transformer encoder function.
<END_TASK>
<USER_TASK:>
Description:
def universal_transformer_encoder(encoder_input,
encoder_self_attention_bias,
hparams,
name="encoder",
nonpadding=None,
save_weights_to=None,
make_image_summary=True):
"""Universal Transformer encoder function.
Prepares all the arguments and the inputs and passes them to
universal_transformer_layer to encode the encoder_input.
Args:
encoder_input: a Tensor
encoder_self_attention_bias: bias Tensor for self-attention
(see common_attention.attention_bias())
hparams: hyperparameters for model
name: a string
nonpadding: optional Tensor with shape [batch_size, encoder_length]
indicating what positions are not padding. This must either be
passed in, which we do for "packed" datasets, or inferred from
encoder_self_attention_bias. The knowledge about padding is used
for pad_remover (efficiency) and to mask out padding in convolutional
layers.
save_weights_to: an optional dictionary to capture attention weights
for visualization; the weights tensor will be appended there under
a string key created from the variable scope (including name).
make_image_summary: Whether to make an attention image summary.
Returns:
y: a Tensor as the output of the encoder
extra_output: which can be used to pass extra information to the body
""" |
x = encoder_input
attention_dropout_broadcast_dims = (
common_layers.comma_separated_string_to_integer_list(
getattr(hparams, "attention_dropout_broadcast_dims", "")))
with tf.variable_scope(name):
if nonpadding is not None:
padding = 1.0 - nonpadding
else:
padding = common_attention.attention_bias_to_padding(
encoder_self_attention_bias)
nonpadding = 1.0 - padding
pad_remover = None
if hparams.use_pad_remover and not common_layers.is_xla_compiled():
pad_remover = expert_utils.PadRemover(padding)
ffn_unit = functools.partial(
transformer_encoder_ffn_unit,
hparams=hparams,
nonpadding_mask=nonpadding,
pad_remover=pad_remover)
attention_unit = functools.partial(
transformer_encoder_attention_unit,
hparams=hparams,
encoder_self_attention_bias=encoder_self_attention_bias,
attention_dropout_broadcast_dims=attention_dropout_broadcast_dims,
save_weights_to=save_weights_to,
make_image_summary=make_image_summary)
x, extra_output = universal_transformer_layer(
x, hparams, ffn_unit, attention_unit, pad_remover=pad_remover)
return common_layers.layer_preprocess(x, hparams), extra_output |
<SYSTEM_TASK:>
Core function applying the universal transformer layer.
<END_TASK>
<USER_TASK:>
Description:
def universal_transformer_layer(x,
hparams,
ffn_unit,
attention_unit,
pad_remover=None):
"""Core function applying the universal transformer layer.
Args:
x: input
hparams: model hyper-parameters
ffn_unit: feed-forward unit
attention_unit: multi-head attention unit
pad_remover: to mask out padding in convolutional layers (efficiency).
Returns:
the output tensor, extra output (can be memory, ponder time, etc.)
Raises:
ValueError: Unknown recurrence type
""" |
def add_vanilla_transformer_layer(x, num_layers, name):
"""Passes the input through num_layers of vanilla transformer layers.
Args:
x: input
num_layers: number of layers
name: string, prefix of layer names
Returns:
output of vanilla_transformer_layer
"""
if hparams.add_position_timing_signal:
# In case of add_position_timing_signal=true, we set hparams.pos=None
# and add position timing signal at the beginning of each step, so for
# the vanilla transformer, we need to add timing signal here.
x = common_attention.add_timing_signal_1d(x)
for layer in range(num_layers):
with tf.variable_scope(name + "layer_%d" % layer):
x = ffn_unit(attention_unit(x))
return x
with tf.variable_scope("universal_transformer_%s" % hparams.recurrence_type):
if (hparams.mix_with_transformer and
"before_ut" in hparams.mix_with_transformer):
x = add_vanilla_transformer_layer(x, hparams.num_mixedin_layers,
"before_ut_")
if hparams.recurrence_type == "act":
output, extra_output = universal_transformer_act(
x, hparams, ffn_unit, attention_unit)
else: # for all the other recurrence types with a fixed number of steps
ut_function, initializer = get_ut_layer(x, hparams, ffn_unit,
attention_unit, pad_remover)
output, _, extra_output = tf.foldl(
ut_function, tf.range(hparams.num_rec_steps),
initializer=initializer)
# Right now, this is only possible when the transition function is an lstm
if (hparams.recurrence_type == "lstm" and
hparams.get("use_memory_as_final_state", False)):
output = extra_output
if (hparams.mix_with_transformer and
"after_ut" in hparams.mix_with_transformer):
output = add_vanilla_transformer_layer(output, hparams.num_mixedin_layers,
"after_ut_")
return output, extra_output |
<SYSTEM_TASK:>
Provides the function that is used in universal transformer steps.
<END_TASK>
<USER_TASK:>
Description:
def get_ut_layer(x,
hparams,
ffn_unit,
attention_unit,
pad_remover=None):
"""Provides the function that is used in universal transforemr steps.
Args:
x: input
hparams: model hyper-parameters
ffn_unit: feed-forward unit
attention_unit: multi-head attention unit
pad_remover: to mask out padding in convolutional layers (efficiency).
Returns:
ut_function and the ut_initializer
Raises:
ValueError: Unknown recurrence type
""" |
if hparams.recurrence_type == "basic":
ut_initializer = (x, x, x) # (state, input, memory)
ut_function = functools.partial(
universal_transformer_basic,
hparams=hparams,
ffn_unit=ffn_unit,
attention_unit=attention_unit)
elif hparams.recurrence_type == "highway":
ut_initializer = (x, x, x) # (state, input, memory)
ut_function = functools.partial(
universal_transformer_highway,
hparams=hparams,
ffn_unit=ffn_unit,
attention_unit=attention_unit,
pad_remover=pad_remover)
elif hparams.recurrence_type == "skip":
ut_initializer = (x, x, x) # (state, input, memory)
ut_function = functools.partial(
universal_transformer_skip,
hparams=hparams,
ffn_unit=ffn_unit,
attention_unit=attention_unit,
pad_remover=pad_remover)
elif hparams.recurrence_type == "dwa":
# memory contains the original input + all the states
memory_size = hparams.num_rec_steps + 1
# prepare initializer:
memory_empty = tf.zeros([memory_size] + common_layers.shape_list(x))
# filling the first slot with the original input
memory = fill_memory_slot(memory_empty, x, 0)
ut_initializer = (x, x, memory) # (state, input, memory)
ut_function = functools.partial(
universal_transformer_depthwise_attention,
hparams=hparams,
ffn_unit=ffn_unit,
attention_unit=attention_unit)
elif hparams.recurrence_type == "gru":
ut_initializer = (x, x, x) # (state, input, memory)
ut_function = functools.partial(
universal_transformer_with_gru_as_transition_function,
hparams=hparams,
ffn_unit=ffn_unit,
attention_unit=attention_unit,
pad_remover=pad_remover)
elif hparams.recurrence_type == "lstm":
memory = tf.zeros(common_layers.shape_list(x))
ut_initializer = (x, x, memory) # (state, input, memory)
ut_function = functools.partial(
universal_transformer_with_lstm_as_transition_function,
hparams=hparams,
ffn_unit=ffn_unit,
attention_unit=attention_unit,
pad_remover=pad_remover)
else:
raise ValueError("Unknown recurrence type: %s" % hparams.recurrence_type)
return ut_function, ut_initializer |
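The returned `ut_function` and `ut_initializer` are consumed by the `tf.foldl` call in `universal_transformer_layer`, which threads a `(state, input, memory)` triple through `num_rec_steps` applications of the step function. A minimal pure-Python sketch of that control flow, with a stand-in step function (not the real transformer unit):

def foldl(fn, elems, initializer):
    # Pure-Python analogue of tf.foldl: repeatedly apply fn(accumulator, element).
    acc = initializer
    for elem in elems:
        acc = fn(acc, elem)
    return acc

# Stand-in ut_function: records which step indices it was called with.
ut_function = lambda acc, step: (acc[0] + [step], acc[1], acc[2])
state, inputs, memory = foldl(ut_function, range(3), ([], "input", "memory"))
assert state == [0, 1, 2]  # three recurrent steps were applied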
<SYSTEM_TASK:>
Applies a feed-forward function which is parametrised for encoding.
<END_TASK>
<USER_TASK:>
Description:
def transformer_encoder_ffn_unit(x,
hparams,
nonpadding_mask=None,
pad_remover=None):
"""Applies a feed-forward function which is parametrised for encoding.
Args:
x: input
hparams: model hyper-parameters
nonpadding_mask: optional Tensor with shape [batch_size, encoder_length]
indicating what positions are not padding. This is used
to mask out padding in convolutional layers. We generally only
need this mask for "packed" datasets, because for ordinary datasets,
no padding is ever followed by nonpadding.
pad_remover: to mask out padding in convolutional layers (efficiency).
Returns:
the output tensor
""" |
with tf.variable_scope("ffn"):
if hparams.transformer_ffn_type == "fc":
y = transformer.transformer_ffn_layer(
common_layers.layer_preprocess(x, hparams),
hparams,
pad_remover,
conv_padding="SAME",
nonpadding_mask=nonpadding_mask)
if hparams.transformer_ffn_type == "sepconv":
assert nonpadding_mask is not None, (
"The nonpadding_mask should be provided, otherwise the model uses "
"the leaked padding information to estimate the length!")
y = common_layers.sepconv_relu_sepconv(
common_layers.layer_preprocess(x, hparams),
filter_size=hparams.filter_size,
output_size=hparams.hidden_size,
first_kernel_size=(3, 1),
second_kernel_size=(5, 1),
padding="SAME",
nonpadding_mask=nonpadding_mask,
dropout=hparams.relu_dropout)
x = common_layers.layer_postprocess(x, y, hparams)
return x |
<SYSTEM_TASK:>
Applies multihead attention function which is parametrised for encoding.
<END_TASK>
<USER_TASK:>
Description:
def transformer_encoder_attention_unit(x,
hparams,
encoder_self_attention_bias,
attention_dropout_broadcast_dims,
save_weights_to=None,
make_image_summary=True):
"""Applies multihead attention function which is parametrised for encoding.
Args:
x: input
hparams: model hyper-parameters
encoder_self_attention_bias: a bias tensor for use in encoder self-attention
attention_dropout_broadcast_dims: For noise broadcasting in the dropout
layers to save memory during training
save_weights_to: an optional dictionary to capture attention weights for
visualization; the weights tensor will be appended there under a string
key created from the variable scope (including name).
make_image_summary: Whether to make an attention image summary.
Returns:
the output tensor
""" |
with tf.variable_scope("self_attention"):
y = common_attention.multihead_attention(
common_layers.layer_preprocess(x, hparams),
None,
encoder_self_attention_bias,
hparams.attention_key_channels or hparams.hidden_size,
hparams.attention_value_channels or hparams.hidden_size,
hparams.hidden_size,
hparams.num_heads,
hparams.attention_dropout,
attention_type=hparams.self_attention_type,
save_weights_to=save_weights_to,
max_relative_position=hparams.max_relative_position,
make_image_summary=make_image_summary,
dropout_broadcast_dims=attention_dropout_broadcast_dims,
hard_attention_k=hparams.hard_attention_k)
x = common_layers.layer_postprocess(x, y, hparams)
return x |
<SYSTEM_TASK:>
Applies multihead attention function which is parametrised for decoding.
<END_TASK>
<USER_TASK:>
Description:
def transformer_decoder_attention_unit(x,
hparams,
encoder_output,
decoder_self_attention_bias,
encoder_decoder_attention_bias,
attention_dropout_broadcast_dims,
save_weights_to=None,
make_image_summary=True):
"""Applies multihead attention function which is parametrised for decoding.
Args:
x: input (decoder input)
hparams: model hyper-parameters
encoder_output: Encoder representation. [batch_size, input_length,
hidden_dim]
decoder_self_attention_bias: Bias and mask weights for decoder
self-attention. [batch_size, decoder_length]
encoder_decoder_attention_bias: Bias and mask weights for encoder-decoder
attention. [batch_size, input_length]
attention_dropout_broadcast_dims: For noise broadcasting in the dropout
layers to save memory during training
save_weights_to: an optional dictionary to capture attention weights for
visualization; the weights tensor will be appended there under a string
key created from the variable scope (including name).
make_image_summary: Whether to make an attention image summary.
Returns:
The output tensor
""" |
with tf.variable_scope("self_attention"):
y = common_attention.multihead_attention(
common_layers.layer_preprocess(x, hparams),
None,
decoder_self_attention_bias,
hparams.attention_key_channels or hparams.hidden_size,
hparams.attention_value_channels or hparams.hidden_size,
hparams.hidden_size,
hparams.num_heads,
hparams.attention_dropout,
attention_type=hparams.self_attention_type,
save_weights_to=save_weights_to,
max_relative_position=hparams.max_relative_position,
cache=None,
make_image_summary=make_image_summary,
dropout_broadcast_dims=attention_dropout_broadcast_dims,
hard_attention_k=hparams.hard_attention_k)
x = common_layers.layer_postprocess(x, y, hparams)
if encoder_output is not None:
with tf.variable_scope("encdec_attention"):
y = common_attention.multihead_attention(
common_layers.layer_preprocess(x, hparams),
encoder_output,
encoder_decoder_attention_bias,
hparams.attention_key_channels or hparams.hidden_size,
hparams.attention_value_channels or hparams.hidden_size,
hparams.hidden_size,
hparams.num_heads,
hparams.attention_dropout,
save_weights_to=save_weights_to,
make_image_summary=make_image_summary,
dropout_broadcast_dims=attention_dropout_broadcast_dims,
hard_attention_k=hparams.hard_attention_k)
x = common_layers.layer_postprocess(x, y, hparams)
return x |
<SYSTEM_TASK:>
Basic Universal Transformer.
<END_TASK>
<USER_TASK:>
Description:
def universal_transformer_basic(layer_inputs,
step, hparams,
ffn_unit,
attention_unit):
"""Basic Universal Transformer.
This model is pretty similar to the vanilla transformer in which weights are
shared between layers. For some tasks, this simple idea brings a
generalization that is not achievable by playing with the size of the model
or dropout parameters in the vanilla transformer.
Args:
layer_inputs:
- state: state
step: indicates number of steps taken so far
hparams: model hyper-parameters
ffn_unit: feed-forward unit
attention_unit: multi-head attention unit
Returns:
layer_output:
new_state: new state
""" |
state, inputs, memory = tf.unstack(layer_inputs, num=None, axis=0,
name="unstack")
new_state = step_preprocess(state, step, hparams)
for i in range(hparams.num_inrecurrence_layers):
with tf.variable_scope("rec_layer_%d" % i):
new_state = ffn_unit(attention_unit(new_state))
return new_state, inputs, memory |
<SYSTEM_TASK:>
Universal Transformer with highway connection.
<END_TASK>
<USER_TASK:>
Description:
def universal_transformer_highway(layer_inputs,
step, hparams,
ffn_unit,
attention_unit,
pad_remover=None):
"""Universal Transformer with highway connection.
It transforms the state using a block containing self-attention and a
transition function, and wraps the whole block with a highway connection.
(The new state is a combination of the state and the transformed state,
based on carry/transform gates.)
Interesting observation:
Controlling the carry/transform gate with the original inputs usually
works better (i.e. hparams.gates_inputs="i")
Args:
layer_inputs:
- state: state
- inputs: the original embedded inputs (= inputs to the first step)
step: indicates number of steps taken so far
hparams: model hyper-parameters.
ffn_unit: feed-forward unit
attention_unit: multi-head attention unit
pad_remover: to mask out padding in convolutional layers (efficiency).
Returns:
layer_output:
new_state: new state
inputs: the original embedded inputs (= inputs to the first step)
""" |
state, inputs, memory = layer_inputs
new_state = step_preprocess(state, step, hparams)
for i in range(hparams.num_inrecurrence_layers):
with tf.variable_scope("rec_layer_%d" % i):
new_state = ffn_unit(attention_unit(new_state))
transformed_state = new_state
gate_inputs = []
if "s" in hparams.gates_inputs:
gate_inputs.append(state)
if "t" in hparams.gates_inputs:
gate_inputs.append(transformed_state)
if "i" in hparams.gates_inputs:
gate_inputs.append(inputs)
gate_ffn_layer = hparams.gate_ffn_layer
transform_gate = _ffn_layer_multi_inputs(
gate_inputs,
hparams,
ffn_layer_type=gate_ffn_layer,
name="transform",
bias_initializer=tf.constant_initializer(hparams.transform_bias_init),
activation=tf.sigmoid,
pad_remover=pad_remover,
preprocess=True,
postprocess=True)
if hparams.couple_carry_transform_gates:
carry_gate = tf.subtract(1.0, transform_gate, name="carry")
else:
carry_gate = _ffn_layer_multi_inputs(
gate_inputs,
hparams,
ffn_layer_type=gate_ffn_layer,
name="carry",
bias_initializer=tf.constant_initializer(-hparams.transform_bias_init),
activation=tf.sigmoid,
pad_remover=pad_remover,
preprocess=True,
postprocess=True)
new_state = state * carry_gate + transformed_state * transform_gate
tf.contrib.summary.scalar("highway_transform_gate_layer",
tf.reduce_mean(transform_gate))
tf.contrib.summary.scalar("highway_carry_gate_layer",
tf.reduce_mean(carry_gate))
return new_state, inputs, memory |
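A hedged NumPy sketch of the highway combination computed at the end of this function; the gate values are toy constants standing in for the learned `_ffn_layer_multi_inputs` outputs:

import numpy as np

state = np.array([1.0, 2.0, 3.0])
transformed_state = np.array([10.0, 20.0, 30.0])
transform_gate = np.array([0.25, 0.5, 0.75])  # sigmoid outputs in (0, 1)
# With couple_carry_transform_gates=True, carry = 1 - transform, so the
# new state linearly interpolates between state and transformed state.
carry_gate = 1.0 - transform_gate
new_state = state * carry_gate + transformed_state * transform_gate
# -> [3.25, 11.0, 23.25]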
<SYSTEM_TASK:>
universal_transformer with depth-wise attention.
<END_TASK>
<USER_TASK:>
Description:
def universal_transformer_depthwise_attention(layer_inputs,
step, hparams,
ffn_unit,
attention_unit):
"""universal_transformer with depth-wise attention.
It uses an attention mechanism (flipped vertically)
over all the states from previous steps to generate the new_state.
Args:
layer_inputs:
- state: state
- memory: contains states from all the previous steps.
step: indicates number of steps taken so far
hparams: model hyper-parameters.
ffn_unit: feed-forward unit
attention_unit: multi-head attention unit
Returns:
layer_output:
new_state: new state
memory: contains states from all the previous steps.
""" |
_, inputs, memory = layer_inputs
all_states = memory
# add depth signal
if hparams.depth_embedding:
all_states = add_depth_embedding(all_states)
# get the states up to the current step (non-zero part of the memory)
states_so_far = all_states[:step, :, :, :]
states_so_far_weights = tf.nn.softmax(
common_layers.dense(
states_so_far, (hparams.hidden_size if hparams.dwa_elements else 1),
activation=None,
use_bias=True),
axis=-1)
# prepare the state tensor that will be transformed
state_to_be_transformed = tf.reduce_sum(
(states_so_far * states_so_far_weights), axis=0)
new_state = step_preprocess(state_to_be_transformed, step, hparams)
for i in range(hparams.num_inrecurrence_layers):
with tf.variable_scope("rec_layer_%d" % i):
new_state = ffn_unit(attention_unit(new_state))
# add the new state to the memory
memory = fill_memory_slot(memory, new_state, step + 1)
return new_state, inputs, memory |
<SYSTEM_TASK:>
Universal Transformer which uses a gru as transition function.
<END_TASK>
<USER_TASK:>
Description:
def universal_transformer_with_gru_as_transition_function(
layer_inputs, step, hparams, ffn_unit, attention_unit, pad_remover=None):
"""Universal Transformer which uses a gru as transition function.
It's kind of like having a gru, flipped vertically next to the Universal
Transformer, that controls the flow of the information in depth,
over different steps of the Universal Transformer.
Args:
layer_inputs:
- state: state
- inputs: not used here
- memory: not used here
step: indicates number of steps taken so far
hparams: model hyper-parameters.
ffn_unit: feed-forward unit
attention_unit: multi-head attention unit
pad_remover: to mask out padding in convolutional layers (efficiency).
Returns:
layer_output:
new_state: new state
inputs: not used
memory: not used
""" |
state, unused_inputs, unused_memory = tf.unstack(
layer_inputs, num=None, axis=0, name="unstack")
# state (ut_state): output of the gru in the previous step
# Multi_head_attention:
assert not hparams.add_step_timing_signal # Let gru count for us!
mh_attention_input = step_preprocess(state, step, hparams)
transition_function_input = attention_unit(mh_attention_input)
# Transition Function:
if hparams.add_ffn_unit_to_the_transition_function:
transition_function_input = ffn_unit(transition_function_input)
transition_function_input = common_layers.layer_preprocess(
transition_function_input, hparams)
with tf.variable_scope("gru"):
# gru update gate: z_t = sigmoid(W_z.x_t + U_z.h_{t-1})
transition_function_update_gate = _ffn_layer_multi_inputs(
[transition_function_input, state],
hparams,
name="update",
bias_initializer=tf.constant_initializer(1.0),
activation=tf.sigmoid,
pad_remover=pad_remover,
preprocess=False,
postprocess=False)
tf.contrib.summary.scalar("gru_update_gate",
tf.reduce_mean(transition_function_update_gate))
# gru reset gate: r_t = sigmoid(W_r.x_t + U_r.h_{t-1})
transition_function_reset_gate = _ffn_layer_multi_inputs(
[transition_function_input, state],
hparams,
name="reset",
bias_initializer=tf.constant_initializer(1.0),
activation=tf.sigmoid,
pad_remover=pad_remover,
preprocess=False,
postprocess=False)
tf.contrib.summary.scalar("gru_reset_gate",
tf.reduce_mean(transition_function_reset_gate))
reset_state = transition_function_reset_gate * state
# gru candidate activation: h' = tanh(W.x_t + U.(r_t * h_{t-1}))
transition_function_candidate = _ffn_layer_multi_inputs(
[transition_function_input, reset_state],
hparams,
name="candidate",
bias_initializer=tf.zeros_initializer(),
activation=tf.tanh,
pad_remover=pad_remover,
preprocess=False,
postprocess=False)
transition_function_output = (
(1 - transition_function_update_gate) * transition_function_input +
transition_function_update_gate * transition_function_candidate)
transition_function_output = common_layers.layer_preprocess(
transition_function_output, hparams)
return transition_function_output, unused_inputs, unused_memory |
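A minimal NumPy sketch of the GRU update implemented above, with elementwise toy "projections" standing in for `_ffn_layer_multi_inputs` (which concatenates its inputs and applies a dense layer):

import numpy as np

def sigmoid(v):
    return 1.0 / (1.0 + np.exp(-v))

x_t = np.array([0.5, -0.5])    # transition_function_input
h_prev = np.array([0.1, 0.2])  # state: output of the gru in the previous step
z_t = sigmoid(x_t + h_prev + 1.0)     # update gate (bias initialized to 1.0)
r_t = sigmoid(x_t + h_prev + 1.0)     # reset gate (bias initialized to 1.0)
h_cand = np.tanh(x_t + r_t * h_prev)  # candidate activation
# NOTE: as in the code above, the output mixes the *input* with the
# candidate, i.e. (1 - z_t) * x_t + z_t * h_cand.
h_t = (1.0 - z_t) * x_t + z_t * h_cand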
<SYSTEM_TASK:>
Universal Transformer which uses a lstm as transition function.
<END_TASK>
<USER_TASK:>
Description:
def universal_transformer_with_lstm_as_transition_function(
layer_inputs, step, hparams, ffn_unit, attention_unit, pad_remover=None):
"""Universal Transformer which uses a lstm as transition function.
It's kind of like having a lstm, flipped vertically next to the Universal
Transformer, that controls the flow of the information in depth,
over different steps of the Universal Transformer.
Args:
layer_inputs:
- state: state
- inputs: the original embedded inputs (= inputs to the first step)
- memory: memory used in lstm.
step: indicates number of steps taken so far
hparams: model hyper-parameters.
ffn_unit: feed-forward unit
attention_unit: multi-head attention unit
pad_remover: to mask out padding in convolutional layers (efficiency).
Returns:
layer_output:
new_state: new state
inputs: the original embedded inputs (= inputs to the first step)
memory: contains information of state from all the previous steps.
""" |
state, unused_inputs, memory = tf.unstack(
layer_inputs, num=None, axis=0, name="unstack")
# NOTE:
# state (ut_state): output of the lstm in the previous step
# inputs (ut_input): original input --> we don't use it here
# memory: lstm memory
# Multi_head_attention:
assert not hparams.add_step_timing_signal # Let lstm count for us!
mh_attention_input = step_preprocess(state, step, hparams)
transition_function_input = attention_unit(mh_attention_input)
# Transition Function:
if hparams.add_ffn_unit_to_the_transition_function:
transition_function_input = ffn_unit(transition_function_input)
transition_function_input = common_layers.layer_preprocess(
transition_function_input, hparams)
with tf.variable_scope("lstm"):
# lstm input gate: i_t = sigmoid(W_i.x_t + U_i.h_{t-1})
transition_function_input_gate = _ffn_layer_multi_inputs(
[transition_function_input, state],
hparams,
name="input",
bias_initializer=tf.zeros_initializer(),
activation=tf.sigmoid,
pad_remover=pad_remover,
preprocess=False,
postprocess=False)
tf.contrib.summary.scalar("lstm_input_gate",
tf.reduce_mean(transition_function_input_gate))
# lstm forget gate: f_t = sigmoid(W_f.x_t + U_f.h_{t-1})
transition_function_forget_gate = _ffn_layer_multi_inputs(
[transition_function_input, state],
hparams,
name="forget",
bias_initializer=tf.zeros_initializer(),
activation=None,
pad_remover=pad_remover,
preprocess=False,
postprocess=False)
forget_bias_tensor = tf.constant(hparams.lstm_forget_bias)
transition_function_forget_gate = tf.sigmoid(
transition_function_forget_gate + forget_bias_tensor)
tf.contrib.summary.scalar("lstm_forget_gate",
tf.reduce_mean(transition_function_forget_gate))
# lstm output gate: o_t = sigmoid(W_o.x_t + U_o.h_{t-1})
transition_function_output_gate = _ffn_layer_multi_inputs(
[transition_function_input, state],
hparams,
name="output",
bias_initializer=tf.zeros_initializer(),
activation=tf.sigmoid,
pad_remover=pad_remover,
preprocess=False,
postprocess=False)
tf.contrib.summary.scalar("lstm_output_gate",
tf.reduce_mean(transition_function_output_gate))
# lstm input modulation
transition_function_input_modulation = _ffn_layer_multi_inputs(
[transition_function_input, state],
hparams,
name="input_modulation",
bias_initializer=tf.zeros_initializer(),
activation=tf.tanh,
pad_remover=pad_remover,
preprocess=False,
postprocess=False)
transition_function_memory = (
memory * transition_function_forget_gate +
transition_function_input_gate * transition_function_input_modulation)
transition_function_output = (
tf.tanh(transition_function_memory) * transition_function_output_gate)
transition_function_output = common_layers.layer_preprocess(
transition_function_output, hparams)
return transition_function_output, unused_inputs, transition_function_memory |
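Similarly, a minimal NumPy sketch of the LSTM transition above (toy elementwise stand-ins for the dense projections; `forget_bias` plays the role of hparams.lstm_forget_bias):

import numpy as np

def sigmoid(v):
    return 1.0 / (1.0 + np.exp(-v))

x_t = np.array([0.5, -0.5])    # transition_function_input
h_prev = np.array([0.1, 0.2])  # state: previous lstm output
c_prev = np.array([0.0, 0.3])  # memory: previous lstm cell state
forget_bias = 1.0

i_t = sigmoid(x_t + h_prev)                # input gate
f_t = sigmoid(x_t + h_prev + forget_bias)  # forget gate (note the added bias)
o_t = sigmoid(x_t + h_prev)                # output gate
g_t = np.tanh(x_t + h_prev)                # input modulation
c_t = c_prev * f_t + i_t * g_t             # new memory
h_t = np.tanh(c_t) * o_t                   # new state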
<SYSTEM_TASK:>
Implements a Feed-forward layer with multiple inputs, pad-removing, etc.
<END_TASK>
<USER_TASK:>
Description:
def _ffn_layer_multi_inputs(inputs_list,
hparams,
ffn_layer_type="dense",
name="ffn",
kernel_initializer=None,
bias_initializer=None,
activation=None,
pad_remover=None,
preprocess=False,
postprocess=False):
"""Implements a Feed-forward layer with multiple inputs, pad-removing, etc.
Args:
inputs_list: list of input tensors
hparams: hyper-parameters
ffn_layer_type: dense / dense_dropconnect / dense_relu_dense
name: name
kernel_initializer: kernel initializer
bias_initializer: bias initializer
activation: activation function
pad_remover: pad remover
preprocess: whether to preprocess the input
postprocess: whether to postprocess the output
Returns:
a tensor
Raises:
ValueError: Unknown ffn_layer type.
""" |
# need at least one input
num_inputs = len(inputs_list)
assert num_inputs > 0
if preprocess and num_inputs == 1:
inputs_list[0] = common_layers.layer_preprocess(inputs_list[0], hparams)
if postprocess:
original_inputs = inputs_list[0]
# the output size is the hidden size of the main inputs
main_input = inputs_list[0]
original_shape = common_layers.shape_list(main_input)
assert hparams.hidden_size == common_layers.shape_list(main_input)[-1]
# all the inputs have the same shape as the main input
for inputs in inputs_list:
main_input.get_shape().assert_is_compatible_with(inputs.get_shape())
def remove_pads(x):
original_shape = common_layers.shape_list(x)
# Collapse `x` across examples, and remove padding positions.
x = tf.reshape(x, tf.concat([[-1], original_shape[2:]], axis=0))
x = tf.expand_dims(pad_remover.remove(x), axis=0)
return x
if pad_remover:
for i, inputs in enumerate(inputs_list):
inputs_list[i] = remove_pads(inputs)
ffn_inputs = inputs_list[0]
if len(inputs_list) != 1:
ffn_inputs = tf.concat(inputs_list, axis=-1)
if ffn_layer_type == "dense":
output = common_layers.dense(
ffn_inputs,
hparams.hidden_size,
name=name,
activation=activation,
use_bias=True,
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer)
elif ffn_layer_type == "dense_dropconnect":
output = common_layers.dense_dropconnect(
ffn_inputs,
hparams.hidden_size,
name=name,
dropconnect_dropout=hparams.dropconnect_dropout,
output_activation=activation)
postprocess = False # no dropout on the output unit
elif ffn_layer_type == "dense_relu_dense":
output = common_layers.dense_relu_dense(
ffn_inputs,
hparams.filter_size,
hparams.hidden_size,
name=name,
dropout=hparams.relu_dropout,
output_activation=activation,
)
else:
raise ValueError("Unknown ffn_layer type: %s" % ffn_layer_type)
if pad_remover:
# Restore `output` to the original shape of `x`, including padding.
output = tf.reshape(
pad_remover.restore(tf.squeeze(output, axis=0)), original_shape)
if postprocess:
if num_inputs == 1:
output = common_layers.layer_postprocess(original_inputs, output, hparams)
else: # only dropout (no residual)
hp = copy.copy(hparams)
hp.layer_postprocess_sequence = hp.layer_postprocess_sequence.replace(
"a", "")
output = common_layers.layer_postprocess(original_inputs, output, hp)
return output |
<SYSTEM_TASK:>
Fills the memory slot at a particular index with the given value.
<END_TASK>
<USER_TASK:>
Description:
def fill_memory_slot(memory, value, index):
"""Fills the memory slot at a particular index with the given value.
Args:
memory: a 4-d tensor [memory_size, batch, length, channel] containing
the state of all steps
value: a 3-d tensor [batch, length, channel] as the state
index: integer in [0, memory_size)
Returns:
filled memory
""" |
mask = tf.to_float(
tf.one_hot(index,
tf.shape(memory)[0])[:, None, None, None])
fill_memory = (1 - mask) * memory + mask * value[None, ...]
return fill_memory |
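The same masking trick in standalone NumPy (tf.one_hot replaced by an identity-matrix row; shapes are toy values):

import numpy as np

memory = np.zeros((3, 1, 2, 2), dtype=np.float32)  # [memory_size, batch, length, channel]
value = np.ones((1, 2, 2), dtype=np.float32)       # [batch, length, channel]
index = 1
mask = np.eye(memory.shape[0], dtype=np.float32)[index][:, None, None, None]
filled = (1 - mask) * memory + mask * value[None, ...]
assert filled[index].sum() == value.size  # slot `index` now holds `value`
assert filled[0].sum() == 0               # other slots are untouched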
<SYSTEM_TASK:>
Preprocess the input at the beginning of each step.
<END_TASK>
<USER_TASK:>
Description:
def step_preprocess(x, step, hparams):
"""Preprocess the input at the beginning of each step.
Args:
x: input tensor
step: step
hparams: model hyper-parameters
Returns:
preprocessed input.
""" |
original_channel_size = common_layers.shape_list(x)[-1]
if hparams.add_position_timing_signal:
x = add_position_timing_signal(x, step, hparams)
if hparams.add_step_timing_signal:
x = add_step_timing_signal(x, step, hparams)
if ((hparams.add_position_timing_signal or hparams.add_step_timing_signal)
and hparams.add_or_concat_timing_signal == "concat"):
# linear projection to the original dimension of x
x = common_layers.dense(
x, original_channel_size, activation=None, use_bias=False)
if hparams.add_sru:
x = common_layers.sru(x)
return x |
<SYSTEM_TASK:>
Iterate through records in WET file object.
<END_TASK>
<USER_TASK:>
Description:
def wet_records_from_file_obj(f, take_ownership=False):
"""Iterate through records in WET file object.""" |
while True:
record = WETRecord.read(f)
if record is None:
break
if not record.url:
continue
yield record
if take_ownership:
f.close() |
<SYSTEM_TASK:>
Read header from file. Headers end with length and then 1 blank line.
<END_TASK>
<USER_TASK:>
Description:
def read(cls, f):
"""Read header from file. Headers end with length and then 1 blank line.""" |
url = None
line = f.readline()
if not line:
# EOF
return None
while not line.startswith(cls.LENGTH_HEADER):
if line.startswith(cls.URI_HEADER):
url = line[len(cls.URI_HEADER):].strip()
line = f.readline()
# Consume empty separator
f.readline()
# Read content
length = int(line.split(':')[1])
return cls(url, length) |
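A hedged round-trip of the parsing logic above, assuming WARC-style header constants (`URI_HEADER = "WARC-Target-URI:"`, `LENGTH_HEADER = "Content-Length:"`); the exact class constants are not shown in this snippet, so treat these values as illustrative:

import io

URI_HEADER = "WARC-Target-URI:"    # assumed value of cls.URI_HEADER
LENGTH_HEADER = "Content-Length:"  # assumed value of cls.LENGTH_HEADER
raw = ("WARC-Type: conversion\n"
       "WARC-Target-URI: http://example.com/\n"
       "Content-Length: 11\n"
       "\n")
f = io.StringIO(raw)

url, line = None, f.readline()
while not line.startswith(LENGTH_HEADER):
    if line.startswith(URI_HEADER):
        url = line[len(URI_HEADER):].strip()
    line = f.readline()
f.readline()  # consume the empty separator line
length = int(line.split(":")[1])
assert (url, length) == ("http://example.com/", 11)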
<SYSTEM_TASK:>
Read WETRecord from file. Records end with 2 blank lines.
<END_TASK>
<USER_TASK:>
Description:
def read(cls, f):
"""Read WETRecord from file. Records end with 2 blank lines.""" |
header = WETHeader.read(f)
if header is None:
# EOF
return None
content = f.read(header.length)
# Consume empty separators
f.readline()
f.readline()
return cls(header.url, content) |
<SYSTEM_TASK:>
Multi-layer feed-forward neural network with non-linear activations.
<END_TASK>
<USER_TASK:>
Description:
def MLP(num_hidden_layers=2,
hidden_size=512,
activation_fn=layers.Relu,
num_output_classes=10,
mode="train"):
"""Multi-layer feed-forward neural network with non-linear activations.""" |
del mode
cur_layers = [layers.Flatten()]
for _ in range(num_hidden_layers):
cur_layers += [layers.Dense(hidden_size), activation_fn()]
cur_layers += [layers.Dense(num_output_classes), layers.LogSoftmax()]
return layers.Serial(*cur_layers) |
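A minimal NumPy sketch of the forward pass this layer stack computes (random stand-in weights; the `layers` API itself is not reproduced):

import numpy as np

def log_softmax(v):
    v = v - v.max(axis=-1, keepdims=True)
    return v - np.log(np.exp(v).sum(axis=-1, keepdims=True))

rng = np.random.RandomState(0)
x = rng.rand(8, 28, 28)                     # a toy batch of 8 "images"
x = x.reshape(x.shape[0], -1)               # Flatten
for _ in range(2):                          # num_hidden_layers = 2
    w = rng.randn(x.shape[-1], 512) * 0.01  # Dense(hidden_size)
    x = np.maximum(x @ w, 0.0)              # Relu
w_out = rng.randn(x.shape[-1], 10) * 0.01   # Dense(num_output_classes)
log_probs = log_softmax(x @ w_out)          # LogSoftmax
assert log_probs.shape == (8, 10)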
<SYSTEM_TASK:>
Verifies that all the envs have the same observation and action space.
<END_TASK>
<USER_TASK:>
Description:
def _verify_same_spaces(self):
"""Verifies that all the envs have the same observation and action space.""" |
# Pre-conditions: self._envs is initialized.
if self._envs is None:
raise ValueError("Environments not initialized.")
if not isinstance(self._envs, list):
tf.logging.warning("Not checking observation and action space "
"compatibility across envs, since there is just one.")
return
# NOTE: We compare string representations of observation_space and
# action_space because compositional classes like space.Tuple don't return
# true on object comparison.
if not all(
str(env.observation_space) == str(self.observation_space)
for env in self._envs):
err_str = ("All environments should have the same observation space, but "
"don't.")
tf.logging.error(err_str)
# Log all observation spaces.
for i, env in enumerate(self._envs):
tf.logging.error("Env[%d] has observation space [%s]", i,
env.observation_space)
raise ValueError(err_str)
if not all(
str(env.action_space) == str(self.action_space) for env in self._envs):
err_str = "All environments should have the same action space, but don't."
tf.logging.error(err_str)
# Log all action spaces.
for i, env in enumerate(self._envs):
tf.logging.error("Env[%d] has action space [%s]", i, env.action_space)
raise ValueError(err_str) |
<SYSTEM_TASK:>
Initializes the environments and trajectories.
<END_TASK>
<USER_TASK:>
Description:
def initialize_environments(self, batch_size=1):
"""Initializes the environments and trajectories.
Subclasses can override this if they don't want a default implementation
which initializes `batch_size` environments, but must take care to
initialize self._trajectories (this is checked in __init__ anyways).
Args:
batch_size: (int) Number of `self.base_env_name` envs to initialize.
""" |
assert batch_size >= 1
self._batch_size = batch_size
self._envs = [gym.make(self.base_env_name) for _ in range(batch_size)]
if self._env_wrapper_fn is not None:
self._envs = list(map(self._env_wrapper_fn, self._envs))
# If self.observation_space and self.action_space aren't None, then it means
# that this is a re-initialization of this class; in that case, make sure
# that this matches our previous behaviour.
if self._observation_space:
assert str(self._observation_space) == str(
self._envs[0].observation_space)
else:
# This means that we are initializing this class for the first time.
#
# We set this equal to the first env's observation space, later on we'll
# verify that all envs have the same observation space.
self._observation_space = self._envs[0].observation_space
# Similarly for action_space
if self._action_space:
assert str(self._action_space) == str(self._envs[0].action_space)
else:
self._action_space = self._envs[0].action_space
self._verify_same_spaces()
# If self.reward_range is None, it means that we should take the
# reward range of the env.
if self.reward_range is None:
self._reward_range = self._envs[0].reward_range
# This data structure stores the history of each env.
#
# NOTE: Even if the env is a NN and can step in all batches concurrently, it
# is still valuable to store the trajectories separately.
self._trajectories = trajectory.BatchTrajectory(batch_size=batch_size) |
<SYSTEM_TASK:>
Clips, rounds, and changes to integer type.
<END_TASK>
<USER_TASK:>
Description:
def process_rewards(self, rewards):
"""Clips, rounds, and changes to integer type.
Args:
rewards: numpy array of raw (float) rewards.
Returns:
processed_rewards: numpy array of np.int64
""" |
min_reward, max_reward = self.reward_range
# Clips at min and max reward.
rewards = np.clip(rewards, min_reward, max_reward)
# Round to (nearest) int and convert to integral type.
rewards = np.around(rewards, decimals=0).astype(np.int64)
return rewards |
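A standalone NumPy example of the same clip-round-cast pipeline (the reward range (-1, 1) is made up for illustration):

import numpy as np

reward_range = (-1, 1)
raw = np.array([-3.7, -0.4, 0.2, 0.6, 5.0], dtype=np.float32)
clipped = np.clip(raw, *reward_range)
processed = np.around(clipped, decimals=0).astype(np.int64)
# -> array([-1, 0, 0, 1, 1])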
<SYSTEM_TASK:>
Returns the number of distinct rewards.
<END_TASK>
<USER_TASK:>
Description:
def num_rewards(self):
"""Returns the number of distinct rewards.
Returns:
Returns None if the reward range is infinite or the processed rewards
aren't discrete, otherwise returns the number of distinct rewards.
""" |
# Pre-conditions: reward range is finite.
# : processed rewards are discrete.
if not self.is_reward_range_finite:
tf.logging.error("Infinite reward range, `num_rewards returning None`")
return None
if not self.is_processed_rewards_discrete:
tf.logging.error(
"Processed rewards are not discrete, `num_rewards` returning None")
return None
min_reward, max_reward = self.reward_range
return max_reward - min_reward + 1 |
<SYSTEM_TASK:>
Resets environments at given indices; shouldn't pre-process or record.
<END_TASK>
<USER_TASK:>
Description:
def _reset(self, indices):
"""Resets environments at indices shouldn't pre-process or record.
Subclasses should override this to do the actual reset if something other
than the default implementation is desired.
Args:
indices: list of indices of underlying envs to call reset on.
Returns:
np.ndarray of stacked observations from the reset environments.
""" |
# Pre-conditions: common_preconditions, see `assert_common_preconditions`.
self.assert_common_preconditions()
# This returns a numpy array with first dimension `len(indices)` and the
# rest being the dimensionality of the observation.
return np.stack([self._envs[index].reset() for index in indices]) |
<SYSTEM_TASK:>
Takes a step in all environments, shouldn't pre-process or record.
<END_TASK>
<USER_TASK:>
Description:
def _step(self, actions):
"""Takes a step in all environments, shouldn't pre-process or record.
Subclasses should override this to do the actual step if something other
than the default implementation is desired.
Args:
actions: (np.ndarray) with first dimension equal to the batch size.
Returns:
a tuple of stacked raw observations, raw rewards, dones and infos.
""" |
# Pre-conditions: common_preconditions, see `assert_common_preconditions`.
# : len(actions) == len(self._envs)
self.assert_common_preconditions()
assert len(actions) == len(self._envs)
observations = []
rewards = []
dones = []
infos = []
# Take steps in all environments.
for env, action in zip(self._envs, actions):
observation, reward, done, info = env.step(action)
observations.append(observation)
rewards.append(reward)
dones.append(done)
infos.append(info)
# Convert each list (observations, rewards, ...) into np.array and return a
# tuple.
return tuple(map(np.stack, [observations, rewards, dones, infos])) |
<SYSTEM_TASK:>
Takes a step in all environments.
<END_TASK>
<USER_TASK:>
Description:
def step(self, actions):
"""Takes a step in all environments.
Subclasses should override _step to do the actual step if something other
than the default implementation is desired.
Args:
actions: Batch of actions.
Returns:
(preprocessed_observations, processed_rewards, dones, infos).
""" |
observations, raw_rewards, dones, infos = self._step(actions)
# Process rewards.
raw_rewards = raw_rewards.astype(np.float32)
processed_rewards = self.process_rewards(raw_rewards)
# Process observations.
processed_observations = self.process_observations(observations)
# Record history.
self.trajectories.step(processed_observations, raw_rewards,
processed_rewards, dones, actions)
return processed_observations, processed_rewards, dones, infos |
<SYSTEM_TASK:>
Data fields to store on disk and their decoders.
<END_TASK>
<USER_TASK:>
Description:
def example_reading_spec(self):
"""Data fields to store on disk and their decoders.""" |
# Subclasses can override and/or extend.
processed_reward_type = tf.float32
if self.is_processed_rewards_discrete:
processed_reward_type = tf.int64
data_fields = {
TIMESTEP_FIELD: tf.FixedLenFeature((1,), tf.int64),
RAW_REWARD_FIELD: tf.FixedLenFeature((1,), tf.float32),
PROCESSED_REWARD_FIELD: tf.FixedLenFeature((1,), processed_reward_type),
DONE_FIELD: tf.FixedLenFeature((1,), tf.int64), # we wrote this as int.
# Special treatment because we need to determine type and shape, also
# enables classes to override.
OBSERVATION_FIELD: self.observation_spec,
ACTION_FIELD: self.action_spec,
}
data_items_to_decoders = {
field: tf.contrib.slim.tfexample_decoder.Tensor(field)
for field in data_fields
}
return data_fields, data_items_to_decoders |
<SYSTEM_TASK:>
A generator to yield single time-steps from a list of trajectories.
<END_TASK>
<USER_TASK:>
Description:
def _generate_time_steps(self, trajectory_list):
"""A generator to yield single time-steps from a list of trajectories.""" |
for single_trajectory in trajectory_list:
assert isinstance(single_trajectory, trajectory.Trajectory)
# Skip writing trajectories that have only a single time-step -- this
# could just be a repeated reset.
if single_trajectory.num_time_steps <= 1:
continue
for index, time_step in enumerate(single_trajectory.time_steps):
# The first time-step doesn't have a reward/processed_reward; just
# setting it to 0.0 / 0 should be OK.
raw_reward = time_step.raw_reward
if not raw_reward:
raw_reward = 0.0
processed_reward = time_step.processed_reward
if not processed_reward:
processed_reward = 0
action = time_step.action
if action is None:
# The last time-step doesn't have an action, and this action shouldn't be
# used anyway; gym's spaces have a `sample` function, so let's just sample
# an action and use that.
action = self.action_space.sample()
action = gym_spaces_utils.gym_space_encode(self.action_space, action)
if six.PY3:
# py3 complains that to_example cannot handle np.int64!
action_dtype = self.action_space.dtype
if action_dtype in [np.int64, np.int32]:
action = list(map(int, action))
elif action_dtype in [np.float64, np.float32]:
action = list(map(float, action))
# same with processed_reward.
processed_reward = int(processed_reward)
assert time_step.observation is not None
yield {
TIMESTEP_FIELD: [index],
ACTION_FIELD:
action,
# to_example errors on np.float32
RAW_REWARD_FIELD: [float(raw_reward)],
PROCESSED_REWARD_FIELD: [processed_reward],
# to_example doesn't know bools
DONE_FIELD: [int(time_step.done)],
OBSERVATION_FIELD:
gym_spaces_utils.gym_space_encode(self.observation_space,
time_step.observation),
} |
<SYSTEM_TASK:>
Transformer preparations and encoder.
<END_TASK>
<USER_TASK:>
Description:
def encode(x, x_space, hparams, name):
"""Transformer preparations and encoder.""" |
with tf.variable_scope(name):
(encoder_input, encoder_self_attention_bias,
ed) = transformer.transformer_prepare_encoder(x, x_space, hparams)
encoder_input = tf.nn.dropout(encoder_input, 1.0 - hparams.dropout)
return transformer.transformer_encoder(
encoder_input, encoder_self_attention_bias, hparams), ed |
<SYSTEM_TASK:>
A policy and value net function.
<END_TASK>
<USER_TASK:>
Description:
def policy_and_value_net(rng_key,
batch_observations_shape,
num_actions,
bottom_layers=None):
"""A policy and value net function.""" |
# Layers.
cur_layers = []
if bottom_layers is not None:
cur_layers.extend(bottom_layers)
# Now, with the current logits, one head computes action probabilities and the
# other computes the value function.
# NOTE: We use LogSoftmax instead of Softmax for numerical stability.
cur_layers.extend([layers.Branch(), layers.Parallel(
layers.Serial(layers.Dense(num_actions), layers.LogSoftmax()),
layers.Dense(1)
)])
net = layers.Serial(*cur_layers)
return net.initialize(batch_observations_shape, rng_key), net |
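A hedged NumPy sketch of the two-headed output this Branch/Parallel combination produces: a shared representation feeds a log-probability head over actions and a scalar value head (weights are random stand-ins, not the initialized net):

import numpy as np

def log_softmax(v):
    v = v - v.max(axis=-1, keepdims=True)
    return v - np.log(np.exp(v).sum(axis=-1, keepdims=True))

rng = np.random.RandomState(0)
num_actions, hidden = 4, 16
h = rng.randn(2, 5, hidden)          # shared features: (batch, time, hidden)
w_pi = rng.randn(hidden, num_actions) * 0.1
w_v = rng.randn(hidden, 1) * 0.1
log_probs = log_softmax(h @ w_pi)    # policy head, via LogSoftmax
values = h @ w_v                     # value head
assert log_probs.shape == (2, 5, num_actions) and values.shape == (2, 5, 1)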
<SYSTEM_TASK:>
Dumps the params with `logging.error`.
<END_TASK>
<USER_TASK:>
Description:
def log_params(params, name="params"):
"""Dumps the params with `logging.error`.""" |
for i, param in enumerate(params):
if not param:
# Empty tuple.
continue
if not isinstance(param, (list, tuple)):
logging.error(
"%s[%d] : (%s) = [%s]", name, i, param.shape, onp.array(param))
else:
for j, p in enumerate(param):
logging.error(
"\t%s[%d, %d] = [%s]", name, i, j, onp.array(p)) |
<SYSTEM_TASK:>
Collect trajectories with the given policy net and behaviour.
<END_TASK>
<USER_TASK:>
Description:
def collect_trajectories(env,
policy_fun,
num_trajectories=1,
policy="greedy",
max_timestep=None,
epsilon=0.1):
"""Collect trajectories with the given policy net and behaviour.
Args:
env: A gym env interface, for now this is not-batched.
policy_fun: callable mapping observations (B, T+1) to log-probs (B, T+1, A).
num_trajectories: int, number of trajectories.
policy: string, "greedy", "epsilon-greedy", or "categorical-sampling" i.e.
how to use the policy_fun to return an action.
max_timestep: int or None, the index of the maximum time-step at which we
return the trajectory, None for ending a trajectory only when env
returns done.
epsilon: float, the epsilon for `epsilon-greedy` policy.
Returns:
trajectory: list of (observation, action, reward) tuples, where each element
`i` is a tuple of numpy arrays with shapes as follows:
observation[i] = (B, T_i + 1)
action[i] = (B, T_i)
reward[i] = (B, T_i)
""" |
trajectories = []
for t in range(num_trajectories):
t_start = time.time()
rewards = []
actions = []
done = False
observation = env.reset()
# This is currently shaped (1, 1) + OBS, but new observations will keep
# getting added to it, making it eventually (1, T+1) + OBS
observation_history = observation[np.newaxis, np.newaxis, :]
# Run either till we're done OR if max_timestep is defined only till that
# timestep.
ts = 0
while ((not done) and
(not max_timestep or observation_history.shape[1] < max_timestep)):
ts_start = time.time()
# Run the policy, to pick an action, shape is (1, t, A) because
# observation_history is shaped (1, t) + OBS
predictions = policy_fun(observation_history)
# We need the predictions for the last time-step, so squeeze the batch
# dimension and take the last time-step.
predictions = np.squeeze(predictions, axis=0)[-1]
# Policy can be run in one of the following ways:
# - Greedy
# - Epsilon-Greedy
# - Categorical-Sampling
action = None
if policy == "greedy":
action = np.argmax(predictions)
elif policy == "epsilon-greedy":
# A schedule for epsilon is 1/k where k is the episode number sampled.
if onp.random.random() < epsilon:
# Choose an action at random.
action = onp.random.randint(0, high=len(predictions))
else:
# Return the best action.
action = np.argmax(predictions)
elif policy == "categorical-sampling":
# NOTE: The predictions aren't probabilities but log-probabilities
# instead, since they were computed with LogSoftmax.
# So just np.exp them to make them probabilities.
predictions = np.exp(predictions)
action = onp.argwhere(onp.random.multinomial(1, predictions) == 1)
else:
raise ValueError("Unknown policy: %s" % policy)
# NOTE: Assumption, single batch.
try:
action = int(action)
except TypeError as err:
# Let's dump some information before we die off.
logging.error("Cannot convert action into an integer: [%s]", err)
logging.error("action.shape: [%s]", action.shape)
logging.error("action: [%s]", action)
logging.error("predictions.shape: [%s]", predictions.shape)
logging.error("predictions: [%s]", predictions)
logging.error("observation_history: [%s]", observation_history)
raise err
observation, reward, done, _ = env.step(action)
# observation is of shape OBS, so add extra dims and concatenate on the
# time dimension.
observation_history = np.concatenate(
[observation_history, observation[np.newaxis, np.newaxis, :]], axis=1)
rewards.append(reward)
actions.append(action)
ts += 1
logging.vlog(
2, " Collected time-step[ %5d] of trajectory[ %5d] in [%0.2f] msec.",
ts, t, get_time(ts_start))
logging.vlog(
2, " Collected trajectory[ %5d] in [%0.2f] msec.", t, get_time(t_start))
# This means we are done, or we've been terminated early.
assert done or (
max_timestep and max_timestep >= observation_history.shape[1])
# observation_history is (1, T+1) + OBS, lets squeeze out the batch dim.
observation_history = np.squeeze(observation_history, axis=0)
trajectories.append(
(observation_history, np.stack(actions), np.stack(rewards)))
return trajectories |
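A hedged usage sketch (the `env` and `policy_fun` names here stand in for the caller's objects; they are assumptions, not from the source): with a non-batched gym env and a policy_fun mapping observations of shape (1, t) + OBS to log-probabilities,
# Illustrative only:
trajs = collect_trajectories(env, policy_fun, num_trajectories=2,
                             policy="categorical-sampling", max_timestep=200)
observations, actions, rewards = trajs[0]  # shapes (T+1,) + OBS, (T,), (T,)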
<SYSTEM_TASK:>
Returns the padding value given a dtype.
<END_TASK>
<USER_TASK:>
Description:
def get_padding_value(dtype):
"""Returns the padding value given a dtype.""" |
padding_value = None
if dtype == np.uint8:
padding_value = np.uint8(0)
elif dtype == np.uint16:
padding_value = np.uint16(0)
elif dtype == np.float32:
padding_value = 0.0
else:
padding_value = 0
assert padding_value is not None
return padding_value |
<SYSTEM_TASK:>
Pad trajectories to a bucket length that is a multiple of boundary.
<END_TASK>
<USER_TASK:>
Description:
def pad_trajectories(trajectories, boundary=20):
"""Pad trajectories to a bucket length that is a multiple of boundary.
Args:
trajectories: list[(observation, actions, rewards)], where each observation
is shaped (t+1,) + OBS and actions & rewards are shaped (t,), with the
length of the list being B (batch size).
boundary: int, bucket length, the actions and rewards are padded to integer
multiples of boundary.
Returns:
tuple: (padding lengths, reward_mask, padded_observations, padded_actions,
padded_rewards) where padded_observations is shaped (B, T+1) + OBS and
padded_actions, padded_rewards & reward_mask are shaped (B, T).
Where T is max(t) rounded up to an integer multiple of boundary.
padded_lengths is how much padding we've added to each trajectory and
reward_mask is 1s for actual rewards and 0s for the padding.
""" |
# Let's compute max(t) over all trajectories.
t_max = max(r.shape[0] for (_, _, r) in trajectories)
# t_max is rounded to the next multiple of `boundary`
boundary = int(boundary)
bucket_length = boundary * int(np.ceil(float(t_max) / boundary))
# So all obs will be padded to bucket_length + 1 and actions and rewards to bucket_length.
padded_observations = []
padded_actions = []
padded_rewards = []
padded_lengths = []
reward_masks = []
for (o, a, r) in trajectories:
# Determine the amount to pad, this holds true for obs, actions and rewards.
num_to_pad = bucket_length + 1 - o.shape[0]
padded_lengths.append(num_to_pad)
if num_to_pad == 0:
padded_observations.append(o)
padded_actions.append(a)
padded_rewards.append(r)
reward_masks.append(onp.ones_like(r, dtype=np.int32))
continue
# First pad observations.
padding_config = [(0, num_to_pad, 0)]
for _ in range(o.ndim - 1):
padding_config.append((0, 0, 0))
padding_config = tuple(padding_config)
padding_value = get_padding_value(o.dtype)
action_padding_value = get_padding_value(a.dtype)
reward_padding_value = get_padding_value(r.dtype)
padded_obs = lax.pad(o, padding_value, padding_config)
padded_observations.append(padded_obs)
# Now pad actions and rewards.
assert a.ndim == 1 and r.ndim == 1
padding_config = ((0, num_to_pad, 0),)
padded_action = lax.pad(a, action_padding_value, padding_config)
padded_actions.append(padded_action)
padded_reward = lax.pad(r, reward_padding_value, padding_config)
padded_rewards.append(padded_reward)
# Also create the mask to use later.
reward_mask = onp.ones_like(r, dtype=np.int32)
reward_masks.append(lax.pad(reward_mask, 0, padding_config))
return padded_lengths, np.stack(reward_masks), np.stack(
padded_observations), np.stack(padded_actions), np.stack(padded_rewards) |
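A small worked example of the bucketing arithmetic (our own numbers, not from the source):
# Two trajectories with reward lengths 7 and 23, boundary=20:
#   t_max = 23, bucket_length = 20 * ceil(23 / 20) = 40.
# Observations are padded to length 41 (= bucket_length + 1), actions and
# rewards to length 40; the reward_mask rows have 7 and 23 leading ones.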
<SYSTEM_TASK:>
r"""Computes rewards to go.
<END_TASK>
<USER_TASK:>
Description:
def rewards_to_go(rewards, mask, gamma=0.99):
r"""Computes rewards to go.
Reward to go is defined as follows, the discounted reward that we have to
yet collect, going forward from this point, i.e.:
r2g_t = \sum_{l=0}^{\infty} (\gamma^{l} * reward_{t+l})
Args:
rewards: np.ndarray of shape (B, T) of rewards.
mask: np.ndarray of shape (B, T) of mask for the rewards.
gamma: float, discount factor.
Returns:
rewards to go, np.ndarray of shape (B, T).
""" |
B, T = rewards.shape # pylint: disable=invalid-name,unused-variable
masked_rewards = rewards * mask # (B, T)
# We use the following recurrence relation, derived from the equation above:
#
# r2g[t+1] = (r2g[t] - r[t]) / gamma
#
# This means we'll need to calculate r2g[0] first and then r2g[1] and so on ..
#
# **However** this leads to overflows for long sequences: r2g[t] - r[t] > 0
# and gamma < 1.0, so repeated division makes the values blow up.
#
# So we just run the recurrence in reverse, i.e.
#
# r2g[t] = r[t] + (gamma*r2g[t+1])
#
# This is much better, but might lose precision, since the (small) rewards
# at earlier time-steps get added to a (very) large running sum.
# Compute r2g_{T-1} at the start and then compute backwards in time.
r2gs = [masked_rewards[:, -1]]
# Go from T-2 down to 0.
for t in reversed(range(T - 1)):
r2gs.append(masked_rewards[:, t] + (gamma * r2gs[-1]))
# The list should have length T.
assert T == len(r2gs)
# First we stack them in the correct way to make it (B, T), but these are
# still from newest (T-1) to oldest (0), so then we flip it on time axis.
return np.flip(np.stack(r2gs, axis=1), axis=1) |
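A tiny worked example (our own numbers) to sanity-check the reverse recurrence:
# rewards = [[1., 1., 1.]], mask of ones, gamma = 0.5:
#   r2g[2] = 1.0
#   r2g[1] = 1.0 + 0.5 * 1.0 = 1.5
#   r2g[0] = 1.0 + 0.5 * 1.5 = 1.75
# so rewards_to_go returns [[1.75, 1.5, 1.0]].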
<SYSTEM_TASK:>
Computes the value loss.
<END_TASK>
<USER_TASK:>
Description:
def value_loss(value_net_apply,
value_net_params,
observations,
rewards,
reward_mask,
gamma=0.99):
"""Computes the value loss.
Args:
value_net_apply: value net apply function with signature (ndarray of shape
(B, T+1) + OBS, params) -> ndarray(B, T+1, 1)
value_net_params: params of value_net_apply.
observations: np.ndarray of shape (B, T+1) + OBS
rewards: np.ndarray of shape (B, T) of rewards.
reward_mask: np.ndarray of shape (B, T), the mask over rewards.
gamma: float, discount factor.
Returns:
The average L2 value loss, averaged over instances where reward_mask is 1.
""" |
B, T = rewards.shape # pylint: disable=invalid-name
assert (B, T + 1) == observations.shape[:2]
# NOTE: observations is (B, T+1) + OBS, value_prediction is (B, T+1, 1)
value_prediction = value_net_apply(observations, value_net_params)
assert (B, T + 1, 1) == value_prediction.shape
return value_loss_given_predictions(value_prediction, rewards, reward_mask,
gamma) |
<SYSTEM_TASK:>
Computes the value loss given the prediction of the value function.
<END_TASK>
<USER_TASK:>
Description:
def value_loss_given_predictions(value_prediction,
rewards,
reward_mask,
gamma=0.99):
"""Computes the value loss given the prediction of the value function.
Args:
value_prediction: np.ndarray of shape (B, T+1, 1)
rewards: np.ndarray of shape (B, T) of rewards.
reward_mask: np.ndarray of shape (B, T), the mask over rewards.
gamma: float, discount factor.
Returns:
The average L2 value loss, averaged over instances where reward_mask is 1.
""" |
B, T = rewards.shape # pylint: disable=invalid-name
assert (B, T) == reward_mask.shape
assert (B, T + 1, 1) == value_prediction.shape
value_prediction = np.squeeze(value_prediction, axis=2) # (B, T+1)
value_prediction = value_prediction[:, :-1] * reward_mask # (B, T)
r2g = rewards_to_go(rewards, reward_mask, gamma=gamma) # (B, T)
loss = (value_prediction - r2g)**2
# Take an average on only the points where mask != 0.
return np.sum(loss) / np.sum(reward_mask) |
<SYSTEM_TASK:>
r"""Computes the GAE advantages given the one step TD-residuals.
<END_TASK>
<USER_TASK:>
Description:
def gae_advantages(td_deltas, mask, lambda_=0.95, gamma=0.99):
r"""Computes the GAE advantages given the one step TD-residuals.
The formula for a GAE advantage estimator is as follows:
A_{bt} = \sum_{l=0}^{\infty}(\gamma * \lambda)^{l}(\delta_{b,t+l}).
Internally we just call rewards_to_go, since it is the same computation.
Args:
td_deltas: np.ndarray of shape (B, T) of one step TD-residuals.
mask: np.ndarray of shape (B, T) of mask for the residuals. It may be the
case that the `td_deltas` are already masked correctly, since they are
produced by `deltas(...)`.
lambda_: float, lambda parameter for GAE estimators.
gamma: float, gamma (discount) parameter for GAE estimators.
Returns:
GAE advantage estimates.
""" |
return rewards_to_go(td_deltas, mask, lambda_ * gamma) |
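To see why the reuse is valid (our own restatement of the algebra): substituting delta for reward and gamma * lambda_ for gamma in the rewards-to-go recurrence gives
# A[t] = delta[t] + (gamma * lambda_) * A[t+1]
#      = sum_{l>=0} (gamma * lambda_)^l * delta[t+l]
# e.g. with gamma=0.99 and lambda_=0.95 the effective per-step discount
# is 0.99 * 0.95 = 0.9405.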
<SYSTEM_TASK:>
Picks out the probabilities of the actions along batch and time-steps.
<END_TASK>
<USER_TASK:>
Description:
def chosen_probabs(probab_observations, actions):
"""Picks out the probabilities of the actions along batch and time-steps.
Args:
probab_observations: ndarray of shape `[B, T+1, A]`, where
probab_observations[b, t, i] contains the log-probability of action = i at
the t^th time-step in the b^th trajectory.
actions: ndarray of shape `[B, T]`, with each entry in [0, A) denoting which
action was chosen in the b^th trajectory's t^th time-step.
Returns:
`[B, T]` ndarray with the log-probabilities of the chosen actions.
""" |
B, T = actions.shape # pylint: disable=invalid-name
assert (B, T + 1) == probab_observations.shape[:2]
return probab_observations[np.arange(B)[:, None], np.arange(T), actions] |
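For intuition, the advanced indexing broadcasts the (B, 1) batch indices against the (T,) time indices (illustrative values, not from the source):
# probab_observations shape (2, 3, 2); actions = [[0, 1], [1, 0]]:
# result[b, t] = probab_observations[b, t, actions[b, t]], i.e.
# [[p[0, 0, 0], p[0, 1, 1]],
#  [p[1, 0, 1], p[1, 1, 0]]]   # shape (2, 2) == (B, T)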
<SYSTEM_TASK:>
Computes the probability ratios for each time-step in a trajectory.
<END_TASK>
<USER_TASK:>
Description:
def compute_probab_ratios(p_new, p_old, actions, reward_mask):
"""Computes the probability ratios for each time-step in a trajectory.
Args:
p_new: ndarray of shape [B, T+1, A] of the log-probabilities that the policy
network assigns to all the actions at each time-step in each batch using
the new parameters.
p_old: ndarray of shape [B, T+1, A], same as above, but using old policy
network parameters.
actions: ndarray of shape [B, T] where each element is from [0, A).
reward_mask: ndarray of shape [B, T] masking over probabilities.
Returns:
probab_ratios: ndarray of shape [B, T], where
probab_ratios_{b,t} = p_new_{b,t,action_{b,t}} / p_old_{b,t,action_{b,t}}
""" |
B, T = actions.shape # pylint: disable=invalid-name
assert (B, T + 1) == p_old.shape[:2]
assert (B, T + 1) == p_new.shape[:2]
logp_old = chosen_probabs(p_old, actions)
logp_new = chosen_probabs(p_new, actions)
assert (B, T) == logp_old.shape
assert (B, T) == logp_new.shape
# Since these are log-probabilities, we just subtract them.
probab_ratios = np.exp(logp_new - logp_old) * reward_mask
assert (B, T) == probab_ratios.shape
return probab_ratios |
<SYSTEM_TASK:>
PPO objective, with an eventual minus sign, given observations.
<END_TASK>
<USER_TASK:>
Description:
def ppo_loss(policy_net_apply,
new_policy_params,
old_policy_params,
value_net_apply,
value_net_params,
padded_observations,
padded_actions,
padded_rewards,
reward_mask,
gamma=0.99,
lambda_=0.95,
epsilon=0.2):
"""PPO objective, with an eventual minus sign, given observations.""" |
B, T = padded_rewards.shape # pylint: disable=invalid-name
assert (B, T + 1) == padded_observations.shape[:2]
assert (B, T) == padded_actions.shape
assert (B, T) == padded_rewards.shape
assert (B, T) == reward_mask.shape
# Compute predicted values and predicted log-probs and hand it over to
# `ppo_loss_given_predictions`.
# (B, T+1, 1)
predicted_values = value_net_apply(padded_observations, value_net_params)
assert (B, T + 1, 1) == predicted_values.shape
# log_probab_actions_{old,new} are both (B, T+1, A)
log_probab_actions_old = policy_net_apply(padded_observations,
old_policy_params)
log_probab_actions_new = policy_net_apply(padded_observations,
new_policy_params)
assert (B, T + 1) == log_probab_actions_old.shape[:2]
assert (B, T + 1) == log_probab_actions_new.shape[:2]
assert log_probab_actions_old.shape[-1] == log_probab_actions_new.shape[-1]
return ppo_loss_given_predictions(log_probab_actions_new,
log_probab_actions_old,
predicted_values,
padded_actions,
padded_rewards,
reward_mask,
gamma=gamma,
lambda_=lambda_,
epsilon=epsilon) |
<SYSTEM_TASK:>
PPO objective, with an eventual minus sign, given predictions.
<END_TASK>
<USER_TASK:>
Description:
def ppo_loss_given_predictions(log_probab_actions_new,
log_probab_actions_old,
predicted_values,
padded_actions,
padded_rewards,
reward_mask,
gamma=0.99,
lambda_=0.95,
epsilon=0.2):
"""PPO objective, with an eventual minus sign, given predictions.""" |
B, T = padded_rewards.shape # pylint: disable=invalid-name
assert (B, T) == padded_actions.shape
assert (B, T) == reward_mask.shape
_, _, A = log_probab_actions_old.shape # pylint: disable=invalid-name
assert (B, T + 1, 1) == predicted_values.shape
assert (B, T + 1, A) == log_probab_actions_old.shape
assert (B, T + 1, A) == log_probab_actions_new.shape
# (B, T)
td_deltas = deltas(
np.squeeze(predicted_values, axis=2), # (B, T+1)
padded_rewards,
reward_mask,
gamma=gamma)
# (B, T)
advantages = gae_advantages(
td_deltas, reward_mask, lambda_=lambda_, gamma=gamma)
# (B, T)
ratios = compute_probab_ratios(log_probab_actions_new,
log_probab_actions_old,
padded_actions,
reward_mask)
assert (B, T) == ratios.shape
# (B, T)
objective = clipped_objective(
ratios, advantages, reward_mask, epsilon=epsilon)
assert (B, T) == objective.shape
# ()
average_objective = np.sum(objective) / np.sum(reward_mask)
# Loss is negative objective.
return -average_objective |
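`clipped_objective` is used above but not shown in this excerpt; a minimal sketch consistent with the standard PPO clipped surrogate (an assumption on our part, not necessarily the exact source implementation) would be:
def clipped_objective(probab_ratios, advantages, reward_mask, epsilon=0.2):
  """Standard PPO surrogate: min(r * A, clip(r, 1-eps, 1+eps) * A), masked."""
  clipped_ratios = np.clip(probab_ratios, 1.0 - epsilon, 1.0 + epsilon)
  return np.minimum(probab_ratios * advantages,
                    clipped_ratios * advantages) * reward_mask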
<SYSTEM_TASK:>
PPO optimizer step.
<END_TASK>
<USER_TASK:>
Description:
def ppo_opt_step(i,
opt_state,
ppo_opt_update,
policy_net_apply,
old_policy_params,
value_net_apply,
value_net_params,
padded_observations,
padded_actions,
padded_rewards,
reward_mask,
gamma=0.99,
lambda_=0.95,
epsilon=0.1):
"""PPO optimizer step.""" |
new_policy_params = trax_opt.get_params(opt_state)
g = grad(
ppo_loss, argnums=1)(
policy_net_apply,
new_policy_params,
old_policy_params,
value_net_apply,
value_net_params,
padded_observations,
padded_actions,
padded_rewards,
reward_mask,
gamma=gamma,
lambda_=lambda_,
epsilon=epsilon)
return ppo_opt_update(i, g, opt_state) |
<SYSTEM_TASK:>
Value optimizer step.
<END_TASK>
<USER_TASK:>
Description:
def value_opt_step(i,
opt_state,
opt_update,
value_net_apply,
padded_observations,
padded_rewards,
reward_mask,
gamma=0.99):
"""Value optimizer step.""" |
value_params = trax_opt.get_params(opt_state)
# Note this partial application here and argnums above in ppo_opt_step.
g = grad(functools.partial(value_loss, value_net_apply))(
value_params,
padded_observations,
padded_rewards,
reward_mask,
gamma=gamma)
return opt_update(i, g, opt_state) |
<SYSTEM_TASK:>
Policy and Value optimizer step.
<END_TASK>
<USER_TASK:>
Description:
def policy_and_value_opt_step(i,
opt_state,
opt_update,
policy_and_value_net_apply,
old_params,
padded_observations,
padded_actions,
padded_rewards,
reward_mask,
c1=1.0,
c2=0.01,
gamma=0.99,
lambda_=0.95,
epsilon=0.1):
"""Policy and Value optimizer step.""" |
# Combined loss function given the new params.
def policy_and_value_loss(params):
"""Returns the combined loss given just parameters."""
(loss, _, _, _) = combined_loss(
params,
old_params,
policy_and_value_net_apply,
padded_observations,
padded_actions,
padded_rewards,
reward_mask,
c1=c1,
c2=c2,
gamma=gamma,
lambda_=lambda_,
epsilon=epsilon)
return loss
new_params = trax_opt.get_params(opt_state)
g = grad(policy_and_value_loss)(new_params)
return opt_update(i, g, opt_state) |
<SYSTEM_TASK:>
Download corpora for multinli.
<END_TASK>
<USER_TASK:>
Description:
def _maybe_download_corpora(tmp_dir):
"""Download corpora for multinli.
Args:
tmp_dir: a string
Returns:
a string
""" |
mnli_filename = "MNLI.zip"
mnli_finalpath = os.path.join(tmp_dir, "MNLI")
if not tf.gfile.Exists(mnli_finalpath):
zip_filepath = generator_utils.maybe_download(
tmp_dir, mnli_filename, _MNLI_URL)
zip_ref = zipfile.ZipFile(zip_filepath, "r")
zip_ref.extractall(tmp_dir)
zip_ref.close()
return mnli_finalpath |
<SYSTEM_TASK:>
Adds a residual connection to the filter x for the shake-shake model.
<END_TASK>
<USER_TASK:>
Description:
def shake_shake_skip_connection(x, output_filters, stride, is_training):
"""Adds a residual connection to the filter x for the shake-shake model.""" |
curr_filters = common_layers.shape_list(x)[-1]
if curr_filters == output_filters:
return x
stride_spec = [1, stride, stride, 1]
# Skip path 1.
path1 = tf.nn.avg_pool(x, [1, 1, 1, 1], stride_spec, "VALID")
path1 = tf.layers.conv2d(
path1, int(output_filters / 2), (1, 1), padding="SAME", name="path1_conv")
# Skip path 2.
pad_arr = [[0, 0], [0, 1], [0, 1], [0, 0]] # First pad with 0's then crop.
path2 = tf.pad(x, pad_arr)[:, 1:, 1:, :]
path2 = tf.nn.avg_pool(path2, [1, 1, 1, 1], stride_spec, "VALID")
path2 = tf.layers.conv2d(
path2, int(output_filters / 2), (1, 1), padding="SAME", name="path2_conv")
# Concat and apply BN.
final_path = tf.concat(values=[path1, path2], axis=-1)
final_path = tf.layers.batch_normalization(
final_path, training=is_training, name="final_path_bn")
return final_path |
<SYSTEM_TASK:>
Building a 2 branching convnet.
<END_TASK>
<USER_TASK:>
Description:
def shake_shake_branch(x, output_filters, stride, rand_forward, rand_backward,
hparams):
"""Building a 2 branching convnet.""" |
is_training = hparams.mode == tf.estimator.ModeKeys.TRAIN
x = tf.nn.relu(x)
x = tf.layers.conv2d(
x,
output_filters, (3, 3),
strides=(stride, stride),
padding="SAME",
name="conv1")
x = tf.layers.batch_normalization(x, training=is_training, name="bn1")
x = tf.nn.relu(x)
x = tf.layers.conv2d(x, output_filters, (3, 3), padding="SAME", name="conv2")
x = tf.layers.batch_normalization(x, training=is_training, name="bn2")
if is_training:
x = x * rand_backward + tf.stop_gradient(x * rand_forward -
x * rand_backward)
else:
x *= 1.0 / hparams.shake_shake_num_branches
return x |
<SYSTEM_TASK:>
Builds a full shake-shake sub layer.
<END_TASK>
<USER_TASK:>
Description:
def shake_shake_block(x, output_filters, stride, hparams):
"""Builds a full shake-shake sub layer.""" |
is_training = hparams.mode == tf.estimator.ModeKeys.TRAIN
batch_size = common_layers.shape_list(x)[0]
# Generate random numbers for scaling the branches.
rand_forward = [
tf.random_uniform(
[batch_size, 1, 1, 1], minval=0, maxval=1, dtype=tf.float32)
for _ in range(hparams.shake_shake_num_branches)
]
rand_backward = [
tf.random_uniform(
[batch_size, 1, 1, 1], minval=0, maxval=1, dtype=tf.float32)
for _ in range(hparams.shake_shake_num_branches)
]
# Normalize so that all sum to 1.
total_forward = tf.add_n(rand_forward)
total_backward = tf.add_n(rand_backward)
rand_forward = [samp / total_forward for samp in rand_forward]
rand_backward = [samp / total_backward for samp in rand_backward]
zipped_rand = zip(rand_forward, rand_backward)
branches = []
for branch, (r_forward, r_backward) in enumerate(zipped_rand):
with tf.variable_scope("branch_{}".format(branch)):
b = shake_shake_branch(x, output_filters, stride, r_forward, r_backward,
hparams)
b = tf.nn.dropout(b, 1.0 - hparams.layer_prepostprocess_dropout)
branches.append(b)
res = shake_shake_skip_connection(x, output_filters, stride, is_training)
if hparams.shake_shake_concat:
concat_values = [res] + branches
concat_output = tf.concat(values=concat_values, axis=-1)
concat_output = tf.nn.relu(concat_output)
concat_output = tf.layers.conv2d(
concat_output, output_filters, (1, 1), name="concat_1x1")
concat_output = tf.layers.batch_normalization(
concat_output, training=is_training, name="concat_bn")
return concat_output
else:
return res + tf.add_n(branches) |
<SYSTEM_TASK:>
Builds many sub layers into one full layer.
<END_TASK>
<USER_TASK:>
Description:
def shake_shake_layer(x, output_filters, num_blocks, stride, hparams):
"""Builds many sub layers into one full layer.""" |
for block_num in range(num_blocks):
curr_stride = stride if (block_num == 0) else 1
with tf.variable_scope("layer_{}".format(block_num)):
x = shake_shake_block(x, output_filters, curr_stride, hparams)
return x |
<SYSTEM_TASK:>
Check if metric has plateaued.
<END_TASK>
<USER_TASK:>
Description:
def has_metric_plateaued(steps, values, num_steps=100, delta=0.1,
decrease=True):
"""Check if metric has plateaued.
A metric has plateaued if the value has not increased/decreased (depending on
`decrease`) by `delta` for at least `num_steps`.
Args:
steps: list<int> list of global steps for values.
values: list<float> list of metric values.
num_steps: int, number of steps the metric has to have been plateaued for.
delta: float, how much the metric should have changed by over num_steps.
decrease: bool, whether to check if the metric has decreased by delta or
increased by delta.
Returns:
bool, whether the metric has plateaued.
""" |
assert num_steps > 0
if len(steps) < 2:
return False
steps_at_least_num_steps_ago = [
s for s in steps if s <= (steps[-1] - num_steps)
]
if not steps_at_least_num_steps_ago:
# Not enough steps yet
return False
delta_step_idx = len(steps_at_least_num_steps_ago) - 1
start_val = values[delta_step_idx]
values_to_check = values[delta_step_idx:]
observed_deltas = []
for val in values_to_check:
if decrease:
observed_delta = start_val - val
else:
observed_delta = val - start_val
observed_deltas.append(observed_delta)
within_range = [obs < delta for obs in observed_deltas]
return all(within_range) |
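An illustrative call (our own numbers): with an eval loss logged every 100 steps,
steps = [100, 200, 300, 400]
values = [2.00, 1.98, 1.99, 1.985]
# Starting from the last value at least 100 steps old (1.99 at step 300),
# the loss never decreased by >= 0.1, so the metric has plateaued:
assert has_metric_plateaued(steps, values, num_steps=100, delta=0.1)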
<SYSTEM_TASK:>
Default hyperparameters for a DietAdamOptimizer.
<END_TASK>
<USER_TASK:>
Description:
def diet_adam_optimizer_params():
"""Default hyperparameters for a DietAdamOptimizer.
Returns:
a hyperparameters object.
""" |
return hparam.HParams(
quantize=True, # use 16-bit fixed-point
quantization_scale=10.0 / tf.int16.max,
optimizer="DietAdam",
learning_rate=1.0,
learning_rate_warmup_steps=2000,
learning_rate_decay_scheme="noam", # "noam" or "none"
epsilon=1e-10,
beta1=0.0, # we can save memory if beta1=0
beta2=0.98,
factored_second_moment_accumulator=True, # this saves memory
) |
<SYSTEM_TASK:>
A two-layer feed-forward network with relu activation on hidden layer.
<END_TASK>
<USER_TASK:>
Description:
def diet_expert(x, hidden_size, params):
"""A two-layer feed-forward network with relu activation on hidden layer.
Uses diet variables.
Recomputes hidden layer on backprop to save activation memory.
Args:
x: a Tensor with shape [batch, io_size]
hidden_size: an integer
params: a diet variable HParams object.
Returns:
a Tensor with shape [batch, io_size]
""" |
@fn_with_diet_vars(params)
def diet_expert_internal(x):
dim = x.get_shape().as_list()[-1]
h = tf.layers.dense(x, hidden_size, activation=tf.nn.relu, use_bias=False)
y = tf.layers.dense(h, dim, use_bias=False)
y *= tf.rsqrt(tf.to_float(dim * hidden_size))
return y
return diet_expert_internal(x) |
<SYSTEM_TASK:>
Quantize x according to params, optionally randomizing the rounding.
<END_TASK>
<USER_TASK:>
Description:
def _quantize(x, params, randomize=True):
"""Quantize x according to params, optionally randomizing the rounding.""" |
if not params.quantize:
return x
if not randomize:
return tf.bitcast(
tf.cast(x / params.quantization_scale, tf.int16), tf.float16)
abs_x = tf.abs(x)
sign_x = tf.sign(x)
y = abs_x / params.quantization_scale
y = tf.floor(y + tf.random_uniform(common_layers.shape_list(x)))
y = tf.minimum(y, tf.int16.max) * sign_x
q = tf.bitcast(tf.cast(y, tf.int16), tf.float16)
return q |
<SYSTEM_TASK:>
Dequantize q according to params.
<END_TASK>
<USER_TASK:>
Description:
def _dequantize(q, params):
"""Dequantize q according to params.""" |
if not params.quantize:
return q
return tf.to_float(tf.bitcast(q, tf.int16)) * params.quantization_scale |
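A round-trip sketch of the two functions above (our own arithmetic, using the default quantization_scale = 10.0 / tf.int16.max):
# _quantize(x, params, randomize=False) truncates x / scale to int16 and
# bitcasts the result into a float16 tensor; _dequantize bitcasts back and
# multiplies by scale. Values are thus representable to within about one
# scale step (~3e-4 here); the randomized path additionally clips the
# magnitude to int16.max, i.e. to about +/-10.0 after dequantization.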
<SYSTEM_TASK:>
Create a custom variable getter for diet variables according to params.
<END_TASK>
<USER_TASK:>
Description:
def make_diet_var_getter(params):
"""Create a custom variable getter for diet variables according to params.""" |
def diet_var_initializer(shape, dtype, partition_info=None):
"""Initializer for a diet variable."""
del dtype
del partition_info
with common_layers.fn_device_dependency("diet_init") as out_deps:
float_range = math.sqrt(3)
ret = tf.random_uniform(shape, -float_range, float_range)
if params.quantize:
ret = _quantize(ret, params, randomize=False)
out_deps.append(ret)
return ret
def diet_var_getter(getter, **kwargs):
"""Get diet variable and return it dequantized."""
if params.quantize:
kwargs["dtype"] = tf.float16
kwargs["initializer"] = diet_var_initializer
kwargs["trainable"] = False
base_var = getter(**kwargs)
dequantized = _dequantize(base_var, params)
if not hasattr(params, "dequantized"):
params.dequantized = defaultdict(list)
params.dequantized[base_var.name].append(dequantized)
return dequantized
return diet_var_getter |
<SYSTEM_TASK:>
Call function with args; use diet variables according to params.
<END_TASK>
<USER_TASK:>
Description:
def _fn_with_diet_vars(fn, args, params):
"""Call function with args; use diet variables according to params.""" |
vs_ctr = []
def grad_fn(inputs, variables, outputs, output_grads):
"""Custom gradient function."""
del outputs # recomputing below
with common_layers.fn_device_dependency("diet_grad",
output_grads[0].device) as out_dep:
with tf.variable_scope(vs_ctr[0], reuse=True):
outputs = fn(*inputs)
variables = [common_layers.underlying_variable_ref(v) for v in variables]
dequantized_variables = [
params.dequantized[v.name][-1] for v in variables
]
grads = tf.gradients(outputs, inputs + dequantized_variables,
output_grads)
grad_inputs = grads[:len(inputs)]
grad_variables = grads[len(inputs):]
opt = _create_diet_optimizer(params)
# Apply grad_variables here
var_updates = []
for v, dv in zip(variables, grad_variables):
with tf.variable_scope(vs_ctr[0].name):
opt.create_slots(v)
update_op = opt.update_variable(v, dv)
var_updates.append(update_op)
with tf.control_dependencies(var_updates):
grad_inputs = [tf.identity(dx) for dx in grad_inputs]
out_dep.append(grad_inputs)
return grad_inputs, [None] * len(variables)
@common_layers.fn_with_custom_grad(grad_fn, use_global_vars=True)
def forward(*inputs):
with tf.variable_scope(
None, default_name="diet",
custom_getter=make_diet_var_getter(params)) as vs:
vs_ctr.append(vs)
outputs = fn(*inputs)
return outputs
with common_layers.fn_device_dependency("diet_forward",
args[0].device) as out_dep:
outputs = forward(*args)
out_dep.append(outputs)
return outputs |
<SYSTEM_TASK:>
Decorator for graph-building function to use diet variables.
<END_TASK>
<USER_TASK:>
Description:
def fn_with_diet_vars(params):
"""Decorator for graph-building function to use diet variables.""" |
params = copy.copy(params)
def dec(fn):
def wrapped(*args):
return _fn_with_diet_vars(fn, args, params)
return wrapped
return dec |
<SYSTEM_TASK:>
Create the factorized Adam accumulators for diet variables.
<END_TASK>
<USER_TASK:>
Description:
def create_slots(self, var):
"""Create the factorized Adam accumulators for diet variables.""" |
params = self.params
shape = var.get_shape().as_list()
if not hasattr(params, "slots"):
params.slots = defaultdict(dict)
name = var.op.name
slots = params.slots[name]
if params.factored_second_moment_accumulator and len(shape) == 2:
slots["adam_vr"] = tf.get_variable(
name + "_adam_vr", [shape[0], 1],
trainable=False,
initializer=tf.zeros_initializer())
slots["adam_vc"] = tf.get_variable(
name + "_adam_vc", [1, shape[1]],
trainable=False,
initializer=tf.zeros_initializer())
else:
slots["adam_v"] = tf.get_variable(
name + "_adam_v",
shape,
trainable=False,
initializer=tf.zeros_initializer())
if params.beta1 != 0.0:
slots["adam_m"] = tf.get_variable(
name + "_adam_m",
shape,
trainable=False,
initializer=tf.zeros_initializer()) |
<SYSTEM_TASK:>
Update the variable and its slots.
<END_TASK>
<USER_TASK:>
Description:
def update_variable(self, var, grad_var):
"""Update the variable and its slots.""" |
params = self.params
global_step = tf.to_float(self.global_step) + 1
# compute learning rate
lrate = params.learning_rate
if params.learning_rate_decay_scheme == "noam":
lrate *= tf.minimum(global_step * params.learning_rate_warmup_steps**-1.5,
global_step**-0.5)
else:
assert params.learning_rate_decay_scheme == "none"
lrate *= tf.minimum(global_step / params.learning_rate_warmup_steps, 1.0)
# compute adjustment due to second moment
slots = params.slots[var.op.name]
grad_squared = tf.square(grad_var)
beta2_pow = tf.pow(params.beta2, global_step)
if params.factored_second_moment_accumulator and len(var.shape) == 2:
vr_update = tf.assign(slots["adam_vr"], slots["adam_vr"] * params.beta2 +
tf.reduce_mean(grad_squared, 1, keepdims=True) *
(1.0 - params.beta2))
vc_update = tf.assign(slots["adam_vc"], slots["adam_vc"] * params.beta2 +
tf.reduce_mean(grad_squared, 0, keepdims=True) *
(1.0 - params.beta2))
with tf.control_dependencies([vr_update, vc_update]):
vr = tf.sqrt(slots["adam_vr"] / (1.0 - beta2_pow)) + params.epsilon
vc = tf.sqrt(slots["adam_vc"] / (1.0 - beta2_pow)) + params.epsilon
vc /= tf.reduce_mean(vc)
denom = vr * vc
else:
v_update = tf.assign(slots["adam_v"],
slots["adam_v"] * params.beta2 + grad_squared *
(1.0 - params.beta2))
with tf.control_dependencies([v_update]):
denom = tf.sqrt(slots["adam_v"] / (1.0 - beta2_pow)) + params.epsilon
# compute momentum if applicable
if params.beta1 != 0.0:
m_update = tf.assign(slots["adam_m"],
slots["adam_m"] * params.beta1 + grad_var *
(1.0 - params.beta1))
with tf.control_dependencies([m_update]):
grad_var = slots["adam_m"]
# update var
subtrahend = lrate * grad_var / denom
new_val = _quantize(_dequantize(var, params) - subtrahend, params)
return tf.assign(var, new_val) |
<SYSTEM_TASK:>
Generator for the dataset samples.
<END_TASK>
<USER_TASK:>
Description:
def generator_samples(tmp_dir, pb_cst):
"""Generator for the dataset samples.
If not present, download and extract the dataset.
Args:
tmp_dir: path to the directory where to download the dataset.
pb_cst: CodingPbConstants object defining paths
Yields:
A CodingPbInfo object containing the next challenge information.
""" |
# Step1: Download dataset (eventually)
data_zip_path = generator_utils.maybe_download_from_drive(
directory=tmp_dir,
filename=_DATASET_FILENAME,
url=_DATASET_URL,
)
tf.logging.info("Data downloaded in: {}".format(data_zip_path))
# Step2: Extract dataset
# We could deduce _DATASET_PB_PATH from the zip file (instead of
# hardcoded path)
data_rootdir = os.path.join(tmp_dir, _DATASET_PB_PATH)
if not tf.gfile.Exists(data_rootdir):
with zipfile.ZipFile(data_zip_path, "r") as corpus_zip:
corpus_zip.extractall(tmp_dir)
# We could remove the extracted __MACOSX folder
tf.logging.info("Data extracted in: {}".format(tmp_dir))
else:
tf.logging.info("Data already extracted in: {}".format(tmp_dir))
# Step3: Extract the problems list on the extracted folder
def contains_samples(subdir, dirs, files): # pylint: disable=unused-argument
"""Check that the folder contains a problem."""
return (
_DESC_DIR_NAME in dirs and
pb_cst.code_dir_name in dirs
)
def next_sample(subdir, dirs, files): # pylint: disable=unused-argument
"""Return the filenames of the problem."""
# More could be extracted (like the expected inputs/outputs
# pairs, the problem difficulty, the names of the algorithmic techniques
# needed)
desc_file = os.path.join(subdir, _DESC_DIR_NAME, "description.txt")
code_files = []
# As the dataset is noisy, the program deduces the language from the file
# content.
code_pattern = os.path.join(subdir, pb_cst.code_dir_name, "*.txt")
for f in tf.gfile.Glob(code_pattern):
with tf.gfile.GFile(f, mode="r") as target_file:
# Hack to filter C++/Java files. In theory some python comments could
# make the file be considered as C++ but in practice the chance of
# getting a false negative is low.
content = target_file.read()
if not any(p in content for p in pb_cst.filter_patterns):
code_files.append(f)
return CodingPbInfo(
desc_file=desc_file,
code_files=code_files
)
# The dataset contains problem from two different sources (CodeChef
# and CodeForces). Due to the limited number of samples, all problems from
# both sources are merged
for w in tf.gfile.Walk(data_rootdir):
if contains_samples(*w):
yield next_sample(*w) |
<SYSTEM_TASK:>
Adds a stack of LSTM layers on top of input.
<END_TASK>
<USER_TASK:>
Description:
def lstm(inputs, sequence_length, hparams, train, name, initial_state=None):
"""Adds a stack of LSTM layers on top of input.
Args:
inputs: The input `Tensor`, shaped `[batch_size, time_steps, hidden_size]`.
sequence_length: Lengths of the actual input sequence, excluding padding; a
`Tensor` shaped `[batch_size]`.
hparams: HParams; hyperparameters.
train: bool; `True` when constructing training graph to enable dropout.
name: string; Create variable names under this scope.
initial_state: tuple of `LSTMStateTuple`s; the initial state of each layer.
Returns:
A tuple (outputs, states), where:
outputs: The output `Tensor`, shaped `[batch_size, time_steps,
hidden_size]`.
states: A tuple of `LSTMStateTuple`s; the final state of each layer.
Bidirectional LSTM returns a concatenation of last forward and backward
state, reduced to the original dimensionality.
""" |
layers = [_dropout_lstm_cell(hparams, train)
for _ in range(hparams.num_hidden_layers)]
with tf.variable_scope(name):
return tf.nn.dynamic_rnn(
tf.nn.rnn_cell.MultiRNNCell(layers),
inputs,
sequence_length,
initial_state=initial_state,
dtype=tf.float32,
time_major=False) |
<SYSTEM_TASK:>
The basic LSTM seq2seq model, main step used for training.
<END_TASK>
<USER_TASK:>
Description:
def lstm_seq2seq_internal(inputs, targets, hparams, train):
"""The basic LSTM seq2seq model, main step used for training.""" |
with tf.variable_scope("lstm_seq2seq"):
if inputs is not None:
inputs_length = common_layers.length_from_embedding(inputs)
# Flatten inputs.
inputs = common_layers.flatten4d3d(inputs)
# LSTM encoder.
inputs = tf.reverse_sequence(inputs, inputs_length, seq_axis=1)
_, final_encoder_state = lstm(inputs, inputs_length, hparams, train,
"encoder")
else:
final_encoder_state = None
# LSTM decoder.
shifted_targets = common_layers.shift_right(targets)
# Add 1 to account for the padding added to the left from shift_right
targets_length = common_layers.length_from_embedding(shifted_targets) + 1
decoder_outputs, _ = lstm(
common_layers.flatten4d3d(shifted_targets),
targets_length,
hparams,
train,
"decoder",
initial_state=final_encoder_state)
return tf.expand_dims(decoder_outputs, axis=2) |
<SYSTEM_TASK:>
Hparams for LSTM with area attention.
<END_TASK>
<USER_TASK:>
Description:
def lstm_area_attention_base():
"""Hparams for LSTM with area attention.""" |
hparams = lstm_luong_attention()
hparams.batch_size = 16384
hparams.num_hidden_layers = 2
hparams.hidden_size = 1024
hparams.num_heads = 4
hparams.dropout = 0.2
hparams.learning_rate = 0.1
hparams.max_area_width = 2
hparams.area_key_mode = "mean"
hparams.area_value_mode = "sum"
return hparams |
<SYSTEM_TASK:>
Transform a string with a filename into a list of float32.
<END_TASK>
<USER_TASK:>
Description:
def encode(self, s):
"""Transform a string with a filename into a list of float32.
Args:
s: path to the file with a waveform.
Returns:
samples: list of float32s
""" |
# Make sure that the data is a single channel, 16bit, 16kHz wave.
# TODO(chorowski): the directory may not be writable, this should fallback
# to a temp path, and provide instructions for installing sox.
if s.endswith(".mp3"):
# TODO(dliebling) On Linux, check if libsox-fmt-mp3 is installed.
out_filepath = s[:-4] + ".wav"
call([
"sox", "--guard", s, "-r", "16k", "-b", "16", "-c", "1", out_filepath
])
s = out_filepath
elif not s.endswith(".wav"):
out_filepath = s + ".wav"
if not os.path.exists(out_filepath):
call(["sox", "-r", "16k", "-b", "16", "-c", "1", s, out_filepath])
s = out_filepath
rate, data = wavfile.read(s)
assert rate == self._sample_rate
assert len(data.shape) == 1
if data.dtype not in [np.float32, np.float64]:
data = data.astype(np.float32) / np.iinfo(data.dtype).max
return data.tolist() |
<SYSTEM_TASK:>
Transform a sequence of float32 into a waveform.
<END_TASK>
<USER_TASK:>
Description:
def decode(self, ids):
"""Transform a sequence of float32 into a waveform.
Args:
ids: list of float32 samples to be converted.
Returns:
Path to the temporary file where the waveform was saved.
Raises:
ValueError: if the ids are not of the appropriate size.
""" |
_, tmp_file_path = tempfile.mkstemp()
wavfile.write(tmp_file_path, self._sample_rate, np.asarray(ids))
return tmp_file_path |
<SYSTEM_TASK:>
Returns or Creates a Vertex mapped by key.
<END_TASK>
<USER_TASK:>
Description:
def get_vertex(self, key):
"""Returns or Creates a Vertex mapped by key.
Args:
key: A string reference for a vertex. May refer to a new Vertex in which
case it will be created.
Returns:
The Vertex mapped to by key.
""" |
if key in self.vertex_map:
return self.vertex_map[key]
vertex = self.new_vertex()
self.vertex_map[key] = vertex
return vertex |
<SYSTEM_TASK:>
Returns a simplified dictionary representing the Graph.
<END_TASK>
<USER_TASK:>
Description:
def to_dict(self):
"""Returns a simplified dictionary representing the Graph.
Returns:
A dictionary that can easily be serialized to JSON.
""" |
return {
"node": [v.to_dict() for v in self.vertices],
"edge": [e.to_dict() for e in self.edges]
} |
<SYSTEM_TASK:>
Self-attention layer with source as memory antecedent.
<END_TASK>
<USER_TASK:>
Description:
def attend(x, source, hparams, name):
"""Self-attention layer with source as memory antecedent.""" |
with tf.variable_scope(name):
x = tf.squeeze(x, axis=2)
if len(source.get_shape()) > 3:
source = tf.squeeze(source, axis=2)
source = common_attention.add_timing_signal_1d(source)
y = common_attention.multihead_attention(
common_layers.layer_preprocess(x, hparams), source, None,
hparams.attention_key_channels or hparams.hidden_size,
hparams.attention_value_channels or hparams.hidden_size,
hparams.hidden_size, hparams.num_heads,
hparams.attention_dropout)
res = common_layers.layer_postprocess(x, y, hparams)
return tf.expand_dims(res, axis=2) |
<SYSTEM_TASK:>
Sample from the latent space in the autoencoder.
<END_TASK>
<USER_TASK:>
Description:
def ae_latent_sample(latents_dense, inputs, ed, embed, iters, hparams):
"""Sample from the latent space in the autoencoder.""" |
if hparams.num_decode_blocks < 2 and hparams.sampling_temp == 0.0:
# TODO(lukaszkaiser): beam-search only works in non-blocked mode for now.
tf.logging.info("Running beam-search for latents with beam size 1.")
return ae_latent_sample_beam(latents_dense, inputs, ed, embed, hparams)
latents_pred = decode_transformer(inputs, ed, latents_dense, hparams, "extra")
latents_discrete, _ = ae_latent_softmax(latents_pred, None, hparams)
def next_bit(latents_discrete, i):
latents_discrete_prev = latents_discrete
with tf.variable_scope(tf.get_variable_scope(), reuse=True):
latents_dense = embed(latents_discrete)
latents_pred = decode_transformer(
inputs, ed, latents_dense, hparams, "extra")
latents_discrete, _ = ae_latent_softmax(latents_pred, None, hparams)
return tf.concat([latents_discrete_prev[:, :(i+1), :],
latents_discrete[:, (i+1):, :]], axis=1)
for i in range(iters):
latents_discrete = next_bit(latents_discrete, i)
return latents_discrete |
<SYSTEM_TASK:>
Hyperparameters for CIFAR-10 experiments.
<END_TASK>
<USER_TASK:>
Description:
def imagetransformer_ae_cifar():
"""Hyperparameters for CIFAR-10 experiments.""" |
hparams = transformer_ae_small()
hparams.filter_size = 512
hparams.num_compress_steps = 3
hparams.startup_steps = 10000
hparams.is_2d = 0
hparams.learning_rate_warmup_steps = 8000
hparams.learning_rate = 0.2
hparams.hidden_size = 512
hparams.batch_size = 1
hparams.max_length = 256
hparams.dropout = 0.0
hparams.clip_grad_norm = 0. # i.e. no gradient clipping
hparams.optimizer_adam_epsilon = 1e-9
hparams.learning_rate_decay_scheme = "noam"
hparams.learning_rate = 0.1
hparams.initializer_gain = 0.2
hparams.num_hidden_layers = 6
hparams.initializer = "uniform_unit_scaling"
hparams.weight_decay = 0.0
hparams.optimizer_adam_beta1 = 0.9
hparams.optimizer_adam_beta2 = 0.98
hparams.label_smoothing = 0.0
hparams.norm_type = "layer"
hparams.layer_prepostprocess_dropout = 0.0
hparams.num_heads = 8
hparams.task = "image"
hparams.ffn_layer = "conv_hidden_relu"
# All hyperparameters ending in "dropout" are automatically set to 0.0
# when not in training mode.
hparams.attention_dropout = 0.0
hparams.relu_dropout = 0.
hparams.pos = "timing" # timing, none
hparams.nbr_decoder_problems = 1
hparams.num_output_layers = 3
# TODO(trandustin): semhash doesn't work if filter_size != hidden_size. For
# now, set default to dvq.
hparams.bottleneck_kind = "dvq"
hparams.add_hparam("block_size", 1)
# dilated attention based flags
hparams.add_hparam("gap_sizes", [2, 4, 8, 16, 32, 64, 2, 4, 8, 16, 32, 64])
hparams.add_hparam("dilated_attention", False)
# image size related flags
# assuming that the image has same height and width
hparams.add_hparam("img_len", 32)
hparams.add_hparam("num_channels", 3)
# Local attention params
hparams.add_hparam("local_and_global_att", False)
hparams.add_hparam("block_length", 256)
hparams.add_hparam("block_width", 128)
hparams.num_encoder_layers = 4
hparams.num_decoder_layers = 12
hparams.add_hparam("dec_attention_type", cia.AttentionType.LOCAL_1D)
hparams.add_hparam("block_raster_scan", False)
hparams.add_hparam("shared_rel", False)
# multipos attention params
hparams.add_hparam("q_filter_width", 1)
hparams.add_hparam("kv_filter_width", 1)
hparams.add_hparam("unconditional", False) # unconditional generation
hparams.bottom["targets"] = modalities.image_channel_embeddings_bottom
hparams.top["targets"] = modalities.image_channel_embeddings_top
hparams.drop_inputs = True
hparams.do_attend_compress = False
hparams.do_attend_decompress = False
return hparams |
<SYSTEM_TASK:>
Get the layers module good for TF 1 and TF 2 work for now.
<END_TASK>
<USER_TASK:>
Description:
def layers():
"""Get the layers module good for TF 1 and TF 2 work for now.""" |
global _cached_layers
if _cached_layers is not None:
return _cached_layers
layers_module = tf.layers
try:
from tensorflow.python import tf2 # pylint: disable=g-direct-tensorflow-import,g-import-not-at-top
if tf2.enabled():
tf.logging.info("Running in V2 mode, using Keras layers.")
layers_module = tf.keras.layers
except ImportError:
pass
_cached_layers = layers_module
return layers_module |
<SYSTEM_TASK:>
Like tf.nn.dropout but takes broadcast_dims instead of noise_shape.
<END_TASK>
<USER_TASK:>
Description:
def dropout_with_broadcast_dims(x, keep_prob, broadcast_dims=None, **kwargs):
"""Like tf.nn.dropout but takes broadcast_dims instead of noise_shape.
Instead of specifying noise_shape, this function takes broadcast_dims -
a list of dimension numbers in which noise_shape should be 1. The random
keep/drop tensor has dimensionality 1 along these dimensions.
Args:
x: a floating point tensor.
keep_prob: A scalar Tensor with the same type as x.
The probability that each element is kept.
broadcast_dims: an optional list of integers, the dimensions along which
to broadcast the keep/drop flags.
**kwargs: keyword arguments to tf.nn.dropout other than "noise_shape".
Returns:
Tensor of the same shape as x.
""" |
assert "noise_shape" not in kwargs
if broadcast_dims:
shape = tf.shape(x)
ndims = len(x.get_shape())
# Allow dimensions like "-1" as well.
broadcast_dims = [dim + ndims if dim < 0 else dim for dim in broadcast_dims]
kwargs["noise_shape"] = [
1 if i in broadcast_dims else shape[i] for i in range(ndims)
]
return tf.nn.dropout(x, keep_prob, **kwargs) |
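An illustrative call (tensor names assumed): dropping whole feature vectors of a [batch, length, depth] tensor, with the keep/drop decision shared across the length dimension:
y = dropout_with_broadcast_dims(x, keep_prob=0.9, broadcast_dims=[1])
# Equivalent to tf.nn.dropout(x, 0.9, noise_shape=[batch, 1, depth]).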
<SYSTEM_TASK:>
Inverse-decay exponentially from 0.01 to 1.0 reached at max_step.
<END_TASK>
<USER_TASK:>
Description:
def inverse_exp_decay(max_step, min_value=0.01, step=None):
"""Inverse-decay exponentially from 0.01 to 1.0 reached at max_step.""" |
inv_base = tf.exp(tf.log(min_value) / float(max_step))
if step is None:
step = tf.train.get_global_step()
if step is None:
return 1.0
step = to_float(step)
return inv_base**tf.maximum(float(max_step) - step, 0.0) |
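Two endpoint checks of this schedule (our own arithmetic): since inv_base = exp(log(min_value) / max_step),
# step = 0:         inv_base ** max_step = min_value (0.01 by default)
# step >= max_step: inv_base ** 0        = 1.0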
<SYSTEM_TASK:>
Inverse-decay linearly from 0.01 to 1.0 reached at max_step.
<END_TASK>
<USER_TASK:>
Description:
def inverse_lin_decay(max_step, min_value=0.01, step=None):
"""Inverse-decay linearly from 0.01 to 1.0 reached at max_step.""" |
if step is None:
step = tf.train.get_global_step()
if step is None:
return 1.0
step = to_float(step)
progress = tf.minimum(step / float(max_step), 1.0)
return progress * (1.0 - min_value) + min_value |
<SYSTEM_TASK:>
Multi-argument shake-shake, currently approximated by sums of 2.
<END_TASK>
<USER_TASK:>
Description:
def shakeshake(xs, equal_grad=False):
"""Multi-argument shake-shake, currently approximated by sums of 2.""" |
if len(xs) == 1:
return xs[0]
div = (len(xs) + 1) // 2
arg1 = shakeshake(xs[:div], equal_grad=equal_grad)
arg2 = shakeshake(xs[div:], equal_grad=equal_grad)
if equal_grad:
return shakeshake2_eqgrad(arg1, arg2)
return shakeshake2(arg1, arg2) |
<SYSTEM_TASK:>
Image standardization on batches and videos.
<END_TASK>
<USER_TASK:>
Description:
def standardize_images(x):
"""Image standardization on batches and videos.""" |
with tf.name_scope("standardize_images", values=[x]):
x_shape = shape_list(x)
x = to_float(tf.reshape(x, [-1] + x_shape[-3:]))
x_mean = tf.reduce_mean(x, axis=[1, 2], keepdims=True)
x_variance = tf.reduce_mean(
tf.squared_difference(x, x_mean), axis=[1, 2], keepdims=True)
num_pixels = to_float(x_shape[-2] * x_shape[-3])
x = (x - x_mean) / tf.maximum(tf.sqrt(x_variance), tf.rsqrt(num_pixels))
return tf.reshape(x, x_shape) |
<SYSTEM_TASK:>
Flatten a 4d-tensor into a 3d-tensor by joining width and height.
<END_TASK>
<USER_TASK:>
Description:
def flatten4d3d(x):
"""Flatten a 4d-tensor into a 3d-tensor by joining width and height.""" |
xshape = shape_list(x)
result = tf.reshape(x, [xshape[0], xshape[1] * xshape[2], xshape[3]])
return result |
<SYSTEM_TASK:>
Version of tf.gather that works faster on tpu.
<END_TASK>
<USER_TASK:>
Description:
def gather(params, indices, dtype=tf.float32):
"""Version of tf.gather that works faster on tpu.""" |
if not is_xla_compiled():
return tf.gather(params, indices)
vocab_size = params.get_shape().as_list()[0]
indices_flat = tf.reshape(indices, [-1])
out = tf.matmul(tf.one_hot(indices_flat, vocab_size, dtype=dtype), params)
out = reshape_like(out, tf.expand_dims(indices, -1))
return out |
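A tiny equivalence check (illustrative values): the one-hot matmul reproduces the gather,
# params  = [[1., 2.], [3., 4.], [5., 6.]]     # vocab_size = 3
# indices = [[2, 0]]
# one_hot(indices_flat, 3) @ params = [[5., 6.], [1., 2.]], reshaped back to
# indices.shape + [depth] = (1, 2, 2), matching tf.gather(params, indices).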
<SYSTEM_TASK:>
TPU hack for tf.cumsum.
<END_TASK>
<USER_TASK:>
Description:
def cumsum(x, axis=0, exclusive=False):
"""TPU hack for tf.cumsum.
This is equivalent to tf.cumsum and is faster on TPU as of 04/2018 unless
the axis dimension is very large.
Args:
x: a Tensor
axis: an integer
exclusive: a boolean
Returns:
Tensor of the same shape as x.
""" |
if not is_xla_compiled():
return tf.cumsum(x, axis=axis, exclusive=exclusive)
x_shape = shape_list(x)
rank = len(x_shape)
length = x_shape[axis]
my_range = tf.range(length)
comparator = tf.less if exclusive else tf.less_equal
mask = tf.cast(
comparator(tf.expand_dims(my_range, 1), tf.expand_dims(my_range, 0)),
x.dtype)
ret = tf.tensordot(x, mask, axes=[[axis], [0]])
if axis != rank - 1:
ret = tf.transpose(
ret,
list(range(axis)) + [rank - 1] + list(range(axis, rank - 1)))
return ret |
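The trick is that a (strictly) triangular 0/1 mask turns the cumulative sum into a single matmul; for a length-3 axis the masks look like (our own illustration):
#   inclusive (i <= j)      exclusive (i < j)
#   [[1, 1, 1],             [[0, 1, 1],
#    [0, 1, 1],              [0, 0, 1],
#    [0, 0, 1]]              [0, 0, 0]]
# tensordot(x, mask, [[axis], [0]])[..., j] = sum of x[..., i] over i <= j
# (or i < j), which is exactly tf.cumsum along that axis.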
<SYSTEM_TASK:>
Like tf.nn.dropout, but does not scale up. Works on integers also.
<END_TASK>
<USER_TASK:>
Description:
def dropout_no_scaling(x, keep_prob):
"""Like tf.nn.dropout, but does not scale up. Works on integers also.
Args:
x: a Tensor
keep_prob: a floating point number
Returns:
Tensor of the same shape as x.
""" |
if keep_prob == 1.0:
return x
mask = tf.less(tf.random_uniform(tf.shape(x)), keep_prob)
return x * cast_like(mask, x) |
<SYSTEM_TASK:>
Embed x of type int64 into dense vectors, reducing to max 4 dimensions.
<END_TASK>
<USER_TASK:>
Description:
def embedding(x,
vocab_size,
dense_size,
name=None,
reuse=None,
multiplier=1.0,
symbol_dropout_rate=0.0,
embedding_var=None,
dtype=tf.float32):
"""Embed x of type int64 into dense vectors, reducing to max 4 dimensions.""" |
with tf.variable_scope(
name, default_name="embedding", values=[x], reuse=reuse, dtype=dtype):
if embedding_var is None:
embedding_var = tf.get_variable("kernel", [vocab_size, dense_size])
# On the backwards pass, we want to convert the gradient from
# an indexed-slices to a regular tensor before sending it back to the
# parameter server. This avoids excess computation on the parameter server.
if not tf.executing_eagerly():
embedding_var = convert_gradient_to_tensor(embedding_var)
x = dropout_no_scaling(x, 1.0 - symbol_dropout_rate)
emb_x = gather(embedding_var, x, dtype)
if multiplier != 1.0:
emb_x *= multiplier
static_shape = emb_x.shape.as_list()
if len(static_shape) < 5:
return emb_x
assert len(static_shape) == 5
# If we had an extra channel dimension, assume it's 1, i.e. shape[3] == 1.
return tf.squeeze(emb_x, 3) |
<SYSTEM_TASK:>
Use a strided convolution to downsample x by 2, `nbr_steps` times.
<END_TASK>
<USER_TASK:>
Description:
def conv_stride2_multistep(x, nbr_steps, output_filters, name=None, reuse=None):
"""Use a strided convolution to downsample x by 2, `nbr_steps` times.
We use stride and filter size 2 to avoid the checkerboard problem of deconvs.
As detailed in http://distill.pub/2016/deconv-checkerboard/.
Args:
x: a `Tensor` with shape `[batch, spatial, depth]` or
`[batch, spatial_1, spatial_2, depth]`
nbr_steps: number of halving downsample rounds to apply
output_filters: an int specifying the filter count for the convolutions
name: a string
reuse: a boolean
Returns:
a `Tensor` with shape `[batch, spatial / (2**nbr_steps), output_filters]` or
`[batch, spatial_1 / (2**nbr_steps), spatial_2 / (2**nbr_steps),
output_filters]`
""" |
with tf.variable_scope(
name, default_name="conv_stride2_multistep", values=[x], reuse=reuse):
if nbr_steps == 0:
out = conv(x, output_filters, (1, 1))
return out, [out]
hidden_layers = [x]
for i in range(nbr_steps):
hidden_layers.append(
conv(
hidden_layers[-1],
output_filters, (2, 2),
strides=2,
activation=tf.nn.relu,
name="conv" + str(i)))
return hidden_layers[-1], hidden_layers |
<SYSTEM_TASK:>
Conditional conv_fn making kernel 1d or 2d depending on inputs shape.
<END_TASK>
<USER_TASK:>
Description:
def conv_internal(conv_fn, inputs, filters, kernel_size, **kwargs):
"""Conditional conv_fn making kernel 1d or 2d depending on inputs shape.""" |
static_shape = inputs.get_shape()
if not static_shape or len(static_shape) != 4:
raise ValueError("Inputs to conv must have statically known rank 4. "
"Shape: " + str(static_shape))
# Add support for left padding.
if kwargs.get("padding") == "LEFT":
dilation_rate = (1, 1)
if "dilation_rate" in kwargs:
dilation_rate = kwargs["dilation_rate"]
assert kernel_size[0] % 2 == 1 and kernel_size[1] % 2 == 1
height_padding = 2 * (kernel_size[0] // 2) * dilation_rate[0]
cond_padding = tf.cond(
tf.equal(shape_list(inputs)[2], 1), lambda: tf.constant(0),
lambda: tf.constant(2 * (kernel_size[1] // 2) * dilation_rate[1]))
width_padding = 0 if static_shape[2] == 1 else cond_padding
padding = [[0, 0], [height_padding, 0], [width_padding, 0], [0, 0]]
inputs = tf.pad(inputs, padding)
# Set middle two dimensions to None to prevent convolution from complaining
inputs.set_shape([static_shape[0], None, None, static_shape[3]])
kwargs["padding"] = "VALID"
def conv2d_kernel(kernel_size_arg, name_suffix):
"""Call conv2d but add suffix to name."""
name = "{}_{}".format(kwargs.get("name", "conv"), name_suffix)
original_name = kwargs.pop("name", None)
original_force2d = kwargs.pop("force2d", None)
result = conv_fn(inputs, filters, kernel_size_arg, name=name, **kwargs)
if original_name is not None:
kwargs["name"] = original_name # Restore for other calls.
if original_force2d is not None:
kwargs["force2d"] = original_force2d
return result
return conv2d_kernel(kernel_size, "single") |
<SYSTEM_TASK:>
Sub-separable convolution. If separability == 0 it's a separable_conv.
<END_TASK>
<USER_TASK:>
Description:
def subseparable_conv(inputs, filters, kernel_size, **kwargs):
"""Sub-separable convolution. If separability == 0 it's a separable_conv.""" |
def conv_fn(inputs, filters, kernel_size, **kwargs):
"""Sub-separable convolution, splits into separability-many blocks."""
separability = None
if "separability" in kwargs:
separability = kwargs.pop("separability")
if separability:
parts = []
abs_sep = separability if separability > 0 else -1 * separability
for split_idx, split in enumerate(tf.split(inputs, abs_sep, axis=3)):
with tf.variable_scope("part_%d" % split_idx):
if separability > 0:
parts.append(
layers().Conv2D(filters // separability, kernel_size,
**kwargs)(split))
else:
parts.append(
layers().SeparableConv2D(filters // abs_sep,
kernel_size, **kwargs)(split))
if separability > 1:
result = layers().Conv2D(filters, (1, 1))(tf.concat(parts, axis=3))
elif abs_sep == 1: # If we have just one block, return it.
assert len(parts) == 1
result = parts[0]
else:
result = tf.concat(parts, axis=3)
else:
result = layers().SeparableConv2D(filters, kernel_size,
**kwargs)(inputs)
if separability is not None:
kwargs["separability"] = separability
return result
return conv_internal(conv_fn, inputs, filters, kernel_size, **kwargs) |
<SYSTEM_TASK:>
Layer normalize the tensor x, averaging over the last dimension.
<END_TASK>
<USER_TASK:>
Description:
def layer_norm(x,
filters=None,
epsilon=1e-6,
name=None,
reuse=None,
layer_collection=None):
"""Layer normalize the tensor x, averaging over the last dimension.""" |
if filters is None:
filters = shape_list(x)[-1]
with tf.variable_scope(
name, default_name="layer_norm", values=[x], reuse=reuse):
scale, bias = layer_norm_vars(filters)
return layer_norm_compute(x, epsilon, scale, bias,
layer_collection=layer_collection) |
<SYSTEM_TASK:>
Layer normalization with l2 norm.
<END_TASK>
<USER_TASK:>
Description:
def l2_norm(x, filters=None, epsilon=1e-6, name=None, reuse=None):
"""Layer normalization with l2 norm.""" |
if filters is None:
filters = shape_list(x)[-1]
with tf.variable_scope(name, default_name="l2_norm", values=[x], reuse=reuse):
scale = tf.get_variable(
"l2_norm_scale", [filters], initializer=tf.ones_initializer())
bias = tf.get_variable(
"l2_norm_bias", [filters], initializer=tf.zeros_initializer())
epsilon, scale, bias = [cast_like(t, x) for t in [epsilon, scale, bias]]
mean = tf.reduce_mean(x, axis=[-1], keepdims=True)
l2norm = tf.reduce_sum(
tf.squared_difference(x, mean), axis=[-1], keepdims=True)
norm_x = (x - mean) * tf.rsqrt(l2norm + epsilon)
return norm_x * scale + bias |
<SYSTEM_TASK:>
Normalizes x using the spectral norm.
<END_TASK>
<USER_TASK:>
Description:
def apply_spectral_norm(x):
"""Normalizes x using the spectral norm.
The implementation follows Algorithm 1 of
https://arxiv.org/abs/1802.05957. If x is not a 2-D Tensor, then it is
reshaped such that the number of channels (last-dimension) is the same.
Args:
x: Tensor with the last dimension equal to the number of filters.
Returns:
x: Tensor with the same shape as x normalized by the spectral norm.
assign_op: Op to be run after every step to update the vector "u".
""" |
weights_shape = shape_list(x)
other, num_filters = tf.reduce_prod(weights_shape[:-1]), weights_shape[-1]
# Reshape into a 2-D matrix with outer size num_filters.
weights_2d = tf.reshape(x, (other, num_filters))
# v = Wu / ||W u||
with tf.variable_scope("u", reuse=tf.AUTO_REUSE):
u = tf.get_variable(
"u", [num_filters, 1],
initializer=tf.truncated_normal_initializer(),
trainable=False)
v = tf.nn.l2_normalize(tf.matmul(weights_2d, u))
# u_new = vW / ||v W||
u_new = tf.nn.l2_normalize(tf.matmul(tf.transpose(v), weights_2d))
# s = v*W*u
spectral_norm = tf.squeeze(
tf.matmul(tf.transpose(v), tf.matmul(weights_2d, tf.transpose(u_new))))
# set u equal to u_new in the next iteration.
assign_op = tf.assign(u, tf.transpose(u_new))
return tf.divide(x, spectral_norm), assign_op |
<SYSTEM_TASK:>
Resnet connection with zero initialization.
<END_TASK>
<USER_TASK:>
Description:
def zero_add(previous_value, x, name=None, reuse=None):
"""Resnet connection with zero initialization.
Another type of resnet connection which returns previous_value + gamma * x.
gamma is a trainable scalar and initialized with zero. It is useful when a
module is plugged into a trained model and we want to make sure it matches the
original model's performance.
Args:
previous_value: A tensor.
x: A tensor.
name: name of variable scope; defaults to zero_add.
reuse: reuse scope.
Returns:
previous_value + gamma * x.
""" |
with tf.variable_scope(name, default_name="zero_add", reuse=reuse):
gamma = tf.get_variable("gamma", (), initializer=tf.zeros_initializer())
return previous_value + gamma * x |