INSTRUCTION | RESPONSE
---|---
Applies targeted dropout.
Applies dropout at a rate of `1 - keep_prob` to only those elements of
`inputs` marked by `targeting_fn`. See below and paper for more detail:
"Targeted Dropout for Posthoc Pruning" Aidan N. Gomez, Ivan Zhang,
Kevin Swersky, Yarin Gal, and Geoffrey E. Hinton.
Args:
inputs: Tensor, inputs to apply targeted dropout to.
k: Scalar Tensor or python scalar, sets the number of elements to target in
`inputs`. Must be within `[0, tf.shape(x)[-1]]` and compatible with
second argument of `targeting_fn`.
keep_prob: Scalar Tensor, passed as `tf.nn.dropout`'s `keep_prob` argument.
targeting_fn: callable `fn(inputs, k) -> Boolean Tensor`, produces a
boolean mask the same shape as `inputs` where True indicates an element
will be dropped, and False not.
is_training: bool, indicates whether currently training.
do_prune: bool, indicates whether to prune the `k * (1 - keep_prob)`
elements of `inputs` expected to be dropped each forwards pass.
Returns:
Tensor, same shape and dtype as `inputs`. | def targeted_dropout(inputs,
k,
keep_prob,
targeting_fn,
is_training,
do_prune=False):
"""Applies targeted dropout.
Applies dropout at a rate of `1 - keep_prob` to only those elements of
`inputs` marked by `targeting_fn`. See below and paper for more detail:
"Targeted Dropout for Posthoc Pruning" Aidan N. Gomez, Ivan Zhang,
Kevin Swersky, Yarin Gal, and Geoffrey E. Hinton.
Args:
inputs: Tensor, inputs to apply targeted dropout to.
k: Scalar Tensor or python scalar, sets the number of elements to target in
`inputs`. Must be within `[0, tf.shape(x)[-1]]` and compatible with
second argument of `targeting_fn`.
keep_prob: Scalar Tensor, passed as `tf.nn.dropout`'s `keep_prob` argument.
targeting_fn: callable `fn(inputs, k) -> Boolean Tensor`, produces a
boolean mask the same shape as `inputs` where True indicates an element
will be dropped, and False not.
is_training: bool, indicates whether currently training.
do_prune: bool, indicates whether to prune the `k * (1 - keep_prob)`
elements of `inputs` expected to be dropped each forwards pass.
Returns:
Tensor, same shape and dtype as `inputs`.
"""
if not is_training and do_prune:
k = tf.round(to_float(k) * to_float(1. - keep_prob))
mask = targeting_fn(inputs, k)
mask = tf.cast(mask, inputs.dtype)
if is_training:
return inputs * (1 - mask) + tf.nn.dropout(inputs, keep_prob) * mask
elif do_prune:
return inputs * (1 - mask)
else:
return inputs |
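For reference, a minimal magnitude-based `targeting_fn` sketch (a hypothetical helper, not part of the module) that satisfies the `fn(inputs, k) -> Boolean Tensor` contract by marking the `k` smallest-magnitude entries along the last axis as droppable:

```python
import tensorflow as tf  # TF 1.x

def smallest_magnitude_targeting_fn(inputs, k):
  """Marks the k smallest-magnitude elements along the last axis for dropout."""
  k = tf.cast(k, tf.int32)
  abs_inputs = tf.abs(inputs)
  # top_k of the negated magnitudes yields the k smallest magnitudes; the last
  # of those is the threshold at or below which an element is droppable.
  threshold = -tf.nn.top_k(-abs_inputs, k=k).values[..., -1:]
  return abs_inputs <= threshold

# Usage sketch (`hidden` is a placeholder activation Tensor):
# pruned = targeted_dropout(hidden, k=32, keep_prob=0.75,
#                           targeting_fn=smallest_magnitude_targeting_fn,
#                           is_training=True)
```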
KL divergence of a diagonal Gaussian N(mu, exp(log_var)) from N(mu_p, exp(log_var_p)) (standard normal by default).
Args:
mu: mu parameter of the distribution.
log_var: log(var) parameter of the distribution.
mu_p: optional mu from a learned prior distribution
log_var_p: optional log(var) from a learned prior distribution
Returns:
the KL loss. | def kl_divergence(mu, log_var, mu_p=0.0, log_var_p=0.0):
"""KL divergence of diagonal gaussian N(mu,exp(log_var)) and N(0,1).
Args:
mu: mu parameter of the distribution.
log_var: log(var) parameter of the distribution.
mu_p: optional mu from a learned prior distribution
log_var_p: optional log(var) from a learned prior distribution
Returns:
the KL loss.
"""
batch_size = shape_list(mu)[0]
prior_distribution = tfp.distributions.Normal(
mu_p, tf.exp(tf.multiply(0.5, log_var_p)))
posterior_distribution = tfp.distributions.Normal(
mu, tf.exp(tf.multiply(0.5, log_var)))
kld = tfp.distributions.kl_divergence(posterior_distribution,
prior_distribution)
return tf.reduce_sum(kld) / to_float(batch_size) |
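A quick sanity-check sketch, assuming the module's `shape_list`/`to_float` helpers and a TF 1.x session: when the posterior equals the default standard-normal prior, the loss evaluates to zero.

```python
mu = tf.zeros([4, 8])
log_var = tf.zeros([4, 8])
loss = kl_divergence(mu, log_var)  # KL(N(0, 1) || N(0, 1)) is 0 per element
with tf.Session() as sess:
  print(sess.run(loss))  # -> 0.0
```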
Convert to Tensor. | def to_tensor(self):
"""Convert to Tensor."""
a_shape = shape_list(self.a)
b_shape = shape_list(self.b)
inner_dim = b_shape[1]
result_dim = b_shape[0]
flat_a = tf.reshape(self.a, [-1, inner_dim])
product = tf.matmul(flat_a, self.b, transpose_b=True)
product_shape = a_shape[:-1] + [result_dim]
product = tf.reshape(product, product_shape)
product.set_shape(self.a.get_shape().as_list()[:-1] +
[self.b.get_shape()[0]])
return product |
Generate weights with normalization. | def _compute_weights(self):
"""Generate weights with normalization."""
with tf.variable_scope("compute_weights"):
self.layer.kernel = tf.nn.l2_normalize(
self.layer.v, axis=self.norm_axes) * self.layer.g |
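A NumPy sketch of the same weight-norm reparameterization, `kernel = g * v / ||v||`, with the norm taken over every axis except the last (matching `norm_axes`) for a hypothetical dense kernel:

```python
import numpy as np

v = np.random.randn(5, 3)  # stands in for self.layer.v
g = np.ones(3)             # stands in for self.layer.g, one scale per output unit
kernel = g * v / np.linalg.norm(v, axis=0, keepdims=True)
# Each column of `kernel` has unit L2 norm, scaled by the corresponding g.
```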
Set the norm of the weight vector. | def _init_norm(self, weights):
"""Set the norm of the weight vector."""
with tf.variable_scope("init_norm"):
flat = tf.reshape(weights, [-1, self.layer_depth])
return tf.reshape(tf.norm(flat, axis=0), (self.layer_depth,)) |
Data dependent initialization for eager execution. | def _data_dep_init(self, inputs):
"""Data dependent initialization for eager execution."""
with tf.variable_scope("data_dep_init"):
# Generate data dependent init values
activation = self.layer.activation
self.layer.activation = None
x_init = self.layer.call(inputs)
m_init, v_init = tf.nn.moments(x_init, self.norm_axes)
scale_init = 1. / tf.sqrt(v_init + 1e-10)
# Assign data dependent init values
self.layer.g = self.layer.g * scale_init
self.layer.bias = (-m_init * scale_init)
self.layer.activation = activation
self.initialized = True |
Build `Layer`. | def build(self, input_shape=None):
"""Build `Layer`."""
input_shape = tf.TensorShape(input_shape).as_list()
self.input_spec = layers().InputSpec(shape=input_shape)
if not self.layer.built:
self.layer.build(input_shape)
self.layer.built = False
if not hasattr(self.layer, "kernel"):
raise ValueError("`WeightNorm` must wrap a layer that"
" contains a `kernel` for weights")
# The kernel's filter or unit dimension is -1
self.layer_depth = int(self.layer.kernel.shape[-1])
self.norm_axes = list(range(self.layer.kernel.shape.ndims - 1))
self.layer.v = self.layer.kernel
self.layer.g = self.layer.add_variable(
name="g",
shape=(self.layer_depth,),
initializer=tf.ones_initializer,
dtype=self.layer.kernel.dtype,
trainable=True)
# with ops.control_dependencies([self.layer.g.assign(
# self._init_norm(self.layer.v))]):
# self._compute_weights()
self._compute_weights()
self.layer.built = True
super(WeightNorm, self).build()
self.built = True |
Call `Layer`. | def call(self, inputs):
"""Call `Layer`."""
# if context.executing_eagerly():
# if not self.initialized:
# self._data_dep_init(inputs)
self._compute_weights() # Recompute weights for each forward pass
output = self.layer.call(inputs)
return output |
Evaluate the PPO agent in the real environment. | def evaluate_single_config(
hparams, sampling_temp, max_num_noops, agent_model_dir,
eval_fn=_eval_fn_with_learner
):
"""Evaluate the PPO agent in the real environment."""
tf.logging.info("Evaluating metric %s", get_metric_name(
sampling_temp, max_num_noops, clipped=False
))
eval_hparams = trainer_lib.create_hparams(hparams.base_algo_params)
env = setup_env(
hparams, batch_size=hparams.eval_batch_size, max_num_noops=max_num_noops,
rl_env_max_episode_steps=hparams.eval_rl_env_max_episode_steps,
env_name=hparams.rl_env_name)
env.start_new_epoch(0)
eval_fn(env, hparams, eval_hparams, agent_model_dir, sampling_temp)
rollouts = env.current_epoch_rollouts()
env.close()
return tuple(
compute_mean_reward(rollouts, clipped) for clipped in (True, False)
) |
Calculate mean rewards from given epoch. | def compute_mean_reward(rollouts, clipped):
"""Calculate mean rewards from given epoch."""
reward_name = "reward" if clipped else "unclipped_reward"
rewards = []
for rollout in rollouts:
if rollout[-1].done:
rollout_reward = sum(getattr(frame, reward_name) for frame in rollout)
rewards.append(rollout_reward)
if rewards:
mean_rewards = np.mean(rewards)
else:
mean_rewards = 0
return mean_rewards |
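A toy illustration with a hypothetical frame stand-in; only the attributes the helper reads are defined:

```python
import collections

Frame = collections.namedtuple("Frame", ["reward", "unclipped_reward", "done"])
rollout = [Frame(reward=1, unclipped_reward=2, done=False),
           Frame(reward=0, unclipped_reward=3, done=True)]
compute_mean_reward([rollout], clipped=True)   # -> 1.0 (sums "reward")
compute_mean_reward([rollout], clipped=False)  # -> 5.0 (sums "unclipped_reward")
```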
Evaluate the agent with multiple eval configurations. | def evaluate_all_configs(
hparams, agent_model_dir, eval_fn=_eval_fn_with_learner
):
"""Evaluate the agent with multiple eval configurations."""
metrics = {}
# Iterate over all combinations of sampling temperatures and whether to do
# initial no-ops.
for sampling_temp in hparams.eval_sampling_temps:
# Iterate over a set so if eval_max_num_noops == 0 then it's 1 iteration.
for max_num_noops in set([hparams.eval_max_num_noops, 0]):
scores = evaluate_single_config(
hparams, sampling_temp, max_num_noops, agent_model_dir, eval_fn
)
for (score, clipped) in zip(scores, (True, False)):
metric_name = get_metric_name(sampling_temp, max_num_noops, clipped)
metrics[metric_name] = score
return metrics |
Evaluate the world model (reward accuracy). | def evaluate_world_model(
real_env, hparams, world_model_dir, debug_video_path,
split=tf.estimator.ModeKeys.EVAL,
):
"""Evaluate the world model (reward accuracy)."""
frame_stack_size = hparams.frame_stack_size
rollout_subsequences = []
def initial_frame_chooser(batch_size):
assert batch_size == len(rollout_subsequences)
return np.stack([
[frame.observation.decode() for frame in subsequence[:frame_stack_size]] # pylint: disable=g-complex-comprehension
for subsequence in rollout_subsequences
])
env_fn = rl.make_simulated_env_fn_from_hparams(
real_env, hparams, batch_size=hparams.wm_eval_batch_size,
initial_frame_chooser=initial_frame_chooser, model_dir=world_model_dir
)
sim_env = env_fn(in_graph=False)
subsequence_length = int(
max(hparams.wm_eval_rollout_ratios) * hparams.simulated_rollout_length
)
rollouts = real_env.current_epoch_rollouts(
split=split,
minimal_rollout_frames=(subsequence_length + frame_stack_size)
)
video_writer = common_video.WholeVideoWriter(
fps=10, output_path=debug_video_path, file_format="avi"
)
reward_accuracies_by_length = {
int(ratio * hparams.simulated_rollout_length): []
for ratio in hparams.wm_eval_rollout_ratios
}
for _ in range(hparams.wm_eval_num_batches):
rollout_subsequences[:] = random_rollout_subsequences(
rollouts, hparams.wm_eval_batch_size,
subsequence_length + frame_stack_size
)
eval_subsequences = [
subsequence[(frame_stack_size - 1):]
for subsequence in rollout_subsequences
]
# Check that the initial observation is the same in the real and simulated
# rollout.
sim_init_obs = sim_env.reset()
def decode_real_obs(index):
return np.stack([
subsequence[index].observation.decode()
for subsequence in eval_subsequences # pylint: disable=cell-var-from-loop
])
real_init_obs = decode_real_obs(0)
assert np.all(sim_init_obs == real_init_obs)
debug_frame_batches = []
def append_debug_frame_batch(sim_obs, real_obs, sim_cum_rews,
real_cum_rews, sim_rews, real_rews):
"""Add a debug frame."""
rews = [[sim_cum_rews, sim_rews], [real_cum_rews, real_rews]]
headers = []
for j in range(len(sim_obs)):
local_nps = []
for i in range(2):
img = PIL_Image().new("RGB", (sim_obs.shape[-2], 11),)
draw = PIL_ImageDraw().Draw(img)
draw.text((0, 0), "c:{:3}, r:{:3}".format(int(rews[i][0][j]),
int(rews[i][1][j])),
fill=(255, 0, 0))
local_nps.append(np.asarray(img))
local_nps.append(np.zeros_like(local_nps[0]))
headers.append(np.concatenate(local_nps, axis=1))
errs = absolute_hinge_difference(sim_obs, real_obs)
headers = np.stack(headers)
debug_frame_batches.append( # pylint: disable=cell-var-from-loop
np.concatenate([headers,
np.concatenate([sim_obs, real_obs, errs], axis=2)],
axis=1)
)
append_debug_frame_batch(sim_init_obs, real_init_obs,
np.zeros(hparams.wm_eval_batch_size),
np.zeros(hparams.wm_eval_batch_size),
np.zeros(hparams.wm_eval_batch_size),
np.zeros(hparams.wm_eval_batch_size))
(sim_cum_rewards, real_cum_rewards) = (
np.zeros(hparams.wm_eval_batch_size) for _ in range(2)
)
for i in range(subsequence_length):
actions = [subsequence[i].action for subsequence in eval_subsequences]
(sim_obs, sim_rewards, _) = sim_env.step(actions)
sim_cum_rewards += sim_rewards
real_rewards = np.array([
subsequence[i + 1].reward for subsequence in eval_subsequences
])
real_cum_rewards += real_rewards
for (length, reward_accuracies) in six.iteritems(
reward_accuracies_by_length
):
if i + 1 == length:
reward_accuracies.append(
np.sum(sim_cum_rewards == real_cum_rewards) /
len(real_cum_rewards)
)
real_obs = decode_real_obs(i + 1)
append_debug_frame_batch(sim_obs, real_obs, sim_cum_rewards,
real_cum_rewards, sim_rewards, real_rewards)
for debug_frames in np.stack(debug_frame_batches, axis=1):
debug_frame = None
for debug_frame in debug_frames:
video_writer.write(debug_frame)
if debug_frame is not None:
# Append two black frames for aesthetics.
for _ in range(2):
video_writer.write(np.zeros_like(debug_frame))
video_writer.finish_to_disk()
return {
"reward_accuracy/at_{}".format(length): np.mean(reward_accuracies)
for (length, reward_accuracies) in six.iteritems(
reward_accuracies_by_length
)
} |
Write metrics to summary. | def summarize_metrics(eval_metrics_writer, metrics, epoch):
"""Write metrics to summary."""
for (name, value) in six.iteritems(metrics):
summary = tf.Summary()
summary.value.add(tag=name, simple_value=value)
eval_metrics_writer.add_summary(summary, epoch)
eval_metrics_writer.flush() |
CamelCase game name with mode suffix.
Args:
short_name: snake_case name without mode, e.g. "crazy_climber"
Returns:
full game name e.g. "CrazyClimberNoFrameskip-v4" | def full_game_name(short_name):
"""CamelCase game name with mode suffix.
Args:
short_name: snake_case name without mode, e.g. "crazy_climber"
Returns:
full game name e.g. "CrazyClimberNoFrameskip-v4"
"""
camel_game_name = misc_utils.snakecase_to_camelcase(short_name)
full_name = camel_game_name + ATARI_GAME_MODE
return full_name |
Sets up a T2TGymEnv with the given hparams. | def setup_env(hparams,
batch_size,
max_num_noops,
rl_env_max_episode_steps=-1,
env_name=None):
"""Setup."""
if not env_name:
env_name = full_game_name(hparams.game)
maxskip_envs = should_apply_max_and_skip_env(hparams)
env = T2TGymEnv(
base_env_name=env_name,
batch_size=batch_size,
grayscale=hparams.grayscale,
should_derive_observation_space=hparams
.rl_should_derive_observation_space,
resize_width_factor=hparams.resize_width_factor,
resize_height_factor=hparams.resize_height_factor,
rl_env_max_episode_steps=rl_env_max_episode_steps,
max_num_noops=max_num_noops,
maxskip_envs=maxskip_envs,
sticky_actions=hparams.sticky_actions
)
return env |
Copy a subset of hparams to target_hparams. | def update_hparams_from_hparams(target_hparams, source_hparams, prefix):
"""Copy a subset of hparams to target_hparams."""
for (param_name, param_value) in six.iteritems(source_hparams.values()):
if param_name.startswith(prefix):
target_hparams.set_hparam(param_name[len(prefix):], param_value) |
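A hypothetical illustration of the prefix-stripping copy, assuming TF 1.x `tf.contrib.training.HParams`:

```python
source = tf.contrib.training.HParams(ppo_learning_rate=1e-4, ppo_epochs_num=3,
                                     unrelated_param=7)
target = tf.contrib.training.HParams(learning_rate=0.0, epochs_num=0)
update_hparams_from_hparams(target, source, prefix="ppo_")
# target.learning_rate -> 1e-4, target.epochs_num -> 3; unrelated_param is skipped.
```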
Chooses a random frame sequence of given length from a set of rollouts. | def random_rollout_subsequences(rollouts, num_subsequences, subsequence_length):
"""Chooses a random frame sequence of given length from a set of rollouts."""
def choose_subsequence():
# TODO(koz4k): Weigh rollouts by their lengths so sampling is uniform over
# frames and not rollouts.
rollout = random.choice(rollouts)
try:
from_index = random.randrange(len(rollout) - subsequence_length + 1)
except ValueError:
# Rollout too short; repeat.
return choose_subsequence()
return rollout[from_index:(from_index + subsequence_length)]
return [choose_subsequence() for _ in range(num_subsequences)] |
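Because the helper relies only on `len()`, `random.choice` and slicing, plain lists can stand in for rollouts in a toy example:

```python
rollouts = [list(range(10)), list(range(6))]
subs = random_rollout_subsequences(rollouts, num_subsequences=3,
                                   subsequence_length=4)
# Each element of `subs` is a contiguous length-4 slice of one rollout,
# e.g. [2, 3, 4, 5].
```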
Make frame chooser.
Args:
real_env: T2TEnv to take initial frames from.
frame_stack_size (int): Number of consecutive frames to extract.
simulation_random_starts (bool): Whether to choose frames at random.
simulation_flip_first_random_for_beginning (bool): Whether to flip the first
frame stack in every batch for the frames at the beginning.
split (tf.estimator.ModeKeys or None): Data split to take the frames from,
None means use all frames.
Returns:
Function batch_size -> initial_frames. | def make_initial_frame_chooser(
real_env, frame_stack_size, simulation_random_starts,
simulation_flip_first_random_for_beginning,
split=tf.estimator.ModeKeys.TRAIN,
):
"""Make frame chooser.
Args:
real_env: T2TEnv to take initial frames from.
frame_stack_size (int): Number of consecutive frames to extract.
simulation_random_starts (bool): Whether to choose frames at random.
simulation_flip_first_random_for_beginning (bool): Whether to flip the first
frame stack in every batch for the frames at the beginning.
split (tf.estimator.ModeKeys or None): Data split to take the frames from,
None means use all frames.
Returns:
Function batch_size -> initial_frames.
"""
initial_frame_rollouts = real_env.current_epoch_rollouts(
split=split, minimal_rollout_frames=frame_stack_size,
)
def initial_frame_chooser(batch_size):
"""Frame chooser."""
deterministic_initial_frames =\
initial_frame_rollouts[0][:frame_stack_size]
if not simulation_random_starts:
# Deterministic starts: repeat first frames from the first rollout.
initial_frames = [deterministic_initial_frames] * batch_size
else:
# Random starts: choose random initial frames from random rollouts.
initial_frames = random_rollout_subsequences(
initial_frame_rollouts, batch_size, frame_stack_size
)
if simulation_flip_first_random_for_beginning:
# Flip first entry in the batch for deterministic initial frames.
initial_frames[0] = deterministic_initial_frames
return np.stack([
[frame.observation.decode() for frame in initial_frame_stack] # pylint: disable=g-complex-comprehension
for initial_frame_stack in initial_frames
])
return initial_frame_chooser |
Point-wise, hinge loss-like, difference between arrays.
Args:
arr1: integer array to compare.
arr2: integer array to compare.
min_diff: minimal difference taken into consideration.
dtype: dtype of returned array.
Returns:
array | def absolute_hinge_difference(arr1, arr2, min_diff=10, dtype=np.uint8):
"""Point-wise, hinge loss-like, difference between arrays.
Args:
arr1: integer array to compare.
arr2: integer array to compare.
min_diff: minimal difference taken into consideration.
dtype: dtype of returned array.
Returns:
array
"""
diff = np.abs(arr1.astype(np.int) - arr2, dtype=np.int)
return np.maximum(diff - min_diff, 0).astype(dtype) |
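A small worked example: per-element differences at or below `min_diff` are clipped to zero.

```python
import numpy as np

a = np.array([0, 100, 200], dtype=np.uint8)
b = np.array([5, 130, 200], dtype=np.uint8)
absolute_hinge_difference(a, b, min_diff=10)
# |a - b| = [5, 30, 0]  ->  max(diff - 10, 0) = [0, 20, 0], as uint8
```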
Augments an observation with debug info. | def augment_observation(
observation, reward, cum_reward, frame_index, bar_color=None,
header_height=27
):
"""Augments an observation with debug info."""
img = PIL_Image().new(
"RGB", (observation.shape[1], header_height,)
)
draw = PIL_ImageDraw().Draw(img)
draw.text(
(1, 0), "c:{:3}, r:{:3}".format(int(cum_reward), int(reward)),
fill=(255, 0, 0)
)
draw.text(
(1, 15), "f:{:3}".format(int(frame_index)),
fill=(255, 0, 0)
)
header = np.copy(np.asarray(img))
del img
if bar_color is not None:
header[0, :, :] = bar_color
return np.concatenate([header, observation], axis=0) |
Runs a batch of rollouts from given initial observations. | def run_rollouts(
env, agent, initial_observations, step_limit=None, discount_factor=1.0,
log_every_steps=None, video_writers=(), color_bar=False,
many_rollouts_from_each_env=False
):
"""Runs a batch of rollouts from given initial observations."""
assert step_limit is not None or not many_rollouts_from_each_env, (
"When collecting many rollouts from each environment, time limit must "
"be set."
)
num_dones = 0
first_dones = np.array([False] * env.batch_size)
observations = initial_observations
step_index = 0
cum_rewards = np.zeros(env.batch_size)
for (video_writer, obs_stack) in zip(video_writers, initial_observations):
for (i, ob) in enumerate(obs_stack):
debug_frame = augment_observation(
ob, reward=0, cum_reward=0, frame_index=(-len(obs_stack) + i + 1),
bar_color=((0, 255, 0) if color_bar else None)
)
video_writer.write(debug_frame)
def proceed():
if step_index < step_limit:
return num_dones < env.batch_size or many_rollouts_from_each_env
else:
return False
while proceed():
act_kwargs = {}
if agent.needs_env_state:
act_kwargs["env_state"] = env.state
actions = agent.act(observations, **act_kwargs)
(observations, rewards, dones) = env.step(actions)
observations = list(observations)
now_done_indices = []
for (i, done) in enumerate(dones):
if done and (not first_dones[i] or many_rollouts_from_each_env):
now_done_indices.append(i)
first_dones[i] = True
num_dones += 1
if now_done_indices:
# Unless many_rollouts_from_each_env, reset only envs done the first time
# in this timestep to ensure that we collect exactly 1 rollout from each
# env.
reset_observations = env.reset(now_done_indices)
for (i, observation) in zip(now_done_indices, reset_observations):
observations[i] = observation
observations = np.array(observations)
cum_rewards[~first_dones] = (
cum_rewards[~first_dones] * discount_factor + rewards[~first_dones]
)
step_index += 1
for (video_writer, obs_stack, reward, cum_reward, done) in zip(
video_writers, observations, rewards, cum_rewards, first_dones
):
if done:
continue
ob = obs_stack[-1]
debug_frame = augment_observation(
ob, reward=reward, cum_reward=cum_reward,
frame_index=step_index, bar_color=((255, 0, 0) if color_bar else None)
)
video_writer.write(debug_frame)
# TODO(afrozm): Clean this up with tf.logging.log_every_n
if log_every_steps is not None and step_index % log_every_steps == 0:
tf.logging.info("Step %d, mean_score: %f", step_index, cum_rewards.mean())
return (observations, cum_rewards) |
Sets the state that will be used on next reset. | def set_initial_state(self, initial_state, initial_frames):
"""Sets the state that will be used on next reset."""
self.env.set_initial_state(initial_state, initial_frames)
self._initial_frames = initial_frames |
Download corpora if necessary and unzip them.
Args:
tmp_dir: directory containing dataset.
dataset_split: whether we're in train/dev/test mode.
Returns:
List of all files generated and path to file containing
train/dev/test split info. | def _maybe_download_corpora(tmp_dir, dataset_split):
"""Download corpora if necessary and unzip them.
Args:
tmp_dir: directory containing dataset.
dataset_split: whether we're in train/dev/test mode.
Returns:
List of all files generated and path to file containing
train/dev/test split info.
"""
cnn_filename = "cnn_stories.tgz"
cnn_finalpath = os.path.join(tmp_dir, "cnn/stories/")
dailymail_filename = "dailymail_stories.tgz"
dailymail_finalpath = os.path.join(tmp_dir, "dailymail/stories/")
if not tf.gfile.Exists(cnn_finalpath):
cnn_file = generator_utils.maybe_download_from_drive(
tmp_dir, cnn_filename, _CNN_STORIES_DRIVE_URL)
with tarfile.open(cnn_file, "r:gz") as cnn_tar:
cnn_tar.extractall(tmp_dir)
if not tf.gfile.Exists(dailymail_finalpath):
dailymail_file = generator_utils.maybe_download_from_drive(
tmp_dir, dailymail_filename, _DAILYMAIL_STORIES_DRIVE_URL)
with tarfile.open(dailymail_file, "r:gz") as dailymail_tar:
dailymail_tar.extractall(tmp_dir)
cnn_files = tf.gfile.Glob(cnn_finalpath + "*")
dailymail_files = tf.gfile.Glob(dailymail_finalpath + "*")
all_files = cnn_files + dailymail_files
if dataset_split == problem.DatasetSplit.TRAIN:
urls_path = generator_utils.maybe_download(tmp_dir, "all_train.txt",
_TRAIN_URLS)
elif dataset_split == problem.DatasetSplit.EVAL:
urls_path = generator_utils.maybe_download(tmp_dir, "all_val.txt",
_DEV_URLS)
else:
urls_path = generator_utils.maybe_download(tmp_dir, "all_test.txt",
_TEST_URLS)
return all_files, urls_path |
Generate splits of the data. | def example_splits(url_file, all_files):
"""Generate splits of the data."""
def generate_hash(inp):
"""Generate a sha1 hash to match the raw url to the filename extracted."""
h = hashlib.sha1()
h.update(inp)
return h.hexdigest()
all_files_map = {f.split("/")[-1]: f for f in all_files}
urls = [line.strip().encode("utf-8") for line in tf.gfile.Open(url_file)]
filelist = []
for url in urls:
url_hash = generate_hash(url)
filename = url_hash + ".story"
if filename not in all_files_map:
tf.logging.info("Missing file: %s" % url)
continue
filelist.append(all_files_map[filename])
tf.logging.info("Found %d examples" % len(filelist))
return filelist |
Generate examples. | def example_generator(all_files, urls_path, sum_token):
"""Generate examples."""
def fix_run_on_sents(line):
if u"@highlight" in line:
return line
if not line:
return line
if line[-1] in END_TOKENS:
return line
return line + u"."
filelist = example_splits(urls_path, all_files)
story_summary_split_token = u" <summary> " if sum_token else " "
for story_file in filelist:
story = []
summary = []
reading_highlights = False
for line in tf.gfile.Open(story_file, "rb"):
line = text_encoder.to_unicode_utf8(line.strip())
line = fix_run_on_sents(line)
if not line:
continue
elif line.startswith(u"@highlight"):
if not story:
break # No article text.
reading_highlights = True
elif reading_highlights:
summary.append(line)
else:
story.append(line)
if (not story) or not summary:
continue
yield " ".join(story) + story_summary_split_token + " ".join(summary) |
Write text to files. | def write_raw_text_to_files(all_files, urls_path, dataset_split, tmp_dir):
"""Write text to files."""
def write_to_file(all_files, urls_path, tmp_dir, filename):
"""Write text to files."""
with io.open(
os.path.join(tmp_dir, filename + ".source"), "w",
encoding="utf-8") as fstory:
with io.open(
os.path.join(tmp_dir, filename + ".target"), "w",
encoding="utf-8") as fsummary:
for example in example_generator(all_files, urls_path, sum_token=True):
story, summary = _story_summary_split(example)
fstory.write(story + "\n")
fsummary.write(summary + "\n")
if dataset_split == problem.DatasetSplit.TRAIN:
filename = "cnndm.train"
elif dataset_split == problem.DatasetSplit.EVAL:
filename = "cnndm.dev"
else:
filename = "cnndm.test"
tf.logging.info("Writing %s" % filename)
write_to_file(all_files, urls_path, tmp_dir, filename) |
Infer highest epoch number from file names in data_dir. | def infer_last_epoch_num(data_dir):
"""Infer highest epoch number from file names in data_dir."""
names = os.listdir(data_dir)
epochs_str = [re.findall(pattern=r".*\.(-?\d+)$", string=name)
for name in names]
epochs_str = sum(epochs_str, [])
return max([int(epoch_str) for epoch_str in epochs_str]) |
Load T2TGymEnv with data from one epoch.
Args:
hparams: hparams.
data_dir: data directory.
which_epoch_data: data from which epoch to load.
Returns:
env. | def setup_and_load_epoch(hparams, data_dir, which_epoch_data=None):
"""Load T2TGymEnv with data from one epoch.
Args:
hparams: hparams.
data_dir: data directory.
which_epoch_data: data from which epoch to load.
Returns:
env.
"""
t2t_env = rl_utils.setup_env(
hparams, batch_size=hparams.real_batch_size,
max_num_noops=hparams.max_num_noops
)
# Load data.
if which_epoch_data is not None:
if which_epoch_data == "last":
which_epoch_data = infer_last_epoch_num(data_dir)
assert isinstance(which_epoch_data, int), \
"{}".format(type(which_epoch_data))
t2t_env.start_new_epoch(which_epoch_data, data_dir)
else:
t2t_env.start_new_epoch(-999)
return t2t_env |
Infer name from filenames. | def infer_game_name_from_filenames(data_dir, snake_case=True):
"""Infer name from filenames."""
names = os.listdir(data_dir)
game_names = [re.findall(pattern=r"^Gym(.*)NoFrameskip", string=name)
for name in names]
assert game_names, "No data files found in {}".format(data_dir)
game_names = sum(game_names, [])
game_name = game_names[0]
assert all(game_name == other for other in game_names), \
"There are multiple different game names in {}".format(data_dir)
if snake_case:
game_name = camelcase_to_snakecase(game_name)
return game_name |
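The regex keys off the `Gym...NoFrameskip` prefix only, so a hypothetical shard name illustrates the extraction:

```python
import re

re.findall(r"^Gym(.*)NoFrameskip", "GymPongNoFrameskip-v4-train-00000-of-00010")
# -> ["Pong"]; with snake_case=True the function would return "pong".
```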
Wrap environment with gym.Monitor.
Video recording provided by Monitor requires
1) both height and width of observation to be even numbers.
2) rendering of environment
Args:
env: environment.
video_dir: video directory.
Returns:
wrapped environment. | def wrap_with_monitor(env, video_dir):
"""Wrap environment with gym.Monitor.
Video recording provided by Monitor requires
1) both height and width of observation to be even numbers.
2) rendering of environment
Args:
env: environment.
video_dir: video directory.
Returns:
wrapped environment.
"""
env = ExtendToEvenDimentions(env)
env = RenderObservations(env) # pylint: disable=redefined-variable-type
env = gym.wrappers.Monitor(env, video_dir, force=True,
video_callable=lambda idx: True,
write_upon_reset=True)
return env |
Create SimulatedEnv with minimal subset of hparams. | def create_simulated_env(
output_dir, grayscale, resize_width_factor, resize_height_factor,
frame_stack_size, generative_model, generative_model_params,
random_starts=True, which_epoch_data="last", **other_hparams
):
""""Create SimulatedEnv with minimal subset of hparams."""
# We need these, to initialize T2TGymEnv, but these values (hopefully) have
# no effect on player.
a_bit_risky_defaults = {
"game": "pong", # assumes that T2TGymEnv has always reward_range (-1,1)
"real_batch_size": 1,
"rl_env_max_episode_steps": -1,
"max_num_noops": 0
}
for key in a_bit_risky_defaults:
if key not in other_hparams:
other_hparams[key] = a_bit_risky_defaults[key]
hparams = hparam.HParams(
grayscale=grayscale,
resize_width_factor=resize_width_factor,
resize_height_factor=resize_height_factor,
frame_stack_size=frame_stack_size,
generative_model=generative_model,
generative_model_params=generative_model_params,
**other_hparams
)
return load_data_and_make_simulated_env(
output_dir, wm_dir=None, hparams=hparams,
which_epoch_data=which_epoch_data,
random_starts=random_starts) |
Infers standard paths to policy and model directories.
Example:
>>> infer_paths("/some/output/dir/", policy="", model="custom/path")
{"policy": "/some/output/dir/policy", "model": "custom/path",
"output_dir":"/some/output/dir/"}
Args:
output_dir: output directory.
**subdirs: sub-directories.
Returns:
a dictionary with the directories. | def infer_paths(output_dir, **subdirs):
"""Infers standard paths to policy and model directories.
Example:
>>> infer_paths("/some/output/dir/", policy="", model="custom/path")
{"policy": "/some/output/dir/policy", "model": "custom/path",
"output_dir":"/some/output/dir/"}
Args:
output_dir: output directory.
**subdirs: sub-directories.
Returns:
a dictionary with the directories.
"""
directories = {}
for name, path in six.iteritems(subdirs):
directories[name] = path if path else os.path.join(output_dir, name)
directories["output_dir"] = output_dir
return directories |
Adds a new frame to the (initial) frame stack, dropping the oldest one. | def add_to_initial_stack(self, frame):
"""Adds a new frame to the (initial) frame stack, dropping the oldest one."""
if not self._setable_initial_frames:
raise ValueError(
"This instance does not allow to manually set initial frame stack.")
assert_msg = "{}, {}".format(frame.shape, self._initial_frames.shape[:1])
assert frame.shape == self._initial_frames.shape[2:], assert_msg
initial_frames = np.roll(self._initial_frames, shift=-1, axis=1)
initial_frames[0, -1, ...] = frame
self._initial_frames = initial_frames |
Add single zero row/column to observation if needed. | def observation(self, frame):
"""Add single zero row/column to observation if needed."""
if frame.shape == self.observation_space.shape:
return frame
else:
extended_frame = np.zeros(self.observation_space.shape,
self.observation_space.dtype)
assert self.HW_AXES == (0, 1)
extended_frame[:frame.shape[0], :frame.shape[1]] = frame
return extended_frame |
Add new observation to frame stack and infer policy.
Args:
ob: array of shape (height, width, channels)
Returns:
logits and vf. | def infer(self, ob):
"""Add new observation to frame stack and infer policy.
Args:
ob: array of shape (height, width, channels)
Returns:
logits and vf.
"""
self._add_to_stack(ob)
logits, vf = self.infer_from_frame_stack(self._frame_stack)
return logits, vf |
Infer policy from stack of observations.
Args:
ob_stack: array of shape (1, frame_stack_size, height, width, channels)
Returns:
logits and vf. | def infer_from_frame_stack(self, ob_stack):
"""Infer policy from stack of observations.
Args:
ob_stack: array of shape (1, frame_stack_size, height, width, channels)
Returns:
logits and vf.
"""
logits, vf = self.sess.run([self.logits_t, self.value_function_t],
feed_dict={self.obs_t: ob_stack})
return logits, vf |
Normalizes the string using tokenizer.encode.
Args:
raw_str: the input string
Returns:
A string which is ready to be tokenized using split() | def _normalize_string(raw_str):
"""Normalizes the string using tokenizer.encode.
Args:
raw_str: the input string
Returns:
A string which is ready to be tokenized using split()
"""
return " ".join(
token.strip()
for token in tokenizer.encode(text_encoder.native_to_unicode(raw_str))) |
Downloads and extracts the dataset.
Args:
tmp_dir: temp directory to download and extract the dataset
data_dir: The base directory where data and vocab files are stored.
Returns:
tmp_dir: temp directory containing the raw data. | def _prepare_babi_data(tmp_dir, data_dir):
"""Downloads and extracts the dataset.
Args:
tmp_dir: temp directory to download and extract the dataset
data_dir: The base directory where data and vocab files are stored.
Returns:
tmp_dir: temp directory containing the raw data.
"""
if not tf.gfile.Exists(data_dir):
tf.gfile.MakeDirs(data_dir)
file_path = os.path.join(tmp_dir, _TAR)
headers = {"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_1) "
"AppleWebKit/537.36 (KHTML, like Gecko) "
"Chrome/63.0.3239.132 Safari/537.36"}
resp = requests.get(_URL, headers=headers)
with open(file_path, "wb") as f:
f.write(resp.content)
tar = tarfile.open(file_path)
tar.extractall(tmp_dir)
tar.close()
return tmp_dir |
It dynamically instantiates a class for each babi subset-task pair.
@registry.register_problem
class BabiQaConcatAllTasks_10k(EditSequenceRegexProblem):
@property
def babi_task_id(self):
return "qa0"
@property
def babi_subset(self):
return "en-10k"
It does not put the classes into the global namespace, so to access the class
we rely on the registry or this module's REGISTERED_PROBLEMS list.
It will be available as
registry.problem("babi_qa_concat_all_tasks_10k")
i.e., change camel case to snake case. Numbers are considered lower case
characters for these purposes. | def _register_babi_problems():
"""It dynamically instantiates a class for each babi subsets-tasks.
@registry.register_problem
class BabiQaConcatAllTasks_10k(EditSequenceRegexProblem):
@property
def babi_task_id(self):
return "qa0"
@property
def babi_subset(self):
return "en-10k"
It does not put the classes into the global namespace, so to access the class
we rely on the registry or this module's REGISTERED_PROBLEMS list.
It will be available as
registry.problem("babi_qa_concat_all_tasks_10k")
i.e., change camel case to snake case. Numbers are considered lower case
characters for these purposes.
"""
for (subset, subset_suffix) in [("en", "_1k"), ("en-10k", "_10k")]:
for problem_name, babi_task_id in six.iteritems(_problems_to_register()):
problem_class = type("BabiQaConcat" + problem_name + subset_suffix,
(BabiQaConcat,), {
"babi_task_id": babi_task_id,
"babi_subset": subset
})
registry.register_problem(problem_class)
REGISTERED_PROBLEMS.append(problem_class.name) |
Parsing the bAbi dataset (train and test).
Args:
tmp_dir: temp directory to download and extract the dataset
babi_task_id: babi task id
subset: babi subset
dataset_split: dataset split (train or eval)
joint_training: if training the model on all tasks.
Returns:
babi_instances: set of training examples, each a dict containing a story,
a question and an answer.
babi_lines: all the texts in the data separated based on their
appearance in the stories, questions, or answers. | def _babi_parser(tmp_dir,
babi_task_id,
subset,
dataset_split,
joint_training=True):
"""Parsing the bAbi dataset (train and test).
Args:
tmp_dir: temp directory to download and extract the dataset
babi_task_id: babi task id
subset: babi subset
dataset_split: dataset split (train or eval)
joint_training: if training the model on all tasks.
Returns:
babi_instances: set of training examples, each a dict containing a story,
a question and an answer.
babi_lines: all the texts in the data separated based on their
appearance in the stories, questions, or answers.
"""
def _data_file(mode, task_id):
"""Generates the path to the data file for the given mode(train/test).
Args:
mode: either train or test for bAbi dataset
task_id: babi task id
Returns:
data file path
"""
file_name = (_TASKS[task_id] + "_{}.txt")
return os.path.join(_DIR_NAME, subset, file_name.format(mode))
def _all_task_raw_data_generator(tmp_dir, data_file, dataset_split):
"""Prepares raw data for all tasks to gether..
Args:
tmp_dir: temp directory
data_file: data file
dataset_split: dataset split
"""
tf.logging.info("Preparing dataset of all task together")
globe_name = ("*_{}.txt")
mode_name = "test"
if dataset_split == problem.DatasetSplit.TRAIN:
mode_name = "train"
files_name = os.path.join(
tmp_dir, _DIR_NAME, subset,
globe_name.format(mode_name))
with tf.gfile.GFile(data_file, "wb") as outfile:
for filename in tf.gfile.Glob(files_name):
if filename == data_file:
# don"t want to copy the output into the output
continue
with tf.gfile.GFile(filename, "rb") as readfile:
shutil.copyfileobj(readfile, outfile)
def _parse_answer(answer):
if (joint_training or babi_task_id in ["qa8", "qa19", "qa0"
]): # "lists-sets" or "path finding"
return "".join([d for d in answer.split(",")]) # as a single token!
else:
return answer
if dataset_split == problem.DatasetSplit.TRAIN:
babi_train_task_id = "qa0" if joint_training else babi_task_id
data_file = os.path.join(tmp_dir, _data_file("train", babi_train_task_id))
else:
data_file = os.path.join(tmp_dir, _data_file("test", babi_task_id))
if ((babi_task_id == "qa0" or joint_training) and
not tf.gfile.Exists(os.path.join(tmp_dir, data_file))):
_all_task_raw_data_generator(tmp_dir, data_file, dataset_split)
tf.logging.info("Parsing %s into training/testing instances...", data_file)
babi_instances = []
with tf.gfile.GFile(data_file, mode="r") as f:
story = []
for line in f:
line_num, line = line.strip().split(" ", 1)
if int(line_num) == 1:
story = []
if "\t" in line:
question, answer, _ = line.split("\t")
question = _normalize_string(question)
substories = [s for s in story if s]
answer = _parse_answer(answer)
instance = {
FeatureNames.STORY: substories,
FeatureNames.QUESTION: question,
FeatureNames.ANSWER: answer
}
babi_instances.append(instance)
story.append("")
else:
story.append(_normalize_string(line))
return babi_instances |
Builds encoder for the given class labels.
Args:
data_dir: data directory
Returns:
An encoder for class labels. | def get_labels_encoder(self, data_dir):
"""Builds encoder for the given class labels.
Args:
data_dir: data directory
Returns:
An encoder for class labels.
"""
label_filepath = os.path.join(data_dir, self.vocab_filename)
return text_encoder.TokenTextEncoder(label_filepath) |
A generator that generates samples that are encoded.
Args:
data_dir: data directory
tmp_dir: temp directory
dataset_split: dataset split
Yields:
A dict. | def generate_encoded_samples(self, data_dir, tmp_dir, dataset_split):
"""A generator that generates samples that are encoded.
Args:
data_dir: data directory
tmp_dir: temp directory
dataset_split: dataset split
Yields:
A dict.
"""
generator = self.generate_samples(data_dir, tmp_dir, dataset_split)
encoder = self.get_or_create_vocab(data_dir, tmp_dir)
label_encoder = self.get_labels_encoder(data_dir)
for sample in generator:
inputs = encoder.encode(sample["inputs"])
inputs.append(text_encoder.EOS_ID)
context = encoder.encode(sample["context"])
context.append(text_encoder.EOS_ID)
targets = label_encoder.encode(sample["targets"])
sample["targets"] = targets
yield {"inputs": inputs, "context": context, "targets": targets} |
Return a dict for encoding and decoding inference input/output.
Args:
data_dir: data directory
Returns:
A dict of <feature name, TextEncoder>. | def feature_encoders(self, data_dir):
"""Return a dict for encoding and decoding inference input/output.
Args:
data_dir: data directory
Returns:
A dict of <feature name, TextEncoder>.
"""
encoders = (super(BabiQa, self).feature_encoders(data_dir))
label_encoder = self.get_labels_encoder(data_dir)
encoders["targets"] = label_encoder # bAbi as a classification task
return encoders |
Returns problem_hparams.
Args:
defaults: default hyperparameters
unused_model_hparams: model hyperparameters | def hparams(self, defaults, unused_model_hparams):
"""Returns problem_hparams.
Args:
defaults: default hyperparameters
unused_model_hparams: model hyperparameters
"""
(super(BabiQa, self).hparams(defaults, unused_model_hparams))
p = defaults
num_classes = self._encoders["targets"].vocab_size
p.modality = {"targets": modalities.ModalityType.CLASS_LABEL}
p.vocab_size = {"targets": num_classes} |
Splits of data to produce and number the output shards for each. | def dataset_splits(self):
"""Splits of data to produce and number the output shards for each."""
return [{
"split": problem.DatasetSplit.TRAIN,
"shards": self.num_train_shards,
}, {
"split": problem.DatasetSplit.EVAL,
"shards": self.num_eval_shards,
}, {
"split": problem.DatasetSplit.TEST,
"shards": self.num_test_shards,
}] |
Traverses directory collecting input and target files. | def _collect_data(directory, input_ext, transcription_ext):
"""Traverses directory collecting input and target files."""
# Dictionary from key string to a tuple of strings
# key: the filepath to a datafile including the datafile's basename. Example,
# if the datafile was "/path/to/datafile.wav" then the key would be
# "/path/to/datafile"
# value: a tuple of strings (media_base, media_filepath, label)
data_files = {}
for root, _, filenames in os.walk(directory):
transcripts = [filename for filename in filenames
if transcription_ext in filename]
for transcript in transcripts:
transcript_path = os.path.join(root, transcript)
with open(transcript_path, "r") as transcript_file:
for transcript_line in transcript_file:
line_contents = transcript_line.strip().split(" ", 1)
media_base, label = line_contents
key = os.path.join(root, media_base)
assert key not in data_files
media_name = "%s.%s"%(media_base, input_ext)
media_path = os.path.join(root, media_name)
data_files[key] = (media_base, media_path, label)
return data_files |
Adds to base hparams the attributes for librispeech. | def add_librispeech_hparams(hparams):
"""Adds to base hparams the attributes for librispeech."""
hparams.batch_size = 36
hparams.audio_compression = 8
hparams.hidden_size = 2048
hparams.max_input_seq_length = 600000
hparams.max_target_seq_length = 350
hparams.max_length = hparams.max_input_seq_length
hparams.min_length_bucket = hparams.max_input_seq_length // 2
hparams.learning_rate = 0.05
hparams.train_steps = 5000000
hparams.num_hidden_layers = 4
return hparams |
Generates linearized trees and tokens from the wsj tree format.
It uses the tree linearization described in https://arxiv.org/abs/1412.7449.
Args:
tree_string: tree in wsj format
Returns:
tuple: (words, linearized tree) | def words_and_tags_from_wsj_tree(tree_string):
"""Generates linearized trees and tokens from the wsj tree format.
It uses the tree linearization described in https://arxiv.org/abs/1412.7449.
Args:
tree_string: tree in wsj format
Returns:
tuple: (words, linearized tree)
"""
stack, tags, words = [], [], []
for tok in tree_string.strip().split():
if tok[0] == "(":
symbol = tok[1:]
tags.append(symbol)
stack.append(symbol)
else:
assert tok[-1] == ")"
stack.pop() # Pop the POS-tag.
while tok[-2] == ")":
tags.append("/" + stack.pop())
tok = tok[:-1]
words.append(tok[:-1])
return str.join(" ", words), str.join(" ", tags[1:-1]) |
Generator for parsing as a sequence-to-sequence task that uses tokens.
This generator assumes the files at source_path and target_path have
the same number of lines and yields dictionaries of "inputs" and "targets"
where inputs and targets are token ids from source and target lines
converted to integers using the token_map.
Args:
tree_path: path to the file with WSJ format trees, one per line.
source_token_vocab: GenericVocabulary object for source vocabulary.
target_token_vocab: GenericVocabulary object for target vocabulary.
eos: integer to append at the end of each sequence (default: None).
Yields:
A dictionary {"inputs": source-line, "targets": target-line} where
the lines are integer lists converted from tokens in the file lines. | def token_generator(tree_path, source_token_vocab, target_token_vocab,
eos=None):
"""Generator for parsing as a sequence-to-sequence task that uses tokens.
This generator assumes the files at source_path and target_path have
the same number of lines and yields dictionaries of "inputs" and "targets"
where inputs and targets are token ids from source and target lines
converted to integers using the token_map.
Args:
tree_path: path to the file with WSJ format trees, one per line.
source_token_vocab: GenericVocabulary object for source vocabulary.
target_token_vocab: GenericVocabulary object for target vocabulary.
eos: integer to append at the end of each sequence (default: None).
Yields:
A dictionary {"inputs": source-line, "targets": target-line} where
the lines are integer lists converted from tokens in the file lines.
"""
eos_list = [] if eos is None else [eos]
with tf.gfile.GFile(tree_path, mode="r") as tree_file:
tree_line = tree_file.readline()
while tree_line:
source, target = words_and_tags_from_wsj_tree(tree_line)
source_ints = source_token_vocab.encode(source.strip()) + eos_list
target_ints = target_token_vocab.encode(target.strip()) + eos_list
yield {"inputs": source_ints, "targets": target_ints}
tree_line = tree_file.readline() |
Generator for parsing as a sequence-to-sequence task that uses tokens.
This generator assumes the existence of the files parsing_{train,dev}.trees,
which contain trees in WSJ format.
Args:
data_dir: path to the data directory.
tmp_dir: path to temporary storage directory.
train: whether we're training or not.
source_vocab_size: source vocab size.
target_vocab_size: target vocab size.
Returns:
A generator to a dictionary of inputs and outputs. | def parsing_token_generator(data_dir, tmp_dir, train, source_vocab_size,
target_vocab_size):
"""Generator for parsing as a sequence-to-sequence task that uses tokens.
This generator assumes the existence of the files parsing_{train,dev}.trees,
which contain trees in WSJ format.
Args:
data_dir: path to the data directory.
tmp_dir: path to temporary storage directory.
train: whether we're training or not.
source_vocab_size: source vocab size.
target_vocab_size: target vocab size.
Returns:
A generator to a dictionary of inputs and outputs.
"""
# TODO(lukaszkaiser): Correct these calls to generate vocabularies. No data
# sources are being passed.
del (data_dir, tmp_dir, train, source_vocab_size, target_vocab_size)
assert False, "Vocabulary generation not implemented" |
Aggregate stats in per-shard stats files. | def aggregate_stats(stats_files):
"""Aggregate stats in per-shard stats files."""
all_stats = {}
for fname in stats_files:
with tf.gfile.Open(fname) as f:
stats = json.loads(f.read())
for k, v in six.iteritems(stats):
if k not in all_stats:
if isinstance(v, list):
all_stats[k] = []
else:
all_stats[k] = 0
if isinstance(v, list):
all_stats[k].extend(v)
else:
all_stats[k] += v
stats = all_stats
ref_coverage = float(stats["total_found_refs"]) / stats["total_original_refs"]
len_bounds = [0, 2, 10, 100, 1000, 5000, 10000, 20000, 50000, 100000, 1000000]
len_counts, len_bounds = np.histogram(stats["ref_lengths"], len_bounds)
len_dist = len_counts.astype(np.float32) / len_counts.sum()
wiki_coverage = (float(stats["num_wikis_written"]) /
stats["total_original_wikis"])
wikis_skipped_no_ref = (float(stats["wikis_skipped_no_refs"]) /
stats["total_original_wikis"])
wikis_skipped_no_lead = (float(stats["wikis_skipped_short_lead"]) /
stats["total_original_wikis"])
wiki_ref_coverage = [
float(found) / orig for found, orig
in zip(stats["wiki_found_refs"], stats["wiki_original_refs"]) if found
]
coverage_bounds = np.arange(21).astype(np.float32) / 20
coverage_counts, coverage_bounds = np.histogram(wiki_ref_coverage,
coverage_bounds)
coverage_dist = coverage_counts.astype(np.float32) / coverage_counts.sum()
agg_stats = dict(
total_original_wikis=stats["total_original_wikis"],
total_original_refs=stats["total_original_refs"],
wiki_coverage=wiki_coverage,
wikis_skipped_no_ref=wikis_skipped_no_ref,
wikis_skipped_no_lead=wikis_skipped_no_lead,
overall_ref_coverage=ref_coverage,
per_wiki_ref_coverage_dist=list((coverage_dist * 100).astype(int)),
per_wiki_ref_coverage_bounds=list((coverage_bounds * 100).astype(int)),
ref_len_dist=list((len_dist * 100).astype(int)),
ref_len_bounds=list(len_bounds),
)
return agg_stats |
Map filename to the task id that created it assuming 1k tasks. | def filename_to_task_id(fname):
"""Map filename to the task id that created it assuming 1k tasks."""
# This matches the order and size in WikisumBase.out_filepaths
fname = os.path.basename(fname)
shard_id_increment = {
"train": 0,
"dev": 800,
"test": 900,
}
parts = fname.split("-")
split = parts[1]
shard_id = parts[2]
task_id = int(shard_id) + shard_id_increment[split]
return task_id |
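An illustration with hypothetical shard names (real names come from WikisumBase.out_filepaths); the split in the name decides the offset added to the shard id:

```python
filename_to_task_id("wikisum-train-00042-of-00800")           # -> 42
filename_to_task_id("/some/dir/wikisum-dev-00007-of-00100")   # -> 807
filename_to_task_id("wikisum-test-00003-of-00100")            # -> 903
```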
Validate presence and minimum size of files. | def validate_data_files(problem, data_files, min_size):
"""Validate presence and minimum size of files."""
# Check that all files are present
data_dir = os.path.split(data_files[0])[0]
out_filepaths = problem.out_filepaths(data_dir)
missing_filepaths = set(out_filepaths) - set(data_files)
if missing_filepaths:
tf.logging.error("Missing %d data files", len(missing_filepaths))
# Check that each file is at least 100M
too_small = []
for data_file in data_files:
length = get_length(data_file)
if length < min_size:
too_small.append(data_file)
if too_small:
tf.logging.error("%d files too small", len(too_small))
bad_files = too_small + list(missing_filepaths)
return bad_files |
Set of hyperparameters. | def distill_resnet_32_to_15_cifar20x5():
"""Set of hyperparameters."""
hparams = distill_base()
hparams.teacher_model = "resnet"
hparams.teacher_hparams = "resnet_cifar_32"
hparams.student_model = "resnet"
hparams.student_hparams = "resnet_cifar_15"
hparams.optimizer_momentum_nesterov = True
# (base_lr=0.1) * (batch_size=128*8 (on TPU, or 8 GPUs)=1024) / (256.)
hparams.teacher_learning_rate = 0.25 * 128. * 8. / 256.
hparams.student_learning_rate = 0.2 * 128. * 8. / 256.
hparams.learning_rate_decay_scheme = "piecewise"
hparams.add_hparam("learning_rate_boundaries", [40000, 60000, 80000])
hparams.add_hparam("learning_rate_multiples", [0.1, 0.01, 0.001])
hparams.task_balance = 0.28
hparams.distill_temperature = 2.0
hparams.num_classes = 20
return hparams |
Downloading and preparing the dataset.
Args:
tmp_dir: temp directory
data_dir: data directory
vocab_size: size of vocabulary
vocab_filename: name of vocab file | def _prepare_lambada_data(tmp_dir, data_dir, vocab_size, vocab_filename):
"""Downloading and preparing the dataset.
Args:
tmp_dir: temp directory
data_dir: data directory
vocab_size: size of vocabulary
vocab_filename: name of vocab file
"""
if not tf.gfile.Exists(data_dir):
tf.gfile.MakeDirs(data_dir)
file_path = generator_utils.maybe_download(tmp_dir, _TAR, _URL)
tar_all = tarfile.open(file_path)
tar_all.extractall(tmp_dir)
tar_all.close()
tar_train = tarfile.open(os.path.join(tmp_dir, "train-novels.tar"))
tar_train.extractall(tmp_dir)
tar_train.close()
vocab_path = os.path.join(data_dir, vocab_filename)
if not tf.gfile.Exists(vocab_path):
with tf.gfile.GFile(os.path.join(tmp_dir, _VOCAB), "r") as infile:
reader = csv.reader(infile, delimiter="\t")
words = [row[0] for row in reader]
words = [_UNK] + words[:vocab_size]
with tf.gfile.GFile(vocab_path, "w") as outfile:
outfile.write("\n".join(words)) |
Gives the file paths with regards to the given split.
Args:
tmp_dir: temp directory
split: dataset split
use_control_set: uses control dataset if true.
Returns:
list of file paths. | def get_dataset_split(tmp_dir, split, use_control_set):
"""Gives the file paths with regards to the given split.
Args:
tmp_dir: temp directory
split: dataset split
use_control_set: uses control dataset if true.
Returns:
list of file paths.
"""
if not use_control_set:
dataset_split = {
problem.DatasetSplit.TRAIN: [
f for f in tf.gfile.Glob(
os.path.join(tmp_dir, "train-novels/*/*.txt"))
],
problem.DatasetSplit.EVAL: [
os.path.join(tmp_dir, "lambada_development_plain_text.txt")
],
problem.DatasetSplit.TEST: [
os.path.join(tmp_dir, "lambada_test_plain_text.txt")
]
}
else:
dataset_split = {
problem.DatasetSplit.TRAIN: [
f for f in tf.gfile.Glob(
os.path.join(tmp_dir, "train-novels/*/*.txt"))
],
problem.DatasetSplit.EVAL: [
os.path.join(tmp_dir, "lambada_control_test_data_plain_text.txt")
],
}
return dataset_split[split] |
Determine the minimum sequence length given a dataset_split.
Args:
dataset_split: A problem.DatasetSplit.
Returns:
The minimum length that a sequence can be for this dataset_split. | def min_sequence_length(self, dataset_split):
"""Determine the minimum sequence length given a dataset_split.
Args:
dataset_split: A problem.DatasetSplit.
Returns:
The minimum length that a sequence can be for this dataset_split.
"""
return {
problem.DatasetSplit.TRAIN: 8,
problem.DatasetSplit.EVAL: 65,
problem.DatasetSplit.TEST: 65
}[dataset_split] |
Determine the maximum sequence length given a dataset_split.
Args:
dataset_split: A problem.DatasetSplit.
Returns:
The maximum length that a sequence can be for this dataset_split. | def max_sequence_length(self, dataset_split):
"""Determine the maximum sequence length given a dataset_split.
Args:
dataset_split: A problem.DatasetSplit.
Returns:
The maximum length that a sequence can be for this dataset_split.
"""
return {
problem.DatasetSplit.TRAIN: 64,
problem.DatasetSplit.EVAL: 128,
problem.DatasetSplit.TEST: 128
}[dataset_split] |
Determine the dataset size given a dataset_split.
Args:
dataset_split: A problem.DatasetSplit.
Returns:
The desired number of samples for this dataset_split. | def num_samples(self, dataset_split):
"""Determine the dataset sized given a dataset_split.
Args:
dataset_split: A problem.DatasetSplit.
Returns:
The desired number of samples for this dataset_split.
"""
return {
problem.DatasetSplit.TRAIN: 1000000,
problem.DatasetSplit.EVAL: 10000,
problem.DatasetSplit.TEST: 10000
}[dataset_split] |
Yields successive checkpoints from model_dir.
Args:
model_dir: The directory in which checkpoints are saved.
timeout_mins: The maximum amount of time in minutes to wait
between checkpoints. Set this to -1 to wait indefinitely.
Yields:
last_ckpt: a new checkpoint path, or None if the timeout was reached. | def next_checkpoint(model_dir, timeout_mins=240):
"""Yields successive checkpoints from model_dir.
Args:
model_dir: The directory in which checkpoints are saved.
timeout_mins: The maximum amount of time in minutes to wait
between checkpoints. Set this to -1 to wait indefinitely.
Yields:
last_ckpt: a new checkpoint path, or None if the timeout was reached.
"""
last_ckpt = None
timeout_secs = None
if timeout_mins != -1:
timeout_secs = timeout_mins * 60
while True:
last_ckpt = tf.contrib.training.wait_for_new_checkpoint(
model_dir, last_ckpt, seconds_to_sleep=60, timeout=timeout_secs)
if last_ckpt is None:
tf.logging.info(
"Eval timeout: no new checkpoints within %dm" % timeout_mins)
break
yield last_ckpt |
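Typical usage is a continuous-evaluation loop; `run_eval` below is a hypothetical stand-in for whatever evaluation the caller performs on each checkpoint:

```python
for ckpt_path in next_checkpoint("/tmp/t2t_model_dir", timeout_mins=60):
  tf.logging.info("Evaluating checkpoint %s", ckpt_path)
  run_eval(ckpt_path)  # hypothetical evaluation callback
```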
Yields successive checkpoints from model_dir. | def next_undecoded_checkpoint(model_dir, timeout_mins=240):
"""Yields successive checkpoints from model_dir."""
last_ckpt = None
last_step = 0
while True:
# Get the latest checkpoint.
last_ckpt = tf.contrib.training.wait_for_new_checkpoint(
model_dir, last_ckpt, seconds_to_sleep=60, timeout=60 * timeout_mins)
# Get all the checkpoint from the model dir.
ckpt_path = tf.train.get_checkpoint_state(model_dir)
all_model_checkpoint_paths = ckpt_path.all_model_checkpoint_paths
ckpt_step = np.inf
next_ckpt = None
# Find the next checkpoint to eval based on last_step.
for ckpt in all_model_checkpoint_paths:
step = int(os.path.basename(ckpt).split("-")[1])
if step > last_step and step < ckpt_step:
ckpt_step = step
next_ckpt = ckpt
# If all the checkpoints have been evaluated.
if last_ckpt is None and next_ckpt is None:
tf.logging.info(
"Eval timeout: no new checkpoints within %dm" % timeout_mins)
break
if next_ckpt is not None:
last_step = ckpt_step
last_ckpt = next_ckpt
yield last_ckpt |
The TensorFlow Session config to use. | def create_session_config(log_device_placement=False,
enable_graph_rewriter=False,
gpu_mem_fraction=0.95,
use_tpu=False,
xla_jit_level=tf.OptimizerOptions.OFF,
inter_op_parallelism_threads=0,
intra_op_parallelism_threads=0):
"""The TensorFlow Session config to use."""
if use_tpu:
graph_options = tf.GraphOptions()
else:
if enable_graph_rewriter:
rewrite_options = rewriter_config_pb2.RewriterConfig()
rewrite_options.layout_optimizer = rewriter_config_pb2.RewriterConfig.ON
graph_options = tf.GraphOptions(rewrite_options=rewrite_options)
else:
graph_options = tf.GraphOptions(
optimizer_options=tf.OptimizerOptions(
opt_level=tf.OptimizerOptions.L1,
do_function_inlining=False,
global_jit_level=xla_jit_level))
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=gpu_mem_fraction)
config = tf.ConfigProto(
allow_soft_placement=True,
graph_options=graph_options,
gpu_options=gpu_options,
log_device_placement=log_device_placement,
inter_op_parallelism_threads=inter_op_parallelism_threads,
intra_op_parallelism_threads=intra_op_parallelism_threads,
isolate_session_state=True)
return config |
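A usage sketch (TF 1.x): build a config with the graph rewriter enabled and hand it to a Session.

```python
config = create_session_config(enable_graph_rewriter=True, gpu_mem_fraction=0.9)
with tf.Session(config=config) as sess:
  sess.run(tf.constant(0))  # placeholder computation
```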
Create RunConfig, TPUConfig, and Parallelism object. | def create_run_config(model_name,
master="",
model_dir=None,
iterations_per_loop=1000,
num_shards=8,
log_device_placement=False,
save_checkpoints_steps=1000,
save_checkpoints_secs=None,
keep_checkpoint_max=20,
keep_checkpoint_every_n_hours=10000,
num_gpus=1,
gpu_order="",
num_async_replicas=1,
enable_graph_rewriter=False,
gpu_mem_fraction=0.95,
no_data_parallelism=False,
optionally_use_dist_strat=False,
daisy_chain_variables=True,
schedule="continuous_train_and_eval",
worker_job="/job:localhost",
worker_id=0,
ps_replicas=0,
ps_job="/job:ps",
ps_gpu=0,
random_seed=None,
sync=False,
tpu_infeed_sleep_secs=None,
use_tpu=False,
use_tpu_estimator=False,
xla_jit_level=tf.OptimizerOptions.OFF,
inter_op_parallelism_threads=0,
log_step_count_steps=100,
intra_op_parallelism_threads=0,
tpu_config_extra_kwargs=None,
cloud_tpu_name=""):
"""Create RunConfig, TPUConfig, and Parallelism object."""
session_config = create_session_config(
log_device_placement=log_device_placement,
enable_graph_rewriter=enable_graph_rewriter,
gpu_mem_fraction=gpu_mem_fraction,
use_tpu=use_tpu,
xla_jit_level=xla_jit_level,
inter_op_parallelism_threads=inter_op_parallelism_threads,
intra_op_parallelism_threads=intra_op_parallelism_threads)
run_config_args = {
"master": master,
"evaluation_master": master,
"model_dir": model_dir,
"session_config": session_config,
"save_summary_steps": 100,
"save_checkpoints_steps": save_checkpoints_steps,
"save_checkpoints_secs": save_checkpoints_secs,
"keep_checkpoint_max": keep_checkpoint_max,
"keep_checkpoint_every_n_hours": keep_checkpoint_every_n_hours,
"tf_random_seed": random_seed,
"log_step_count_steps": log_step_count_steps
}
if save_checkpoints_secs:
del run_config_args["save_checkpoints_steps"]
run_config_cls = tf.contrib.learn.RunConfig
if use_tpu or use_tpu_estimator:
# If using TPUEstimator, use TPU RunConfig, add TPUConfig, and add
# additional args.
tpu_config_kwargs = {
"iterations_per_loop": iterations_per_loop,
"num_shards": num_shards,
"per_host_input_for_training": True,
"initial_infeed_sleep_secs": tpu_infeed_sleep_secs,
}
if tpu_config_extra_kwargs is not None:
tpu_config_kwargs.update(tpu_config_extra_kwargs)
run_config_cls = tf.contrib.tpu.RunConfig
tpu_config = tf.contrib.tpu.TPUConfig(
**tpu_config_kwargs)
run_config_args["tpu_config"] = tpu_config
if not master and "KUBE_GOOGLE_CLOUD_TPU_ENDPOINTS" in os.environ:
# If running on TPU but no master is set and the KUBE env var is present
# then we're running on ML Engine. Set the master.
run_config_args["master"] = os.environ[
"KUBE_GOOGLE_CLOUD_TPU_ENDPOINTS"]
run_config_args["evaluation_master"] = run_config_args["master"]
elif not master and cloud_tpu_name:
# Update run_config to use cluster instead of master/evaluation_master
# as we need the cluster spec to use Cloud Pods
tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(
cloud_tpu_name)
run_config_args["cluster"] = tpu_cluster_resolver
del run_config_args["master"]
del run_config_args["evaluation_master"]
elif is_cloud_async_distributed():
run_config_cls = tf.estimator.RunConfig
del run_config_args["master"]
del run_config_args["evaluation_master"]
config = run_config_cls(**run_config_args)
# If not using TPU, add device info for data_parallelism
config.use_tpu = use_tpu
if not use_tpu:
config.t2t_device_info = {
"num_async_replicas": num_async_replicas,
}
use_distribution_strategy = (
optionally_use_dist_strat and
t2t_model.T2TModel.has_symmetric_shards(model_name) and
not no_data_parallelism and ps_replicas == 0 and ps_gpu == 0 and
num_async_replicas == 1)
if use_distribution_strategy:
tf.logging.info(
"Configuring MirroredStrategy DistributionStrategy to replicate the "
"model."
)
distribution = tf.contrib.distribute.MirroredStrategy()
config = config.replace(train_distribute=distribution)
config.data_parallelism = None
else:
tf.logging.info("Configuring DataParallelism to replicate the model.")
config.data_parallelism = devices.data_parallelism(
daisy_chain_variables=daisy_chain_variables,
ps_replicas=ps_replicas,
ps_job=ps_job,
ps_gpu=ps_gpu,
schedule=schedule,
sync=sync,
worker_gpu=num_gpus,
worker_replicas=num_async_replicas,
worker_id=worker_id,
gpu_order=gpu_order,
worker_job=worker_job,
no_data_parallelism=no_data_parallelism)
return config |
Create a T2T Estimator. | def create_estimator(model_name,
hparams,
run_config,
schedule="train_and_evaluate",
decode_hparams=None,
use_tpu=False,
use_tpu_estimator=False,
use_xla=False):
"""Create a T2T Estimator."""
model_fn = t2t_model.T2TModel.make_estimator_model_fn(
model_name, hparams, decode_hparams=decode_hparams, use_tpu=use_tpu)
del use_xla
if use_tpu or use_tpu_estimator:
problem = hparams.problem
batch_size = (
problem.tpu_batch_size_per_shard(hparams) *
run_config.tpu_config.num_shards)
mlperf_log.transformer_print(
key=mlperf_log.INPUT_BATCH_SIZE, value=batch_size)
if getattr(hparams, "mtf_mode", False):
batch_size = problem.tpu_batch_size_per_shard(hparams)
predict_batch_size = batch_size
if decode_hparams and decode_hparams.batch_size:
predict_batch_size = decode_hparams.batch_size
if decode_hparams and run_config.tpu_config:
decode_hparams.add_hparam("iterations_per_loop",
run_config.tpu_config.iterations_per_loop)
estimator = tf.contrib.tpu.TPUEstimator(
model_fn=model_fn,
model_dir=run_config.model_dir,
config=run_config,
use_tpu=use_tpu,
train_batch_size=batch_size,
eval_batch_size=batch_size if "eval" in schedule else None,
predict_batch_size=predict_batch_size,
experimental_export_device_assignment=True)
else:
estimator = tf.estimator.Estimator(
model_fn=model_fn,
model_dir=run_config.model_dir,
config=run_config,
)
return estimator |
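# A minimal sketch (not part of the original source): wire create_run_config
# and create_estimator together. "transformer", "/tmp/t2t_model" and
# `my_hparams` are hypothetical placeholders; `my_hparams` is assumed to
# already carry a problem (e.g. via add_problem_hparams).
def build_estimator(my_hparams):
  run_config = create_run_config(
      model_name="transformer",
      model_dir="/tmp/t2t_model",
      num_gpus=1)
  return create_estimator(
      "transformer", my_hparams, run_config, schedule="train_and_evaluate")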
Create train and eval hooks for Experiment. | def create_hooks(use_tfdbg=False,
use_dbgprofile=False,
dbgprofile_kwargs=None,
use_validation_monitor=False,
validation_monitor_kwargs=None,
use_early_stopping=False,
early_stopping_kwargs=None):
"""Create train and eval hooks for Experiment."""
train_hooks = []
eval_hooks = []
if use_tfdbg:
hook = debug.LocalCLIDebugHook()
train_hooks.append(hook)
eval_hooks.append(hook)
if use_dbgprofile:
# Recorded traces can be visualized with chrome://tracing/
# The memory/tensor lifetime is also profiled
tf.logging.info("Using ProfilerHook")
defaults = dict(save_steps=10, show_dataflow=True, show_memory=True)
defaults.update(dbgprofile_kwargs)
train_hooks.append(tf.train.ProfilerHook(**defaults))
if use_validation_monitor:
tf.logging.info("Using ValidationMonitor")
train_hooks.append(
tf.contrib.learn.monitors.ValidationMonitor(
hooks=eval_hooks, **validation_monitor_kwargs))
if use_early_stopping:
tf.logging.info("Using EarlyStoppingHook")
hook = metrics_hook.EarlyStoppingHook(**early_stopping_kwargs)
# Adding to both training and eval so that eval aborts as well
train_hooks.append(hook)
eval_hooks.append(hook)
return train_hooks, eval_hooks |
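# A minimal sketch (not part of the original source): request only the debug
# profiler hook; traces land in `output_dir` and can be viewed with
# chrome://tracing/. `output_dir` is a hypothetical placeholder.
def make_profiler_hooks(output_dir):
  train_hooks, eval_hooks = create_hooks(
      use_dbgprofile=True,
      dbgprofile_kwargs={"output_dir": output_dir})
  return train_hooks, eval_hooks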
Create Experiment. | def create_experiment(
run_config,
hparams,
model_name,
problem_name,
data_dir,
train_steps,
eval_steps,
min_eval_frequency=2000,
eval_throttle_seconds=600,
schedule="train_and_evaluate",
export=False,
decode_hparams=None,
use_tfdbg=False,
use_dbgprofile=False,
eval_early_stopping_steps=None,
eval_early_stopping_metric=None,
eval_early_stopping_metric_delta=None,
eval_early_stopping_metric_minimize=True,
eval_timeout_mins=240,
eval_use_test_set=False,
use_tpu=False,
use_tpu_estimator=False,
use_xla=False,
additional_train_hooks=None,
additional_eval_hooks=None,
warm_start_from=None,
decode_from_file="",
decode_to_file="",
decode_reference="",
std_server_protocol=None):
"""Create Experiment."""
# HParams
hparams.add_hparam("model_dir", run_config.model_dir)
hparams.add_hparam("data_dir", data_dir)
hparams.add_hparam("train_steps", train_steps)
hparams.add_hparam("eval_steps", eval_steps)
hparams.add_hparam("schedule", schedule)
hparams.add_hparam("warm_start_from", warm_start_from)
hparams.add_hparam("std_server_protocol", std_server_protocol)
hparams.add_hparam("eval_freq_in_steps", min_eval_frequency)
hparams.add_hparam("eval_timeout_mins", eval_timeout_mins)
if decode_hparams is not None:
decode_hparams.add_hparam("decode_from_file", decode_from_file)
if decode_to_file and not decode_hparams.decode_to_file:
decode_hparams.decode_to_file = decode_to_file
if decode_reference and not decode_hparams.decode_reference:
decode_hparams.decode_reference = decode_reference
add_problem_hparams(hparams, problem_name)
# Estimator
estimator = create_estimator(
model_name,
hparams,
run_config,
schedule=schedule,
decode_hparams=decode_hparams,
use_tpu=use_tpu,
use_tpu_estimator=use_tpu_estimator,
use_xla=use_xla)
# Input fns from Problem
problem = hparams.problem
train_input_fn = problem.make_estimator_input_fn(tf.estimator.ModeKeys.TRAIN,
hparams)
dataset_split = "test" if eval_use_test_set else None
dataset_kwargs = {"dataset_split": dataset_split}
eval_input_fn = problem.make_estimator_input_fn(tf.estimator.ModeKeys.EVAL,
hparams,
dataset_kwargs=dataset_kwargs)
# Export
exporter = None
if export:
def compare_fn(best_eval_result, current_eval_result):
metric = eval_early_stopping_metric or "loss"
return current_eval_result[metric] < best_eval_result[metric]
def serving_input_receiver_fn(hparams, decode_hparams, use_tpu):
return problem.serving_input_fn(hparams, decode_hparams, use_tpu)
exporter = tf.estimator.BestExporter(
name="best",
serving_input_receiver_fn=serving_input_receiver_fn,
compare_fn=compare_fn,
assets_extra=problem.export_assets)
# Hooks
validation_monitor_kwargs = dict(
input_fn=eval_input_fn,
eval_steps=eval_steps,
every_n_steps=min_eval_frequency,
early_stopping_rounds=eval_early_stopping_steps,
early_stopping_metric=eval_early_stopping_metric,
early_stopping_metric_minimize=eval_early_stopping_metric_minimize)
dbgprofile_kwargs = {"output_dir": run_config.model_dir}
early_stopping_kwargs = dict(
events_dir=os.path.join(run_config.model_dir, "eval_continuous"),
tag=eval_early_stopping_metric,
num_plateau_steps=eval_early_stopping_steps,
plateau_decrease=eval_early_stopping_metric_minimize,
plateau_delta=eval_early_stopping_metric_delta,
every_n_steps=min_eval_frequency)
# Eval on TPU Pods is not supported yet
if use_tpu and run_config.tpu_config.num_shards > 8 and "eval" in schedule:
raise ValueError("Eval is not currently supported on a TPU Pod")
# In-process eval (and possible early stopping)
if schedule == "continuous_train_and_eval" and min_eval_frequency:
tf.logging.warn("ValidationMonitor only works with "
"--schedule=train_and_evaluate")
use_validation_monitor = (
schedule == "train_and_evaluate" and min_eval_frequency)
# Distributed early stopping
local_schedules = ["train_and_evaluate", "continuous_train_and_eval"]
use_early_stopping = (
schedule not in local_schedules and eval_early_stopping_steps)
train_hooks, eval_hooks = create_hooks(
use_tfdbg=use_tfdbg,
use_dbgprofile=use_dbgprofile,
dbgprofile_kwargs=dbgprofile_kwargs,
use_validation_monitor=use_validation_monitor,
validation_monitor_kwargs=validation_monitor_kwargs,
use_early_stopping=use_early_stopping,
early_stopping_kwargs=early_stopping_kwargs)
hook_context = HookContext(
estimator=estimator, problem=problem, hparams=hparams)
train_hooks += t2t_model.T2TModel.get_train_hooks(model_name, hook_context)
eval_hooks += t2t_model.T2TModel.get_eval_hooks(model_name, hook_context)
if additional_train_hooks:
train_hooks += additional_train_hooks
if additional_eval_hooks:
eval_hooks += additional_eval_hooks
train_hooks = tf.contrib.learn.monitors.replace_monitors_with_hooks(
train_hooks, estimator)
eval_hooks = tf.contrib.learn.monitors.replace_monitors_with_hooks(
eval_hooks, estimator)
train_spec = tf.estimator.TrainSpec(
train_input_fn, max_steps=train_steps, hooks=train_hooks)
eval_spec = tf.estimator.EvalSpec(
eval_input_fn,
steps=eval_steps,
hooks=eval_hooks,
start_delay_secs=0 if hparams.schedule == "evaluate" else 120,
throttle_secs=eval_throttle_seconds,
exporters=exporter)
return T2TExperiment(estimator, hparams, train_spec, eval_spec,
use_validation_monitor, decode_hparams) |
Wrapper for canonical experiment_fn. See create_experiment. | def create_experiment_fn(*args, **kwargs):
"""Wrapper for canonical experiment_fn. See create_experiment."""
def experiment_fn(run_config, hparams):
return create_experiment(run_config, hparams, *args, **kwargs)
return experiment_fn |
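# A minimal sketch (not part of the original source): build an experiment_fn
# once and invoke it later with a RunConfig and HParams. The model, problem
# and path names below are hypothetical placeholders.
def make_experiment(run_config, hparams):
  experiment_fn = create_experiment_fn(
      model_name="transformer",
      problem_name="translate_ende_wmt32k",
      data_dir="/tmp/t2t_data",
      train_steps=1000,
      eval_steps=100)
  return experiment_fn(run_config, hparams)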
Restore from a checkpoint. | def restore_checkpoint(ckpt_dir, saver, sess, must_restore=False):
"""Restore from a checkpoint."""
ckpt = tf.train.get_checkpoint_state(ckpt_dir)
if must_restore and not ckpt:
raise ValueError("No checkpoint found in %s" % ckpt_dir)
if not ckpt:
return 0
path = ckpt.model_checkpoint_path
tf.logging.info("Restoring checkpoint %s", path)
saver.restore(sess, path)
step = int(path.split("-")[-1])
return step |
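# A minimal usage sketch (not part of the original source): restore the newest
# checkpoint into a fresh session, assuming the model's variables have already
# been built in the default graph. `my_ckpt_dir` is a hypothetical placeholder.
def restore_latest(my_ckpt_dir):
  saver = tf.train.Saver()
  sess = tf.Session()
  step = restore_checkpoint(my_ckpt_dir, saver, sess, must_restore=False)
  tf.logging.info("Restored checkpoint at global step %d", step)
  return sess, step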
Does eval and decode after training every eval_freq_in_steps. | def train_eval_and_decode(self):
"""Does eval and decode after training every eval_freq_in_steps."""
eval_steps = self._hparams.eval_freq_in_steps
packed_dataset = "_packed" in self._hparams.problem.name
mlperf_log.transformer_print(key=mlperf_log.TRAIN_LOOP)
for i in range(0, self._train_spec.max_steps, eval_steps):
mlperf_log.transformer_print(
key=mlperf_log.TRAIN_EPOCH, value=i // eval_steps)
if packed_dataset and i > 0:
problem = registry.problem(self._hparams.problem.name + "_packed")
p_hparams = problem.get_hparams(self._hparams)
self._hparams.problem = problem
self._hparams.problem_hparams = p_hparams
self._estimator.train(
self._train_spec.input_fn,
steps=eval_steps,
hooks=self._train_spec.hooks)
self._set_eval_dir_name("eval")
self._estimator.evaluate(
self._eval_spec.input_fn,
steps=self._eval_spec.steps,
hooks=self._eval_spec.hooks,
name="eval")
if packed_dataset:
problem = registry.problem(
self._hparams.problem.name.replace("_packed", ""))
p_hparams = problem.get_hparams(self._hparams)
self._hparams.problem = problem
self._hparams.problem_hparams = p_hparams
mlperf_log.transformer_print(key=mlperf_log.EVAL_START)
if self._hparams.mlperf_mode:
self._decode_hparams.mlperf_decode_step = i + eval_steps
self.decode(dataset_split=tf.estimator.ModeKeys.EVAL)
d_hparams = self._decode_hparams
if self._hparams.mlperf_mode and d_hparams.mlperf_success:
mlperf_log.transformer_print(
key=mlperf_log.RUN_STOP, value={"success": "true"})
break
d_hparams = self._decode_hparams
if self._hparams.mlperf_mode and not d_hparams.mlperf_success:
mlperf_log.transformer_print(
key=mlperf_log.RUN_STOP, value={"success": "false"}) |
Evaluate until checkpoints stop being produced. | def continuous_eval(self):
"""Evaluate until checkpoints stop being produced."""
for ckpt_path in next_checkpoint(self._hparams.model_dir,
self._hparams.eval_timeout_mins):
# Skip zero'th step.
train_step = decoding.get_step_from_ckpt_path(ckpt_path)
if train_step == 0:
tf.logging.info("Skipping evaluation at step 0")
continue
self.evaluate() |
Evaluate on train data until checkpoints stop being produced. | def continuous_eval_on_train_data(self):
"""Evaluate on train data until checkpoints stop being produced."""
for ckpt_path in next_checkpoint(self._hparams.model_dir,
self._hparams.eval_timeout_mins):
# Skip zero'th step.
train_step = decoding.get_step_from_ckpt_path(ckpt_path)
if train_step == 0:
tf.logging.info("Skipping evaluation at step 0")
continue
self.evaluate_on_train_data() |
Starts a TensorFlow server and joins the serving thread.
Typically used for parameter servers.
Raises:
ValueError: if not enough information is available in the estimator's
config to create a server. | def run_std_server(self):
"""Starts a TensorFlow server and joins the serving thread.
Typically used for parameter servers.
Raises:
ValueError: if not enough information is available in the estimator's
config to create a server.
"""
config = tf.estimator.RunConfig()
server = tf.train.Server(
config.cluster_spec,
job_name=config.task_type,
task_index=config.task_id,
protocol=config.protocol)
server.join() |
Decodes from dataset or file. | def decode(self,
dataset_split=None,
decode_from_file=False,
checkpoint_path=None):
"""Decodes from dataset or file."""
if decode_from_file:
decoding.decode_from_file(self._estimator,
self._decode_hparams.decode_from_file,
self._hparams,
self._decode_hparams,
self._decode_hparams.decode_to_file)
else:
decoding.decode_from_dataset(
self._estimator,
self._hparams.problem.name,
self._hparams,
self._decode_hparams,
dataset_split=dataset_split,
checkpoint_path=checkpoint_path) |
Decode from dataset on new checkpoint. | def continuous_decode(self):
"""Decode from dataset on new checkpoint."""
for _ in next_checkpoint(self._hparams.model_dir,
self._decode_hparams.decode_timeout_mins):
self.decode() |
Decode from dataset on new checkpoint. | def continuous_decode_on_train_data(self):
"""Decode from dataset on new checkpoint."""
for _ in next_checkpoint(self._hparams.model_dir,
self._decode_hparams.decode_timeout_mins):
self.decode(dataset_split=tf.estimator.ModeKeys.TRAIN) |
Decode from dataset on new checkpoint. | def continuous_decode_on_eval_data(self):
"""Decode from dataset on new checkpoint."""
if self._hparams.mlperf_mode:
ckpt_generator = next_undecoded_checkpoint(
self._hparams.model_dir, self._decode_hparams.decode_timeout_mins)
else:
ckpt_generator = next_checkpoint(self._hparams.model_dir,
self._decode_hparams.decode_timeout_mins)
for ckpt in ckpt_generator:
current_step = decoding.get_step_from_ckpt_path(ckpt)
tf.logging.info("Decoding step %d" % current_step)
# Skip checkpoint 0.
if current_step == 0:
continue
# Decode the latest checkpoint by default.
checkpoint_path = None
if self._hparams.mlperf_mode:
self._decode_hparams.mlperf_decode_step = current_step
checkpoint_path = ckpt
mlperf_log.transformer_print(key=mlperf_log.EVAL_START)
self.decode(
dataset_split=tf.estimator.ModeKeys.EVAL,
checkpoint_path=checkpoint_path)
d_hparams = self._decode_hparams
if self._hparams.mlperf_mode and d_hparams.mlperf_success:
mlperf_log.transformer_print(
key=mlperf_log.RUN_STOP, value={"success": "true"})
break
d_hparams = self._decode_hparams
if self._hparams.mlperf_mode and not d_hparams.mlperf_success:
mlperf_log.transformer_print(
key=mlperf_log.RUN_STOP, value={"success": "false"}) |
Decode from file on new checkpoint. | def continuous_decode_from_file(self):
"""Decode from file on new checkpoint."""
for _ in next_checkpoint(self._hparams.model_dir,
self._decode_hparams.decode_timeout_mins):
self.decode(decode_from_file=True) |
Flatten dict of dicts into a single dict with appropriate prefixes.
Handles only 2 levels of nesting in the original dict.
Args:
original_dict: Dict which may contain one or more dicts.
Returns:
flat_dict: Dict without any nesting. Any dicts in the original dict have
their keys as prefixes in the new dict.
Raises:
ValueError if the original dict has more than two levels of nesting. | def _flatten_dict(original_dict):
"""Flatten dict of dicts into a single dict with appropriate prefixes.
Handles only 2 levels of nesting in the original dict.
Args:
original_dict: Dict which may contain one or more dicts.
Returns:
flat_dict: Dict without any nesting. Any dicts in the original dict have
their keys as prefixes in the new dict.
Raises:
ValueError if the original dict has more than two levels of nesting.
"""
flat_dict = {}
for key, value in original_dict.items():
if isinstance(value, dict):
for name, tensor in value.items():
if isinstance(tensor, dict):
raise ValueError("flatten_dict only handles 2 levels of nesting.")
flat_key = "__" + key + "_" + name
flat_dict[flat_key] = tensor
else:
flat_dict[key] = value
return flat_dict |
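# A small illustrative example (not part of the original source): nested keys
# are prefixed with "__<outer>_" while flat keys pass through unchanged. The
# tensors are hypothetical placeholders.
def _flatten_dict_example():
  t1, t2 = tf.constant(1), tf.constant(2)
  flat = _flatten_dict({"logits": {"targets": t1}, "labels": t2})
  assert set(flat.keys()) == {"__logits_targets", "labels"}
  return flat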
Returns a dict of dicts if any prefixes match keys in the flat dict.
The function handles the case where the prefix may not be a dict.
Args:
flat_dict: A dict without any nesting.
prefixes: A list of strings which may have been dicts in the
original structure. | def _unflatten_dict(flat_dict, prefixes):
"""Returns a dict of dicts if any prefixes match keys in the flat dict.
The function handles the case where the prefix may not be a dict.
Args:
flat_dict: A dict without any nesting.
prefixes: A list of strings which may have been dicts in the
original structure.
"""
original_dict = {}
for key, value in flat_dict.items():
prefix_found = False
for prefix in prefixes:
full_prefix = "__" + prefix + "_"
if key.startswith(full_prefix):
# Add a dict to the original dict with key=prefix
if prefix not in original_dict:
original_dict[prefix] = {}
original_dict[prefix][key[len(full_prefix):]] = value
prefix_found = True
break
if not prefix_found:
# No key matched a prefix in the for loop.
original_dict[key] = value
return original_dict |
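# A small illustrative example (not part of the original source): undo the
# flattening above by naming the prefixes that were originally dicts.
def _unflatten_dict_example():
  flat = {"__logits_targets": tf.constant(1), "labels": tf.constant(2)}
  nested = _unflatten_dict(flat, prefixes=["logits"])
  assert set(nested.keys()) == {"logits", "labels"}
  assert set(nested["logits"].keys()) == {"targets"}
  return nested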
Dummy vars for restore to work when not using TPU codepath. | def create_dummy_vars():
"""Dummy vars for restore to work when not using TPU codepath."""
var_names = set([v.name for v in tf.global_variables()])
if "losses_avg/problem_0/total_loss:0" in var_names:
return
with tf.variable_scope("losses_avg"):
with tf.variable_scope("problem_0"):
for var_name in ["total", "extra", "training"]:
tf.get_variable(
"%s_loss" % var_name, initializer=100.0, trainable=False)
with tf.variable_scope("train_stats"):
tf.get_variable("problem_0_steps", initializer=0, trainable=False) |
Create the metrics_fn that TPUEstimatorSpec expects. | def create_tpu_eval_metrics_fn(problem, model_hparams):
"""Create the metrics_fn that TPUEstimatorSpec expects."""
metric_fns = []
eval_metrics = problem.eval_metric_fns(model_hparams)
tm = _create_target_modality(problem.get_hparams(model_hparams).modality)
if isinstance(tm, dict):
for k, v in six.iteritems(tm):
weights_fn = modalities.get_weights_fn(v)
def make_metric_fn(metric_fn):
def wrapped_metric_fn(logits, labels, features, weights_fn=weights_fn):
kwargs = {}
args, _, keywords, _ = inspect.getargspec(metric_fn)
if ("features" in args) or keywords:
kwargs["features"] = features
num, den = metric_fn(logits, labels, weights_fn=weights_fn, **kwargs)
return tf.metrics.mean(num, den)
return wrapped_metric_fn
for metric, metric_fn in six.iteritems(eval_metrics):
if metric in TPU_METRIC_BLACKLIST:
log_warn("Skipping eval metric %s in TPU_METRIC_BLACKLIST", metric)
continue
name = "%s/metrics-%s/%s" % (k, problem.name, metric)
metric_fns.append((name, make_metric_fn(metric_fn)))
else:
weights_fn = modalities.get_weights_fn(tm)
def make_metric_fn(metric_fn):
def wrapped_metric_fn(logits, labels, features):
kwargs = {}
args, _, keywords, _ = inspect.getargspec(metric_fn)
if ("features" in args) or keywords:
kwargs["features"] = features
num, den = metric_fn(logits, labels, weights_fn=weights_fn, **kwargs)
return tf.metrics.mean(num, den)
return wrapped_metric_fn
for metric, metric_fn in six.iteritems(eval_metrics):
if metric in TPU_METRIC_BLACKLIST:
log_warn("Skipping eval metric %s in TPU_METRIC_BLACKLIST", metric)
continue
name = "metrics-%s/%s" % (problem.name, metric)
metric_fns.append((name, make_metric_fn(metric_fn)))
def all_metrics_fn(**kwargs):
"""Construct metrics dictionary."""
original_kwargs = _unflatten_dict(kwargs, prefixes=["logits", "features"])
del kwargs
logits = original_kwargs["logits"]
labels = original_kwargs["labels"]
features = original_kwargs["features"]
del original_kwargs
metrics_dict = {}
for name, fn in metric_fns:
if isinstance(logits, dict) and isinstance(labels, dict):
for k, v in six.iteritems(logits):
metrics_dict["%s/%s" % (k, name)] = fn(v, labels[k], features)
elif isinstance(logits, dict):
tf.logging.warning("Logits is a dict, but labels is not; only "
"evaluating logits['targets'] against labels.")
metrics_dict["%s/%s" % ("targets", name)] = fn(logits["targets"],
labels, features)
else:
metrics_dict[name] = fn(logits, labels, features)
return metrics_dict
return all_metrics_fn |
Remove summaries from the default graph. | def remove_summaries():
"""Remove summaries from the default graph."""
g = tf.get_default_graph()
key = tf.GraphKeys.SUMMARIES
log_debug("Remove summaries %s" % str(g.get_collection(key)))
del g.get_collection_ref(key)[:]
assert not g.get_collection(key) |
Construct a host_call writing scalar summaries.
Args:
  model_dir: String containing path to the train directory
Returns:
(fn, args) Pair to be called by TPUEstimator as the host_call. | def create_host_call(model_dir):
"""Construct a host_call writing scalar summaries.
Args:
    model_dir: String containing path to the train directory
Returns:
(fn, args) Pair to be called by TPUEstimator as the host_call.
"""
graph = tf.get_default_graph()
summaries = graph.get_collection(tf.GraphKeys.SUMMARIES)
gs_t = tf.reshape(tf.to_int32(tf.train.get_global_step()), [1])
summary_kwargs = collections.OrderedDict()
for t in summaries:
# TODO(aidangomez): enable ImageSummary support when we have a faster method
# see @shibow's comment in cl/202344570
if t.op.type not in ["ScalarSummary"]:
tf.logging.warn("Ignoring unsupported tf.Summary type %s" % t.op.type)
continue
name = t.op.name
tensor = t.op.inputs[1]
if t.op.type == "ScalarSummary":
assert tensor.shape.is_compatible_with([])
if tensor.dtype == tf.int64:
tensor = tf.to_int32(tensor)
summary_kwargs["ScalarSummary" + name] = tf.reshape(tensor, [1])
elif t.op.type == "ImageSummary":
# TODO(aidangomez): as we move to support more types, update
# common_layers.tpu_safe_image_summary
if tensor.dtype != tf.float32:
tf.logging.warn(
"Currently T2T on TPU only supports ImageSummary of "
"tf.float32-type Tensors. Skipping Tensor "
"%s with dtype %s..." % (tensor.name, tensor.dtype))
continue
# tensor = tf.to_float(tensor)
summary_kwargs["ImageSummary" + name] = tensor
# When no supported summaries are found, don't create host_call. Otherwise,
# TPU outfeed queue would enqueue global_step while host_call doesn't dequeue
# it, eventually causing hang.
if not summary_kwargs:
return None
summary_kwargs["global_step"] = gs_t
log_info("summary_kwargs %s" % str(summary_kwargs))
def host_call_fn(**kwargs):
"""Training host call. Creates summaries for training metrics.
Args:
      **kwargs: Dict of {str: Tensor}, with `Tensor` of shape `[batch]`. Must
contain key "global_step" with value of current global_step Tensor.
Returns:
List of summary ops to run on the CPU host.
"""
gs = tf.to_int64(kwargs.pop("global_step")[0])
with tf.contrib.summary.create_file_writer(model_dir).as_default():
with tf.contrib.summary.always_record_summaries():
# We need to use tf.contrib.summary in order to feed the `step`.
for name, value in sorted(six.iteritems(kwargs)):
if name.startswith("ScalarSummary"):
name = name[len("ScalarSummary"):]
tf.contrib.summary.scalar(
name, tf.reduce_mean(tf.to_float(value)), step=gs)
elif name.startswith("ImageSummary"):
name = name[len("ImageSummary"):]
tf.contrib.summary.image(name, value, step=gs)
return tf.contrib.summary.all_summary_ops()
return (host_call_fn, summary_kwargs) |
Average losses across datashards.
Args:
sharded_losses: list<dict<str loss_name, Tensor loss>>. The loss
can be a single Tensor or a 2-tuple (numerator and denominator).
Returns:
losses: dict<str loss_name, Tensor avg_loss> | def average_sharded_losses(sharded_losses):
"""Average losses across datashards.
Args:
sharded_losses: list<dict<str loss_name, Tensor loss>>. The loss
can be a single Tensor or a 2-tuple (numerator and denominator).
Returns:
losses: dict<str loss_name, Tensor avg_loss>
"""
losses = {}
for loss_name in sorted(sharded_losses[0]):
all_shards = [shard_losses[loss_name] for shard_losses in sharded_losses]
if isinstance(all_shards[0], tuple):
sharded_num, sharded_den = zip(*all_shards)
mean_loss = (
tf.add_n(sharded_num) / tf.maximum(
tf.cast(1.0, sharded_den[0].dtype), tf.add_n(sharded_den)))
else:
mean_loss = tf.reduce_mean(all_shards)
losses[loss_name] = mean_loss
return losses |
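# A small illustrative example (not part of the original source): average a
# plain per-shard loss and a (numerator, denominator) loss across two shards.
def _average_sharded_losses_example():
  shard_a = {"training": tf.constant(1.0),
             "extra": (tf.constant(2.0), tf.constant(4.0))}
  shard_b = {"training": tf.constant(3.0),
             "extra": (tf.constant(6.0), tf.constant(4.0))}
  losses = average_sharded_losses([shard_a, shard_b])
  # Expected: losses["training"] == 2.0, losses["extra"] == (2 + 6) / (4 + 4).
  return losses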
Generate summaries for features. | def summarize_features(features, num_shards=1):
"""Generate summaries for features."""
if not common_layers.should_generate_summaries():
return
with tf.name_scope("input_stats"):
for (k, v) in sorted(six.iteritems(features)):
if (isinstance(v, tf.Tensor) and (v.get_shape().ndims > 1) and
(v.dtype != tf.string)):
tf.summary.scalar("%s_batch" % k, tf.shape(v)[0] // num_shards)
tf.summary.scalar("%s_length" % k, tf.shape(v)[1])
nonpadding = tf.to_float(tf.not_equal(v, 0))
nonpadding_tokens = tf.reduce_sum(nonpadding)
tf.summary.scalar("%s_nonpadding_tokens" % k, nonpadding_tokens)
tf.summary.scalar("%s_nonpadding_fraction" % k,
tf.reduce_mean(nonpadding)) |
Compose two custom getters.
Example use:
tf.get_variable_scope().set_custom_getter(
compose_custom_getters(tf.get_variable_scope().custom_getter, new_getter))
This composes getters in the same way as creating a new variable scope with
the new_getter, but it does not actually create a new variable scope.
Args:
getter_a: a custom getter - generally from the existing variable scope.
getter_b: a custom getter
Returns:
a custom getter | def _compose_custom_getters(getter_a, getter_b):
"""Compose two custom getters.
Example use:
tf.get_variable_scope().set_custom_getter(
compose_custom_getters(tf.get_variable_scope().custom_getter, new_getter))
This composes getters in the same way as creating a new variable scope with
the new_getter, but it does not actually create a new variable scope.
Args:
getter_a: a custom getter - generally from the existing variable scope.
getter_b: a custom getter
Returns:
a custom getter
"""
if not getter_a:
return getter_b
if not getter_b:
return getter_a
def getter_fn(getter, *args, **kwargs):
return getter_b(functools.partial(getter_a, getter), *args, **kwargs)
return getter_fn |
Set a custom getter in the current variable scope.
Do not overwrite the existing custom getter - rather compose with it.
Args:
custom_getter: a custom getter. | def set_custom_getter_compose(custom_getter):
"""Set a custom getter in the current variable scope.
Do not overwrite the existing custom getter - rather compose with it.
Args:
custom_getter: a custom getter.
"""
tf.get_variable_scope().set_custom_getter(
_compose_custom_getters(tf.get_variable_scope().custom_getter,
custom_getter)) |
Initialize variables from given directory. | def initialize_from_ckpt(ckpt_dir, hparams):
"""Initialize variables from given directory."""
model_dir = hparams.get("model_dir", None)
already_has_ckpt = (
model_dir and tf.train.latest_checkpoint(model_dir) is not None)
if already_has_ckpt:
return
tf.logging.info("Checkpoint dir: %s", ckpt_dir)
reader = tf.contrib.framework.load_checkpoint(ckpt_dir)
variable_map = {}
for var in tf.contrib.framework.get_trainable_variables():
var_name = var.name.split(":")[0]
if reader.has_tensor(var_name):
tf.logging.info("Loading variable from checkpoint: %s", var_name)
variable_map[var_name] = var
else:
tf.logging.info("Cannot find variable in checkpoint, skipping: %s",
var_name)
tf.train.init_from_checkpoint(ckpt_dir, variable_map) |
Whether the target modality is real-valued. | def _target_modality_is_real(self):
"""Whether the target modality is real-valued."""
vocab_size = self._problem_hparams.vocab_size["targets"]
if vocab_size is not None and hasattr(self._hparams, "vocab_divisor"):
vocab_size += (-vocab_size) % self._hparams.vocab_divisor
modality = self._problem_hparams.modality["targets"]
modality_name = self._hparams.name.get(
"targets",
modalities.get_name(modality))(self._hparams, vocab_size)
return modality_name.startswith("real") |
Estimator model_fn sharded along batch dimension.
Args:
sharded_features: {str: [Tensor]}. Features sharded along batch dimension.
Each list is the same length (== number of shards).
Returns:
sharded_logits: [Tensor]. Logits for each shard of examples.
losses: {str: 0-D Tensor}. Loss averaged across shards. | def model_fn_sharded(self, sharded_features):
"""Estimator model_fn sharded along batch dimension.
Args:
sharded_features: {str: [Tensor]}. Features sharded along batch dimension.
Each list is the same length (== number of shards).
Returns:
sharded_logits: [Tensor]. Logits for each shard of examples.
losses: {str: 0-D Tensor}. Loss averaged across shards.
"""
dp = self._data_parallelism
# [{str: Tensor}]. Transpose of 'sharded_features'.
datashard_to_features = self._to_features_per_datashard(sharded_features)
if self.use_body_sharded():
if self.hparams.scheduled_sampling_prob > 0.0:
raise NotImplementedError(
"Scheduled sampling for non-sharded body only.")
# MoE models override body_sharded
transformed_features = dp(self.bottom, datashard_to_features)
body_out = self.body_sharded(
self._to_single_features_dict(transformed_features))
body_out, losses = self._normalize_body_output(body_out)
if "training" in losses:
log_info("Skipping T2TModel top and loss because training loss "
"returned from body")
sharded_logits = body_out
else:
if isinstance(body_out, dict):
sharded_logits = collections.OrderedDict()
sharded_losses = collections.OrderedDict()
for k, v in sorted(six.iteritems(body_out)):
sharded_logits[k] = dp(self.top, v, datashard_to_features)
sharded_losses[k] = dp(self.loss, sharded_logits[k],
datashard_to_features)
training_loss_dict = average_sharded_losses([({
"training": l
} for l in loss) for loss in sharded_losses.values()])
losses.update(training_loss_dict)
else:
sharded_logits = dp(self.top, body_out, datashard_to_features)
sharded_losses = dp(self.loss, sharded_logits, datashard_to_features)
if isinstance(sharded_losses, tuple):
nums, dens = sharded_losses
sharded_losses = zip(nums, dens)
training_loss_dict = average_sharded_losses([{
"training": loss
} for loss in sharded_losses])
losses.update(training_loss_dict)
else:
sharded_logits, sharded_losses = dp(self.model_fn, datashard_to_features)
sharded_logits, sharded_losses = dp(
self.maybe_scheduled_sampling,
datashard_to_features, sharded_logits, sharded_losses)
if isinstance(sharded_logits[0], dict):
temp_dict = {k: [] for k, _ in six.iteritems(sharded_logits[0])}
for k, _ in six.iteritems(sharded_logits[0]):
for l in sharded_logits:
temp_dict[k].append(l[k])
sharded_logits = temp_dict
losses = average_sharded_losses(sharded_losses)
return sharded_logits, losses |
Transforms features to feed into body.
Args:
features: dict of str to Tensor. Typically it is the preprocessed data
batch after Problem's preprocess_example().
Returns:
transformed_features: dict of same key-value pairs as features. The value
Tensors are newly transformed. | def bottom(self, features):
"""Transforms features to feed into body.
Args:
features: dict of str to Tensor. Typically it is the preprocessed data
batch after Problem's preprocess_example().
Returns:
transformed_features: dict of same key-value pairs as features. The value
Tensors are newly transformed.
"""
if not self._problem_hparams:
log_warn("Without a Problem, T2TModel.bottom is a passthrough.")
return features
transformed_features = collections.OrderedDict()
all_previous_modalities = []
target_modality = _create_target_modality(self._problem_hparams.modality)
# Transform features via its corresponding modality.
for feature_name, modality in sorted(
six.iteritems(self._problem_hparams.modality)):
if feature_name not in features:
tf.logging.warning("Missing feature %s - ignoring." % feature_name)
continue
vocab_size = self._problem_hparams.vocab_size[feature_name]
if vocab_size is not None and hasattr(self._hparams, "vocab_divisor"):
vocab_size += (-vocab_size) % self._hparams.vocab_divisor
modality_name = self._hparams.name.get(
feature_name,
modalities.get_name(modality))(self._hparams, vocab_size)
# Use if-else clauses to preserve behavior of previous changes: namely,
# the variable scope name for the targets feature if there is only one
# target modality; and to reuse variable scopes for only input modalities.
if feature_name in target_modality:
if len(target_modality) > 1:
variable_scope_name = "%s/%s" % (modality_name, feature_name)
else:
variable_scope_name = modality_name
bottom = self._hparams.bottom.get(
feature_name,
modalities.get_targets_bottom(modality))
# TODO(aidangomez): share variables?
with tf.variable_scope(variable_scope_name) as vs:
self._add_variable_scope(variable_scope_name, vs)
log_info("Transforming feature '%s' with %s.targets_bottom",
feature_name,
modality_name)
transformed_features[feature_name] = bottom(features[feature_name],
self._hparams,
vocab_size)
else:
bottom = self._hparams.bottom.get(feature_name,
modalities.get_bottom(modality))
do_reuse = modality_name in all_previous_modalities
with tf.variable_scope(modality_name, reuse=do_reuse) as vs:
self._add_variable_scope(modality_name, vs)
log_info("Transforming feature '%s' with %s.bottom",
feature_name,
modality_name)
transformed_features[feature_name] = bottom(features[feature_name],
self._hparams,
vocab_size)
all_previous_modalities.append(modality_name)
for key in features:
if key not in transformed_features:
# For features without a modality, we pass them along as is
transformed_features[key] = features[key]
else:
# Other features get passed along with the "raw" suffix
transformed_features[key + "_raw"] = features[key]
return transformed_features |
Computes logits given body output and features.
Args:
body_output: dict of str to Tensor, comprising one key-value pair for each
target. Each value denotes the target's pre-logit activations.
Alternatively, it may be a single Tensor denoting the pre-logits for
that target.
features: dict of str to Tensor. Typically it is the preprocessed data
batch after Problem's preprocess_example().
Returns:
logits: dict of str to Tensor, denoting each logits for each target; or
a single Tensor denoting the logits for that target.
When targets are generated at training time:
logits == {
"self_generated_targets": <generated targets tensor>
"logits": <original logits Tensor or dict>
} | def top(self, body_output, features):
"""Computes logits given body output and features.
Args:
body_output: dict of str to Tensor, comprising one key-value pair for each
target. Each value denotes the target's pre-logit activations.
Alternatively, it may be a single Tensor denoting the pre-logits for
that target.
features: dict of str to Tensor. Typically it is the preprocessed data
batch after Problem's preprocess_example().
Returns:
logits: dict of str to Tensor, denoting each logits for each target; or
a single Tensor denoting the logits for that target.
When targets are generated at training time:
logits == {
"self_generated_targets": <generated targets tensor>
"logits": <original logits Tensor or dict>
}
"""
if isinstance(body_output, dict):
logits = {}
for k, v in six.iteritems(body_output):
# TODO(aidangomez): share variables here?
with tf.variable_scope(k) as top_vs:
self._add_variable_scope("top_%s" % k, top_vs)
logits[k] = self._top_single(v, k, features)
return logits
else:
return self._top_single(body_output, "targets", features) |
Return a training op minimizing loss. | def optimize(self, loss, num_async_replicas=1, use_tpu=False):
"""Return a training op minimizing loss."""
lr = learning_rate.learning_rate_schedule(self.hparams)
if num_async_replicas > 1:
log_info("Dividing learning rate by num_async_replicas: %d",
num_async_replicas)
lr /= math.sqrt(float(num_async_replicas))
train_op = optimize.optimize(loss, lr, self.hparams, use_tpu=use_tpu)
return train_op |
Set hparams with the given mode. | def set_mode(self, mode):
"""Set hparams with the given mode."""
log_info("Setting T2TModel mode to '%s'", mode)
hparams = hparams_lib.copy_hparams(self._original_hparams)
hparams.add_hparam("mode", mode)
# When not in training mode, set all forms of dropout to zero.
if mode != tf.estimator.ModeKeys.TRAIN:
for key in hparams.values():
if key.endswith("dropout") or key == "label_smoothing":
log_info("Setting hparams.%s to 0.0", key)
setattr(hparams, key, 0.0)
self._hparams = hparams |
Autoregressive eval.
Quadratic time in decode_length.
Args:
  features: a map of string to `Tensor`
decode_length: an integer. How many additional timesteps to decode.
Returns:
logits: `Tensor`
losses: a dictionary: {loss-name (string): floating point `Scalar`}.
Contains a single key "training". | def eval_autoregressive(self, features=None, decode_length=50):
"""Autoregressive eval.
Quadratic time in decode_length.
Args:
    features: a map of string to `Tensor`
decode_length: an integer. How many additional timesteps to decode.
Returns:
logits: `Tensor`
losses: a dictionary: {loss-name (string): floating point `Scalar`}.
Contains a single key "training".
"""
results = self._slow_greedy_infer(features, decode_length=decode_length)
return results["logits"], results["losses"] |
An inference method.
Quadratic time in decode_length.
Args:
  features: a map of string to `Tensor`
decode_length: an integer. How many additional timesteps to decode.
beam_size: number of beams.
top_beams: an integer. How many of the beams to return.
  alpha: Float that controls the length penalty. The larger the alpha, the
    stronger the preference for longer translations.
use_tpu: bool, whether to build the inference graph for TPU.
Returns:
A dict of decoding results {
"outputs": integer `Tensor` of decoded ids of shape
[batch_size, <= decode_length] if beam_size == 1 or
[batch_size, top_beams, <= decode_length]
"scores": decoding log probs from the beam search,
None if using greedy decoding (beam_size=1)
}
if slow greedy decoding is used then the dict will also contain {
"logits": `Tensor` of shape [batch_size, time, 1, 1, vocab_size].
"losses": a dictionary: {loss-name (string): floating point `Scalar`
} | def infer(self,
features=None,
decode_length=50,
beam_size=1,
top_beams=1,
alpha=0.0,
use_tpu=False):
"""A inference method.
Quadratic time in decode_length.
Args:
    features: a map of string to `Tensor`
decode_length: an integer. How many additional timesteps to decode.
beam_size: number of beams.
top_beams: an integer. How many of the beams to return.
    alpha: Float that controls the length penalty. The larger the alpha, the
      stronger the preference for longer translations.
use_tpu: bool, whether to build the inference graph for TPU.
Returns:
A dict of decoding results {
"outputs": integer `Tensor` of decoded ids of shape
[batch_size, <= decode_length] if beam_size == 1 or
[batch_size, top_beams, <= decode_length]
"scores": decoding log probs from the beam search,
None if using greedy decoding (beam_size=1)
}
if slow greedy decoding is used then the dict will also contain {
"logits": `Tensor` of shape [batch_size, time, 1, 1, vocab_size].
"losses": a dictionary: {loss-name (string): floating point `Scalar`
}
"""
set_custom_getter_compose(self._custom_getter)
with self._eager_var_store.as_default():
# TODO(rsepassi): Make decoding work with real-valued model outputs
# (i.e. if the target modality is RealModality).
self.prepare_features_for_infer(features)
if not self.has_input and beam_size > 1:
log_warn("Beam searching for a model with no inputs.")
if not self.has_input and self.hparams.sampling_method != "random":
log_warn("Non-random sampling for a model with no inputs.")
self._fill_problem_hparams_features(features)
if self._problem_hparams:
target_modality = self._problem_hparams.modality["targets"]
if target_modality == modalities.ModalityType.CLASS_LABEL:
beam_size = 1 # No use to run beam-search for a single class.
if beam_size == 1:
log_info("Greedy Decoding")
results = self._greedy_infer(features, decode_length, use_tpu)
else:
log_info("Beam Decoding with beam size %d" % beam_size)
results = self._beam_decode(features, decode_length, beam_size,
top_beams, alpha, use_tpu)
return results |
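# A minimal usage sketch (not part of the original source): run beam decoding
# through infer() on an already-built model instance. `model` and `features`
# are hypothetical placeholders.
def decode_with_beam(model, features):
  result = model.infer(
      features, decode_length=50, beam_size=4, top_beams=1, alpha=0.6)
  return result["outputs"], result["scores"]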
Beam search decoding.
Models should ideally implement a more efficient version of this function.
Args:
  features: a map of string to `Tensor`
decode_length: an integer. How many additional timesteps to decode.
beam_size: number of beams.
top_beams: an integer. How many of the beams to return.
  alpha: Float that controls the length penalty. The larger the alpha, the
    stronger the preference for longer translations.
use_tpu: A bool, whether to do beam decode on TPU.
Returns:
samples: an integer `Tensor`. Top samples from the beam search | def _beam_decode(self,
features,
decode_length,
beam_size,
top_beams,
alpha,
use_tpu=False):
"""Beam search decoding.
Models should ideally implement a more efficient version of this function.
Args:
    features: a map of string to `Tensor`
decode_length: an integer. How many additional timesteps to decode.
beam_size: number of beams.
top_beams: an integer. How many of the beams to return.
    alpha: Float that controls the length penalty. The larger the alpha, the
      stronger the preference for longer translations.
use_tpu: A bool, whether to do beam decode on TPU.
Returns:
samples: an integer `Tensor`. Top samples from the beam search
"""
return self._beam_decode_slow(features, decode_length, beam_size, top_beams,
alpha, use_tpu) |
Slow version of Beam search decoding.
Quadratic time in decode_length.
Args:
  features: a map of string to `Tensor`
decode_length: an integer. How many additional timesteps to decode.
beam_size: number of beams.
top_beams: an integer. How many of the beams to return.
  alpha: Float that controls the length penalty. The larger the alpha, the
    stronger the preference for longer translations.
use_tpu: A bool, whether to do slow beam decode on TPU.
Returns:
samples: an integer `Tensor`. Top samples from the beam search.
Raises:
NotImplementedError: If use_tpu is set to true. | def _beam_decode_slow(self, features, decode_length, beam_size, top_beams,
alpha, use_tpu=False):
"""Slow version of Beam search decoding.
Quadratic time in decode_length.
Args:
    features: a map of string to `Tensor`
decode_length: an integer. How many additional timesteps to decode.
beam_size: number of beams.
top_beams: an integer. How many of the beams to return.
    alpha: Float that controls the length penalty. The larger the alpha, the
      stronger the preference for longer translations.
use_tpu: A bool, whether to do slow beam decode on TPU.
Returns:
samples: an integer `Tensor`. Top samples from the beam search.
Raises:
NotImplementedError: If use_tpu is set to true.
"""
batch_size = common_layers.shape_list(features["inputs"])[0]
def symbols_to_logits_fn(ids, i=None):
"""Go from ids to logits."""
ids = tf.expand_dims(tf.expand_dims(ids, axis=2), axis=3)
ids = tf.pad(ids[:, 1:], [[0, 0], [0, 1], [0, 0], [0, 0]])
if "partial_targets" in features:
pt = features["partial_targets"]
pt_length = common_layers.shape_list(pt)[1]
pt = tf.tile(pt, [1, beam_size])
pt = tf.reshape(pt, [batch_size * beam_size, pt_length, 1, 1])
ids = tf.concat([pt, ids], axis=1)
features["targets"] = ids
if i is not None:
features["decode_loop_step"] = i
self._coverage = None
logits, _ = self(features) # pylint: disable=not-callable
# now self._coverage is a coverage tensor for the first datashard.
# it has shape [batch_size] and contains floats between 0 and
# source_length.
if self._problem_hparams:
modality = self._problem_hparams.modality["targets"]
top = self._hparams.top.get("targets", modalities.get_top(modality))
if getattr(top, "pointwise", False):
return tf.squeeze(logits, axis=[1, 2, 3])
# -1 due to the pad above.
current_output_position = common_layers.shape_list(ids)[1] - 1
logits = logits[:, current_output_position, :, :]
return tf.squeeze(logits, axis=[1, 2])
def _clone_examples_for_beam(old_feature, n):
"""Clone each example n times."""
old_shape = common_layers.shape_list(old_feature)
assert len(old_shape) >= 1
# Expand the inputs in to the beam size.
feature = tf.expand_dims(old_feature, 1)
feature = tf.tile(feature, [1, n] + [1] * (len(old_shape) - 1))
new_shape = common_layers.shape_list(feature)
feature = tf.reshape(feature,
[new_shape[0] * new_shape[1]] + new_shape[2:])
return feature
initial_ids = tf.zeros([batch_size], dtype=tf.int32)
# Clone select features multiple times to account for beam size.
old_features = {}
for feature_name in ["inputs", "knowledge"]:
if feature_name not in features:
continue
old_features[feature_name] = features[feature_name]
features[feature_name] = _clone_examples_for_beam(
features[feature_name], beam_size)
vocab_size = self._problem_hparams.vocab_size["targets"]
if vocab_size is not None and hasattr(self._hparams, "vocab_divisor"):
vocab_size += (-vocab_size) % self._hparams.vocab_divisor
# Setting decode length to input length + decode_length
if "partial_targets" not in features:
inputs = features["inputs"]
decode_length = (common_layers.shape_list(inputs)[1] +
features.get("decode_length", decode_length))
ids, scores, _ = beam_search.beam_search(
symbols_to_logits_fn,
initial_ids,
beam_size,
decode_length,
vocab_size,
alpha,
stop_early=(top_beams == 1),
use_tpu=use_tpu)
# Set features back to the unexpanded form to not to confuse the
# Estimator!
features.update(old_features)
# Return `top_beams` decodings (also remove initial id from the beam search)
# TODO(lukaszkaiser): make it work multi-problem.
if top_beams == 1:
samples = ids[:, 0, 1:]
else:
samples = ids[:, :top_beams, 1:]
return {"outputs": samples, "scores": scores} |
A greedy inference method.
Models should ideally implement a more efficient version of this function.
Args:
  features: a map of string to `Tensor`
decode_length: an integer. How many additional timesteps to decode.
use_tpu: A bool, whether to build the inference graph for TPU.
Returns:
A dict of decoding results {
"outputs": integer `Tensor` of decoded ids of shape
[batch_size, <= decode_length] if beam_size == 1 or
[batch_size, top_beams, <= decode_length]
"scores": None
"logits": `Tensor` of shape [batch_size, time, 1, 1, vocab_size].
"losses": a dictionary: {loss-name (string): floating point `Scalar`}
} | def _greedy_infer(self, features, decode_length, use_tpu=False):
"""A greedy inference method.
Models should ideally implement a more efficient version of this function.
Args:
    features: a map of string to `Tensor`
decode_length: an integer. How many additional timesteps to decode.
use_tpu: A bool, whether to build the inference graph for TPU.
Returns:
A dict of decoding results {
"outputs": integer `Tensor` of decoded ids of shape
[batch_size, <= decode_length] if beam_size == 1 or
[batch_size, top_beams, <= decode_length]
"scores": None
"logits": `Tensor` of shape [batch_size, time, 1, 1, vocab_size].
"losses": a dictionary: {loss-name (string): floating point `Scalar`}
}
"""
if use_tpu:
return self._slow_greedy_infer_tpu(features, decode_length)
return self._slow_greedy_infer(features, decode_length) |
A slow greedy inference method on TPU.
Quadratic time in decode_length.
Args:
  features: A map of string to `Tensor`.
decode_length: An integer, how many additional timesteps to decode.
Returns:
A dict of decoding results {
"outputs": integer `Tensor` of decoded ids of shape
[batch_size, <= decode_length] if beam_size == 1 or
[batch_size, top_beams, <= decode_length]
"scores": None
"logits": `Tensor` of shape [batch_size, time, 1, 1, vocab_size].
"losses": a dictionary: {loss-name (string): floating point `Scalar`}
} | def _slow_greedy_infer_tpu(self, features, decode_length):
"""A slow greedy inference method on TPU.
Quadratic time in decode_length.
Args:
    features: A map of string to `Tensor`.
decode_length: An integer, how many additional timesteps to decode.
Returns:
A dict of decoding results {
"outputs": integer `Tensor` of decoded ids of shape
[batch_size, <= decode_length] if beam_size == 1 or
[batch_size, top_beams, <= decode_length]
"scores": None
"logits": `Tensor` of shape [batch_size, time, 1, 1, vocab_size].
"losses": a dictionary: {loss-name (string): floating point `Scalar`}
}
"""
if not features:
features = {}
inputs_old = None
if "inputs" in features and len(features["inputs"].shape) < 4:
inputs_old = features["inputs"]
features["inputs"] = tf.expand_dims(features["inputs"], 2)
if not self.has_input:
# Prepare partial targets.
# In either features["inputs"] or features["targets"].
# We force the outputs to begin with these sequences.
partial_targets = features.get("inputs")
if partial_targets is None:
partial_targets = features["targets"]
features["partial_targets"] = tf.to_int64(partial_targets)
  # Save the targets in a var and reassign it after the tf.while loop to avoid
  # having targets inside a 'while' frame. This ensures that targets used in
  # metric functions stay in the same frame as other vars.
targets_old = features.get("targets", None)
target_modality = self._problem_hparams.modality["targets"]
def infer_step(i, recent_output, recent_logits, unused_loss):
"""Inference step."""
if not tf.executing_eagerly():
recent_output.set_shape([None, None, None, 1])
padded = tf.pad(recent_output, [[0, 0], [0, 1], [0, 0], [0, 0]])
features["targets"] = padded
# This is inefficient in that it generates samples at all timesteps,
# not just the last one, except if target_modality is pointwise.
features["decode_loop_step"] = i
samples, logits, losses = self.sample(features)
# Concatenate the already-generated recent_output with last timestep
    # of the newly-generated samples.
top = self._hparams.top.get("targets",
modalities.get_top(target_modality))
if getattr(top, "pointwise", False):
cur_sample = samples[:, -1, :, :]
else:
cur_sample = samples[:, i, :, :]
samples = tf.transpose(recent_output, perm=[1, 0, 2, 3])
samples = inplace_ops.alias_inplace_update(samples, i,
tf.to_int64(cur_sample))
samples = tf.transpose(samples, perm=[1, 0, 2, 3])
if not tf.executing_eagerly():
samples.set_shape([None, None, None, 1])
# Assuming we have one shard for logits.
recent_logits = tf.transpose(recent_logits, perm=[1, 0, 2, 3, 4])
recent_logits = inplace_ops.alias_inplace_update(
recent_logits, i, tf.squeeze(logits[:, -1:], axis=1))
logits = tf.transpose(recent_logits, perm=[1, 0, 2, 3, 4])
loss = sum([l for l in losses.values() if l is not None])
return i + 1, samples, logits, loss
# Create an initial output tensor. This will be passed
# to the infer_step, which adds one timestep at every iteration.
if "partial_targets" in features:
initial_output = tf.to_int64(features["partial_targets"])
while len(initial_output.get_shape().as_list()) < 4:
initial_output = tf.expand_dims(initial_output, 2)
batch_size = common_layers.shape_list(initial_output)[0]
else:
batch_size = common_layers.shape_list(features["inputs"])[0]
initial_output = tf.zeros((batch_size, 0, 1, 1), dtype=tf.int64)
# Hack: foldl complains when the output shape is less specified than the
# input shape, so we confuse it about the input shape.
initial_output = tf.slice(initial_output, [0, 0, 0, 0],
common_layers.shape_list(initial_output))
target_modality = self._problem_hparams.modality["targets"]
if target_modality == modalities.ModalityType.CLASS_LABEL:
decode_length = 1
else:
if "partial_targets" in features:
prefix_length = common_layers.shape_list(features["partial_targets"])[1]
else:
prefix_length = common_layers.shape_list(features["inputs"])[1]
decode_length = prefix_length + decode_length
# Initial values of result, logits and loss.
result = tf.concat(
[initial_output,
tf.zeros([batch_size, decode_length, 1, 1], tf.int64)],
axis=1)
# tensor padded to [batch_size, decode_length, 1, 1, vocab_size]
vocab_size = self._problem_hparams.vocab_size["targets"]
if vocab_size is not None and hasattr(self._hparams, "vocab_divisor"):
vocab_size += (-vocab_size) % self._hparams.vocab_divisor
logits = tf.zeros((batch_size, decode_length, 1, 1, vocab_size))
if not tf.executing_eagerly():
logits.set_shape([None, None, None, None, None])
loss = 0.0
def while_exit_cond(i, result, logits, loss): # pylint: disable=unused-argument
"""Exit the loop either if reach decode_length or EOS."""
not_overflow = i < decode_length
if self._problem_hparams.stop_at_eos:
def fn_not_eos():
# Check if the last predicted element is a EOS
return tf.reduce_any(
tf.not_equal(
tf.squeeze(result[:, -1, :, :]), text_encoder.EOS_ID))
not_eos = tf.cond(
# We only check for early stopping if there is at least 1 element (
# otherwise not_eos will crash).
tf.not_equal(i, 0),
fn_not_eos,
lambda: True,
)
return tf.cond(
tf.equal(batch_size, 1),
# If batch_size == 1, we check EOS for early stopping.
lambda: tf.logical_and(not_overflow, not_eos),
# Else, just wait for max length
lambda: not_overflow)
return not_overflow
_, result, logits, loss = tf.while_loop(
while_exit_cond,
infer_step, [tf.constant(0), result, logits, loss],
shape_invariants=[
tf.TensorShape([]),
tf.TensorShape([batch_size, decode_length, 1, 1]),
tf.TensorShape([batch_size, decode_length, 1, 1, vocab_size]),
tf.TensorShape([]),
],
back_prop=False,
parallel_iterations=1)
if inputs_old is not None: # Restore to not confuse Estimator.
features["inputs"] = inputs_old
# Reassign targets back to the previous value.
if targets_old is not None:
features["targets"] = targets_old
losses = {"training": loss}
if "partial_targets" in features:
partial_target_length = common_layers.shape_list(
features["partial_targets"])[1]
result = tf.slice(result, [0, partial_target_length, 0, 0],
[-1, -1, -1, -1])
return {
"outputs": result,
"scores": None,
"logits": logits,
"losses": losses,
} |