<SYSTEM_TASK:>
Builds a SubwordTextEncoder from the generated text.
<END_TASK>
<USER_TASK:>
Description:
def build_from_generator(cls,
generator,
target_size,
max_subtoken_length=None,
reserved_tokens=None):
"""Builds a SubwordTextEncoder from the generated text.
Args:
generator: yields text.
target_size: int, approximate vocabulary size to create.
max_subtoken_length: Maximum length of a subtoken. If this is not set,
then the runtime and memory use of creating the vocab is quadratic in
the length of the longest token. If this is set, then it is instead
O(max_subtoken_length * length of longest token).
reserved_tokens: List of reserved tokens. The global variable
`RESERVED_TOKENS` must be a prefix of `reserved_tokens`. If this
argument is `None`, it will use `RESERVED_TOKENS`.
Returns:
SubwordTextEncoder with `vocab_size` approximately `target_size`.
""" |
token_counts = collections.defaultdict(int)
for item in generator:
for tok in tokenizer.encode(native_to_unicode(item)):
token_counts[tok] += 1
encoder = cls.build_to_target_size(
target_size, token_counts, 1, 1e3,
max_subtoken_length=max_subtoken_length,
reserved_tokens=reserved_tokens)
return encoder |
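A minimal usage sketch (the corpus lines and the 2**10 target size are hypothetical, and it assumes the tensor2tensor text_encoder module is importable):
from tensor2tensor.data_generators import text_encoder

def corpus_generator():
  # Stand-in for a real generator that yields text lines.
  for line in ["the quick brown fox", "jumps over the lazy dog"]:
    yield line

encoder = text_encoder.SubwordTextEncoder.build_from_generator(
    corpus_generator(), target_size=2**10)
ids = encoder.encode("the quick fox")
print(encoder.decode(ids))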
<SYSTEM_TASK:>
Builds a SubwordTextEncoder that has `vocab_size` near `target_size`.
<END_TASK>
<USER_TASK:>
Description:
def build_to_target_size(cls,
target_size,
token_counts,
min_val,
max_val,
max_subtoken_length=None,
reserved_tokens=None,
num_iterations=4):
"""Builds a SubwordTextEncoder that has `vocab_size` near `target_size`.
Uses simple recursive binary search to find a minimum token count that most
closely matches the `target_size`.
Args:
target_size: Desired vocab_size to approximate.
token_counts: A dictionary of token counts, mapping string to int.
min_val: An integer; lower bound for the minimum token count.
max_val: An integer; upper bound for the minimum token count.
max_subtoken_length: Maximum length of a subtoken. If this is not set,
then the runtime and memory use of creating the vocab is quadratic in
the length of the longest token. If this is set, then it is instead
O(max_subtoken_length * length of longest token).
reserved_tokens: List of reserved tokens. The global variable
`RESERVED_TOKENS` must be a prefix of `reserved_tokens`. If this
argument is `None`, it will use `RESERVED_TOKENS`.
num_iterations: An integer; how many iterations of refinement.
Returns:
A SubwordTextEncoder instance.
Raises:
ValueError: If `min_val` is greater than `max_val`.
""" |
if min_val > max_val:
raise ValueError("Lower bound for the minimum token count "
"is greater than the upper bound.")
if target_size < 1:
raise ValueError("Target size must be positive.")
if reserved_tokens is None:
reserved_tokens = RESERVED_TOKENS
def bisect(min_val, max_val):
"""Bisection to find the right size."""
present_count = (max_val + min_val) // 2
tf.logging.info("Trying min_count %d" % present_count)
subtokenizer = cls()
subtokenizer.build_from_token_counts(
token_counts, present_count, num_iterations,
max_subtoken_length=max_subtoken_length,
reserved_tokens=reserved_tokens)
# Being within 1% of the target size is ok.
is_ok = abs(subtokenizer.vocab_size - target_size) * 100 < target_size
# If min_val == max_val, we can't do any better than this.
if is_ok or min_val >= max_val or present_count < 2:
return subtokenizer
if subtokenizer.vocab_size > target_size:
other_subtokenizer = bisect(present_count + 1, max_val)
else:
other_subtokenizer = bisect(min_val, present_count - 1)
if other_subtokenizer is None:
return subtokenizer
if (abs(other_subtokenizer.vocab_size - target_size) <
abs(subtokenizer.vocab_size - target_size)):
return other_subtokenizer
return subtokenizer
return bisect(min_val, max_val) |
<SYSTEM_TASK:>
Train a SubwordTextEncoder based on a dictionary of word counts.
<END_TASK>
<USER_TASK:>
Description:
def build_from_token_counts(self,
token_counts,
min_count,
num_iterations=4,
reserved_tokens=None,
max_subtoken_length=None):
"""Train a SubwordTextEncoder based on a dictionary of word counts.
Args:
token_counts: a dictionary of Unicode strings to int.
min_count: an integer - discard subtokens with lower counts.
num_iterations: an integer. how many iterations of refinement.
reserved_tokens: List of reserved tokens. The global variable
`RESERVED_TOKENS` must be a prefix of `reserved_tokens`. If this
argument is `None`, it will use `RESERVED_TOKENS`.
max_subtoken_length: Maximum length of a subtoken. If this is not set,
then the runtime and memory use of creating the vocab is quadratic in
the length of the longest token. If this is set, then it is instead
O(max_subtoken_length * length of longest token).
Raises:
ValueError: if `reserved_tokens` is provided and `RESERVED_TOKENS` is not a
prefix of it. In this case, it is not clear what the space is being
reserved for, or when it will be filled in.
""" |
if reserved_tokens is None:
reserved_tokens = RESERVED_TOKENS
else:
# There is not complete freedom in replacing RESERVED_TOKENS.
for default, proposed in zip(RESERVED_TOKENS, reserved_tokens):
if default != proposed:
raise ValueError("RESERVED_TOKENS must be a prefix of "
"reserved_tokens.")
# Initialize the alphabet. Note, this must include reserved tokens or it can
# result in encoding failures.
alphabet_tokens = chain(six.iterkeys(token_counts),
[native_to_unicode(t) for t in reserved_tokens])
self._init_alphabet_from_tokens(alphabet_tokens)
# Bootstrap the initial list of subtokens with the characters from the
# alphabet plus the escaping characters.
self._init_subtokens_from_list(list(self._alphabet),
reserved_tokens=reserved_tokens)
# We build iteratively. On each iteration, we segment all the words,
# then count the resulting potential subtokens, keeping the ones
# with high enough counts for our new vocabulary.
if min_count < 1:
min_count = 1
for i in range(num_iterations):
tf.logging.info("Iteration {0}".format(i))
# Collect all substrings of the encoded token that break along current
# subtoken boundaries.
subtoken_counts = collections.defaultdict(int)
for token, count in six.iteritems(token_counts):
iter_start_time = time.time()
escaped_token = _escape_token(token, self._alphabet)
subtokens = self._escaped_token_to_subtoken_strings(escaped_token)
start = 0
for subtoken in subtokens:
last_position = len(escaped_token) + 1
if max_subtoken_length is not None:
last_position = min(last_position, start + max_subtoken_length)
for end in range(start + 1, last_position):
new_subtoken = escaped_token[start:end]
subtoken_counts[new_subtoken] += count
start += len(subtoken)
iter_time_secs = time.time() - iter_start_time
if iter_time_secs > 0.1:
tf.logging.info(u"Processing token [{0}] took {1} seconds, consider "
"setting Text2TextProblem.max_subtoken_length to a "
"smaller value.".format(token, iter_time_secs))
# Array of sets of candidate subtoken strings, by length.
len_to_subtoken_strings = []
for subtoken_string, count in six.iteritems(subtoken_counts):
lsub = len(subtoken_string)
if count >= min_count:
while len(len_to_subtoken_strings) <= lsub:
len_to_subtoken_strings.append(set())
len_to_subtoken_strings[lsub].add(subtoken_string)
# Consider the candidates longest to shortest, so that if we accept
# a longer subtoken string, we can decrement the counts of its prefixes.
new_subtoken_strings = []
for lsub in range(len(len_to_subtoken_strings) - 1, 0, -1):
subtoken_strings = len_to_subtoken_strings[lsub]
for subtoken_string in subtoken_strings:
count = subtoken_counts[subtoken_string]
if count >= min_count:
# Exclude alphabet tokens here, as they must be included later,
# explicitly, regardless of count.
if subtoken_string not in self._alphabet:
new_subtoken_strings.append((count, subtoken_string))
for l in range(1, lsub):
subtoken_counts[subtoken_string[:l]] -= count
# Include the alphabet explicitly to guarantee all strings are encodable.
new_subtoken_strings.extend((subtoken_counts.get(a, 0), a)
for a in self._alphabet)
new_subtoken_strings.sort(reverse=True)
# Reinitialize to the candidate vocabulary.
new_subtoken_strings = [subtoken for _, subtoken in new_subtoken_strings]
if reserved_tokens:
escaped_reserved_tokens = [
_escape_token(native_to_unicode(t), self._alphabet)
for t in reserved_tokens
]
new_subtoken_strings = escaped_reserved_tokens + new_subtoken_strings
self._init_subtokens_from_list(new_subtoken_strings)
tf.logging.info("vocab_size = %d" % self.vocab_size) |
<SYSTEM_TASK:>
Debugging dump of the current subtoken vocabulary.
<END_TASK>
<USER_TASK:>
Description:
def dump(self):
"""Debugging dump of the current subtoken vocabulary.""" |
subtoken_strings = [(i, s)
for s, i in six.iteritems(self._subtoken_string_to_id)]
print(u", ".join(u"{0} : '{1}'".format(i, s)
for i, s in sorted(subtoken_strings))) |
<SYSTEM_TASK:>
Load from a file object.
<END_TASK>
<USER_TASK:>
Description:
def _load_from_file_object(self, f):
"""Load from a file object.
Args:
f: File object to load vocabulary from
""" |
subtoken_strings = []
for line in f:
s = line.strip()
# Some vocab files wrap words in single quotes, but others don't
if ((s.startswith("'") and s.endswith("'")) or
(s.startswith("\"") and s.endswith("\""))):
s = s[1:-1]
subtoken_strings.append(native_to_unicode(s))
self._init_subtokens_from_list(subtoken_strings)
self._init_alphabet_from_tokens(subtoken_strings) |
<SYSTEM_TASK:>
Transform a string with a filename into a list of RGB integers.
<END_TASK>
<USER_TASK:>
Description:
def encode(self, s):
"""Transform a string with a filename into a list of RGB integers.
Args:
s: path to the file with an image.
Returns:
ids: list of integers
""" |
try:
import matplotlib.image as im # pylint: disable=g-import-not-at-top
except ImportError as e:
tf.logging.warning(
"Reading an image requires matplotlib to be installed: %s", e)
raise NotImplementedError("Image reading not implemented.")
return im.imread(s) |
<SYSTEM_TASK:>
Transform a sequence of int ids into an image file.
<END_TASK>
<USER_TASK:>
Description:
def decode(self, ids, strip_extraneous=False):
"""Transform a sequence of int ids into an image file.
Args:
ids: list of integers to be converted.
strip_extraneous: unused
Returns:
Path to the temporary file where the image was saved.
Raises:
ValueError: if the ids are not of the appropriate size.
""" |
del strip_extraneous
_, tmp_file_path = tempfile.mkstemp("_decode.png")
if self._height is None or self._width is None:
size = int(math.sqrt(len(ids) / self._channels))
length = size * size * self._channels
else:
size = None
length = self._height * self._width * self._channels
if len(ids) != length:
raise ValueError("Length of ids (%d) must be height (%d) x width (%d) x "
"channels (%d); %d != %d.\n Ids: %s"
% (len(ids), self._height, self._width, self._channels,
len(ids), length, " ".join([str(i) for i in ids])))
with tf.Graph().as_default():
raw = tf.constant(ids, dtype=tf.uint8)
if size is None:
img = tf.reshape(raw, [self._height, self._width, self._channels])
else:
img = tf.reshape(raw, [size, size, self._channels])
png = tf.image.encode_png(img)
op = tf.write_file(tmp_file_path, png)
with tf.Session() as sess:
sess.run(op)
return tmp_file_path |
<SYSTEM_TASK:>
Helper utility to make a tiled field of images from numpy arrays.
<END_TASK>
<USER_TASK:>
Description:
def _pack_images(images, rows, cols):
"""Helper utility to make a tiled field of images from numpy arrays.
Args:
images: Image tensor in shape [N, W, H, C].
rows: Number of images per row in tiled image.
cols: Number of images per column in tiled image.
Returns:
A tiled image of shape [W * rows, H * cols, C].
Truncates incomplete rows.
""" |
shape = onp.shape(images)
width, height, depth = shape[-3:]
images = onp.reshape(images, (-1, width, height, depth))
batch = onp.shape(images)[0]
rows = onp.minimum(rows, batch)
cols = onp.minimum(batch // rows, cols)
images = images[:rows * cols]
images = onp.reshape(images, (rows, cols, width, height, depth))
images = onp.transpose(images, [0, 2, 1, 3, 4])
images = onp.reshape(images, [rows * width, cols * height, depth])
return images |
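A numpy-only shape check (the batch of 12 random 8x8 single-channel images is hypothetical; assumes _pack_images is in scope):
import numpy as onp

images = onp.random.rand(12, 8, 8, 1)
tiled = _pack_images(images, rows=3, cols=4)
print(tiled.shape)  # (24, 32, 1): a 3 x 4 grid of 8 x 8 tiles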
<SYSTEM_TASK:>
Convert an operative config string to markdown format.
<END_TASK>
<USER_TASK:>
Description:
def markdownify_operative_config_str(string):
"""Convert an operative config string to markdown format.""" |
# TODO(b/37527917): Total hack below. Implement more principled formatting.
def process(line):
"""Convert a single line to markdown format."""
if not line.startswith('#'):
return ' ' + line
line = line[2:]
if line.startswith('===='):
return ''
if line.startswith('None'):
return ' # None.'
if line.endswith(':'):
return '#### ' + line
return line
output_lines = []
for line in string.splitlines():
procd_line = process(line)
if procd_line is not None:
output_lines.append(procd_line)
return '\n'.join(output_lines) |
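An illustrative round trip on a tiny, hypothetical gin operative-config string:
config_str = "\n".join([
    "# Parameters for my_layer:",
    "# ==============================",
    "my_layer.units = 128",
])
print(markdownify_operative_config_str(config_str))
# '#' lines ending in ':' become '#### ...' headings, '====' separator lines
# become empty, and all other lines are indented four spaces as code.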
<SYSTEM_TASK:>
Saves scalar value.
<END_TASK>
<USER_TASK:>
Description:
def scalar(self, tag, value, step=None):
"""Saves scalar value.
Args:
tag: str: label for this data
value: int/float: number to log
step: int: training step
""" |
value = float(onp.array(value))
if step is None:
step = self._step
else:
self._step = step
summary = Summary(value=[Summary.Value(tag=tag, simple_value=value)])
self.add_summary(summary, step) |
<SYSTEM_TASK:>
Saves matplotlib plot output to summary image.
<END_TASK>
<USER_TASK:>
Description:
def plot(self, tag, mpl_plt, step=None, close_plot=True):
"""Saves matplotlib plot output to summary image.
Args:
tag: str: label for this data
mpl_plt: matplotlib stateful pyplot object with prepared plotting state
step: int: training step
close_plot: bool: automatically closes plot
""" |
if step is None:
step = self._step
else:
self._step = step
fig = mpl_plt.get_current_fig_manager()
img_w, img_h = fig.canvas.get_width_height()
image_buf = io.BytesIO()
mpl_plt.savefig(image_buf, format='png')
image_summary = Summary.Image(
encoded_image_string=image_buf.getvalue(),
colorspace=4, # RGBA
height=img_h,
width=img_w)
summary = Summary(value=[Summary.Value(tag=tag, image=image_summary)])
self.add_summary(summary, step)
if close_plot:
mpl_plt.close() |
<SYSTEM_TASK:>
Saves audio.
<END_TASK>
<USER_TASK:>
Description:
def audio(self, tag, audiodata, step=None, sample_rate=44100):
"""Saves audio.
NB: single channel only right now.
Args:
tag: str: label for this data
audiodata: ndarray [Nsamples,]: data between (-1.0,1.0) to save as wave
step: int: training step
sample_rate: sample rate of passed in audio buffer
""" |
audiodata = onp.array(audiodata)
if step is None:
step = self._step
else:
self._step = step
audiodata = onp.clip(onp.squeeze(audiodata), -1, 1)
if audiodata.ndim != 1:
raise ValueError('Audio data must be 1D.')
sample_list = (32767.0 * audiodata).astype(int).tolist()
wio = io.BytesIO()
wav_buf = wave.open(wio, 'wb')
wav_buf.setnchannels(1)
wav_buf.setsampwidth(2)
wav_buf.setframerate(sample_rate)
enc = b''.join([struct.pack('<h', v) for v in sample_list])
wav_buf.writeframes(enc)
wav_buf.close()
encoded_audio_bytes = wio.getvalue()
wio.close()
audio = Summary.Audio(
sample_rate=sample_rate,
num_channels=1,
length_frames=len(sample_list),
encoded_audio_string=encoded_audio_bytes,
content_type='audio/wav')
summary = Summary(value=[Summary.Value(tag=tag, audio=audio)])
self.add_summary(summary, step) |
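A standard-library sketch of the same float-to-16-bit-PCM WAV encoding, with a hypothetical 440 Hz tone as input:
import io
import struct
import wave

import numpy as onp

sample_rate = 44100
samples = onp.sin(2 * onp.pi * 440 * onp.arange(sample_rate) / sample_rate)
pcm = (32767.0 * onp.clip(samples, -1, 1)).astype(int).tolist()
wio = io.BytesIO()
wav_buf = wave.open(wio, 'wb')
wav_buf.setnchannels(1)   # mono
wav_buf.setsampwidth(2)   # 2 bytes per sample -> 16-bit PCM
wav_buf.setframerate(sample_rate)
wav_buf.writeframes(b''.join(struct.pack('<h', v) for v in pcm))
wav_buf.close()
wav_bytes = wio.getvalue()  # ready to wrap in a Summary.Audio proto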
<SYSTEM_TASK:>
Saves histogram of values.
<END_TASK>
<USER_TASK:>
Description:
def histogram(self, tag, values, bins, step=None):
"""Saves histogram of values.
Args:
tag: str: label for this data
values: ndarray: will be flattened by this routine
bins: number of bins in histogram, or array of bins for onp.histogram
step: int: training step
""" |
if step is None:
step = self._step
else:
self._step = step
values = onp.array(values)
bins = onp.array(bins)
values = onp.reshape(values, -1)
counts, limits = onp.histogram(values, bins=bins)
# boundary logic
cum_counts = onp.cumsum(onp.greater(counts, 0, dtype=onp.int32))
start, end = onp.searchsorted(
cum_counts, [0, cum_counts[-1] - 1], side='right')
start, end = int(start), int(end) + 1
counts = (
counts[start -
1:end] if start > 0 else onp.concatenate([[0], counts[:end]]))
limits = limits[start:end + 1]
sum_sq = values.dot(values)
histo = HistogramProto(
min=values.min(),
max=values.max(),
num=len(values),
sum=values.sum(),
sum_squares=sum_sq,
bucket_limit=limits.tolist(),
bucket=counts.tolist())
summary = Summary(value=[Summary.Value(tag=tag, histo=histo)])
self.add_summary(summary, step) |
<SYSTEM_TASK:>
Saves a text summary.
<END_TASK>
<USER_TASK:>
Description:
def text(self, tag, textdata, step=None):
"""Saves a text summary.
Args:
tag: str: label for this data
textdata: string, or 1D/2D list/numpy array of strings
step: int: training step
Note: markdown formatting is rendered by tensorboard.
""" |
if step is None:
step = self._step
else:
self._step = step
smd = SummaryMetadata(
plugin_data=SummaryMetadata.PluginData(plugin_name='text'))
if isinstance(textdata, (str, bytes)):
tensor = tf.make_tensor_proto(
values=[textdata.encode(encoding='utf_8')], shape=(1,))
else:
textdata = onp.array(textdata) # convert lists, jax arrays, etc.
datashape = onp.shape(textdata)
if len(datashape) == 1:
tensor = tf.make_tensor_proto(
values=[td.encode(encoding='utf_8') for td in textdata],
shape=(datashape[0],))
elif len(datashape) == 2:
tensor = tf.make_tensor_proto(
values=[
td.encode(encoding='utf_8') for td in onp.reshape(textdata, -1)
],
shape=(datashape[0], datashape[1]))
summary = Summary(
value=[Summary.Value(tag=tag, metadata=smd, tensor=tensor)])
self.add_summary(summary, step) |
<SYSTEM_TASK:>
Import module at usr_dir, if provided.
<END_TASK>
<USER_TASK:>
Description:
def import_usr_dir(usr_dir):
"""Import module at usr_dir, if provided.""" |
if not usr_dir:
return
if usr_dir == INTERNAL_USR_DIR_PACKAGE:
# The package has been installed with pip under this name for Cloud ML
# Engine so just import it.
importlib.import_module(INTERNAL_USR_DIR_PACKAGE)
return
dir_path = os.path.abspath(os.path.expanduser(usr_dir).rstrip("/"))
containing_dir, module_name = os.path.split(dir_path)
tf.logging.info("Importing user module %s from path %s", module_name,
containing_dir)
sys.path.insert(0, containing_dir)
importlib.import_module(module_name)
sys.path.pop(0) |
<SYSTEM_TASK:>
Check if name is in orig_ctr or in one of the other type containers.
<END_TASK>
<USER_TASK:>
Description:
def _check_reset_and_type_change(self, name, orig_ctr):
"""Check if name is in orig_ctr or in one of the other type containers.""" |
# Resetting a hyperparameter
if name in orig_ctr:
tf.logging.warning("Overwriting hparam %s", name)
ctr_names = [
(self._categorical_params, "categorical"),
(self._discrete_params, "discrete"),
(self._float_params, "float"),
(self._int_params, "int"),
]
ctrs, names = list(zip(*ctr_names))
orig_name = names[ctrs.index(orig_ctr)]
for ctr, ctr_name in ctr_names:
if ctr is orig_ctr:
continue
# Using a different type for the same hyperparameter name
if name in ctr:
raise ValueError("Setting hyperparameter %s as type %s, but a "
"hyperparemeter of the same name was originally "
"registered as type %s" % (name, ctr_name, orig_name)) |
<SYSTEM_TASK:>
Create and register problems for the game.
<END_TASK>
<USER_TASK:>
Description:
def register_game(game_name, game_mode="NoFrameskip-v4"):
"""Create and register problems for the game.
Args:
game_name: str, one of the games in ATARI_GAMES, e.g. "bank_heist".
game_mode: the frame skip and sticky keys config.
Raises:
ValueError: if game_name or game_mode are wrong.
""" |
if game_name not in ATARI_GAMES:
raise ValueError("Game %s not in ATARI_GAMES" % game_name)
if game_mode not in ATARI_GAME_MODES:
raise ValueError("Unknown ATARI game mode: %s." % game_mode)
camel_game_name = misc_utils.snakecase_to_camelcase(game_name) + game_mode
# Create and register the Problem
cls = type("Gym%sRandom" % camel_game_name,
(T2TGymEnv,), {"base_env_name": camel_game_name})
registry.register_problem(cls) |
<SYSTEM_TASK:>
Makes a step in all environments.
<END_TASK>
<USER_TASK:>
Description:
def step(self, actions):
"""Makes a step in all environments.
Does any preprocessing and records frames.
Args:
actions: Batch of actions.
Returns:
(obs, rewards, dones) - batches of observations, rewards and done flags
respectively.
Raises:
ValueError: when the data for current epoch has already been loaded.
""" |
if self._store_rollouts and \
self._rollouts_by_epoch_and_split[self.current_epoch]:
raise ValueError(
"Data for current epoch has already been loaded from disk."
)
(obs, unclipped_rewards, dones) = self._step(actions)
obs = self._preprocess_observations(obs)
(min_reward, max_reward) = self.reward_range
rewards = np.around(np.clip(unclipped_rewards, min_reward, max_reward))
if self._store_rollouts:
unclipped_rewards = unclipped_rewards.astype(np.float64)
encoded_obs = self._encode_observations(obs)
for (rollout, frame, action) in zip(
self._current_batch_rollouts, self._current_batch_frames, actions
):
rollout.append(frame._replace(action=action))
# orud = (observation, reward, unclipped_reward, done)
self._current_batch_frames = [
Frame(*orud, action=None)
for orud in zip(encoded_obs, rewards, unclipped_rewards, dones)
]
return (obs, rewards, dones) |
<SYSTEM_TASK:>
Additional data fields to store on disk and their decoders.
<END_TASK>
<USER_TASK:>
Description:
def extra_reading_spec(self):
"""Additional data fields to store on disk and their decoders.""" |
field_names = ("frame_number", "action", "reward", "done")
data_fields = {
name: tf.FixedLenFeature([1], tf.int64) for name in field_names
}
decoders = {
name: tf.contrib.slim.tfexample_decoder.Tensor(tensor_key=name)
for name in field_names
}
return (data_fields, decoders) |
<SYSTEM_TASK:>
Splits frames in the current epoch according to self.dataset_splits.
<END_TASK>
<USER_TASK:>
Description:
def _split_current_epoch(self):
"""Splits frames in the current epoch according to self.dataset_splits.
Rollouts can be broken on shard boundary. This is desirable when we have
few long rollouts and we want to make sure we have data in the dev set.
""" |
num_frames = self._calc_num_frames(self._current_epoch_rollouts)
num_shards = sum(split["shards"] for split in self.dataset_splits)
shard_size = num_frames // num_shards
splits = self.dataset_splits
num_saved_frames = 0
split_index = 0
split_begin_index = 0
rollouts_by_split = collections.defaultdict(list)
def split_size(split_index):
return splits[split_index]["shards"] * shard_size
for rollout in self._current_epoch_rollouts:
num_saved_frames_current_rollout = 0
# Split the rollout into chunks corresponding to dataset splits. In most
# cases there should be only one chunk. On dataset split boundary there
# will be two. If a rollout is longer than the size of a dataset split,
# there might be more.
while num_saved_frames_current_rollout < len(rollout):
max_chunk_length = (
split_begin_index + split_size(split_index) - num_saved_frames
)
if split_index == len(splits) - 1:
# Put the remainder in the last split to preserve the ordering.
max_chunk_length = len(rollout)
rollout_chunk = rollout[
num_saved_frames_current_rollout:
(num_saved_frames_current_rollout + max_chunk_length)
]
rollouts_by_split[splits[split_index]["split"]].append(rollout_chunk)
num_saved_frames_current_rollout += len(rollout_chunk)
num_saved_frames += len(rollout_chunk)
if num_saved_frames == split_begin_index + split_size(split_index):
split_begin_index += split_size(split_index)
split_index = min(split_index + 1, len(splits) - 1)
self._rollouts_by_epoch_and_split[self.current_epoch] = rollouts_by_split
self._current_epoch_rollouts = [] |
<SYSTEM_TASK:>
Optionally converts images from hooks_args to image summaries.
<END_TASK>
<USER_TASK:>
Description:
def convert_predictions_to_image_summaries(hook_args):
"""Optionally converts images from hooks_args to image summaries.
Args:
hook_args: DecodeHookArgs namedtuple
Returns:
summaries: list of tf.Summary values if
hook_args.decode_hparams.display_decoded_images is True, otherwise an
empty list.
""" |
decode_hparams = hook_args.decode_hparams
if not decode_hparams.display_decoded_images:
return []
predictions = hook_args.predictions[0]
# Display ten random inputs and outputs so that tensorboard does not hang.
all_summaries = []
rand_predictions = np.random.choice(predictions, size=10)
for ind, prediction in enumerate(rand_predictions):
output_summary = image_to_tf_summary_value(
prediction["outputs"], tag="%d_output" % ind)
input_summary = image_to_tf_summary_value(
prediction["inputs"], tag="%d_input" % ind)
all_summaries.append(input_summary)
all_summaries.append(output_summary)
return all_summaries |
<SYSTEM_TASK:>
Image resize function used by quite a few image problems.
<END_TASK>
<USER_TASK:>
Description:
def resize_by_area(img, size):
"""image resize function used by quite a few image problems.""" |
return tf.to_int64(
tf.image.resize_images(img, [size, size], tf.image.ResizeMethod.AREA)) |
<SYSTEM_TASK:>
Generator for images that takes image and labels lists and creates pngs.
<END_TASK>
<USER_TASK:>
Description:
def image_generator(images, labels):
"""Generator for images that takes image and labels lists and creates pngs.
Args:
images: list of images given as [width x height x channels] numpy arrays.
labels: list of ints, same length as images.
Yields:
A dictionary representing the images with the following fields:
* image/encoded: the string encoding the image as PNG,
* image/format: the string "png" representing image format,
* image/class/label: an integer representing the label,
* image/height: an integer representing the height,
* image/width: an integer representing the width.
Every field is actually a singleton list of the corresponding type.
Raises:
ValueError: if images is an empty list.
""" |
if not images:
raise ValueError("Must provide some images for the generator.")
width, height, _ = images[0].shape
for (enc_image, label) in zip(encode_images_as_png(images), labels):
yield {
"image/encoded": [enc_image],
"image/format": ["png"],
"image/class/label": [int(label)],
"image/height": [height],
"image/width": [width]
} |
<SYSTEM_TASK:>
Apply random horizontal and vertical shift to images.
<END_TASK>
<USER_TASK:>
Description:
def random_shift(image, wsr=0.1, hsr=0.1):
"""Apply random horizontal and vertical shift to images.
This is the default data-augmentation strategy used on CIFAR in Glow.
Args:
image: a 3-D Tensor
wsr: Width shift range, as a float fraction of the width.
hsr: Height shift range, as a float fraction of the height.
Returns:
images: images translated by the provided wsr and hsr.
""" |
height, width, _ = common_layers.shape_list(image)
width_range, height_range = wsr*width, hsr*height
height_translations = tf.random_uniform((1,), -height_range, height_range)
width_translations = tf.random_uniform((1,), -width_range, width_range)
translations = tf.concat((height_translations, width_translations), axis=0)
return tf.contrib.image.translate(image, translations=translations) |
<SYSTEM_TASK:>
Adds the hparams used by get_standardized_layers.
<END_TASK>
<USER_TASK:>
Description:
def add_standard_attention_hparams(hparams):
"""Adds the hparams used by get_standardized_layers.""" |
# All hyperparameters ending in "dropout" are automatically set to 0.0
# when not in training mode.
# hparams used and which should have been defined outside (in
# common_hparams):
# Global flags
# hparams.mode
# hparams.hidden_size
# Pre-post processing flags
# hparams.layer_preprocess_sequence
# hparams.layer_postprocess_sequence
# hparams.layer_prepostprocess_dropout
# hparams.norm_type
# hparams.norm_epsilon
# Mixture-of-Expert flags
# hparams.moe_hidden_sizes
# hparams.moe_num_experts
# hparams.moe_k
# hparams.moe_loss_coef
# Attention layers flags
hparams.add_hparam("num_heads", 8)
hparams.add_hparam("attention_key_channels", 0)
hparams.add_hparam("attention_value_channels", 0)
hparams.add_hparam("attention_dropout", 0.0)
# Attention: Local
hparams.add_hparam("attention_loc_block_length", 256)
# Attention: Local (unmasked only): How much to look left.
hparams.add_hparam("attention_loc_block_width", 128)
# Attention: Memory-compressed
hparams.add_hparam("attention_red_factor", 3)
hparams.add_hparam("attention_red_type", "conv")
hparams.add_hparam("attention_red_nonlinearity", "none")
# Fully connected layers flags
# To be more consistent, should use filter_size to also control the MOE
# size if moe_hidden_sizes not set.
hparams.add_hparam("filter_size", 2048)
hparams.add_hparam("relu_dropout", 0.0)
return hparams |
<SYSTEM_TASK:>
Computes encdec attention loss between expected and actual attentions.
<END_TASK>
<USER_TASK:>
Description:
def encoder_decoder_attention_loss(expected_attention_logits,
actual_attentions,
loss_type="kl_divergence",
loss_multiplier=1.0):
"""Computes encdec attention loss between expected and actual attentions.
Args:
expected_attention_logits: Tensor storing the expected encoder-decoder
attention logits with shape [batch_size, target_length, input_length].
actual_attentions: Dictionary with actual attention logits for different
attention types and hidden layers.
loss_type: type of the loss function.
loss_multiplier: multiplier for the attention loss.
Returns:
KL divergence or MSE loss (depending on `loss_type`) between the actual and
expected attentions, scaled by `loss_multiplier`.
""" |
def combine_attentions(attention_list):
"""Combine different layer attentions and then average over layers/heads."""
# Stack all hidden layer attention tensors to get a tensor with shape
# [num_hidden_layers, batch_size, num_heads, target_length, input_length].
attentions = tf.stack(attention_list)
# Reduce mean across all layers (axis=0) and all heads (axis=2) to get a
# tensor with shape [batch_size, target_length, input_length].
return tf.reduce_mean(attentions, [0, 2])
def kl_divergence_loss(expected_logits, actual_logits):
p = tfp.distributions.Categorical(logits=expected_logits)
q = tfp.distributions.Categorical(logits=actual_logits)
return tfp.distributions.kl_divergence(p, q)
def mse_loss(expected_logits, actual_weights):
expected_weights = tf.nn.softmax(expected_logits)
return tf.losses.mean_squared_error(expected_weights, actual_weights)
# For each hidden layer, we have attention-logit and attention-weight tensors
# with shape [batch_size, num_heads, target_length, input_length].
loss = 0.0
if loss_type == "mse":
actual_encdec_attention_weights = [
t for layer_key, t in actual_attentions.items()
if "encdec_attention" in layer_key and not layer_key.endswith("/logits")
]
actual_attention_weights = combine_attentions(
actual_encdec_attention_weights)
loss = mse_loss(expected_attention_logits, actual_attention_weights)
else:
actual_encdec_attention_logits = [
t for layer_key, t in actual_attentions.items()
if "encdec_attention" in layer_key and layer_key.endswith("/logits")
]
actual_attention_logits = combine_attentions(actual_encdec_attention_logits)
loss = kl_divergence_loss(expected_attention_logits,
actual_attention_logits)
return loss * loss_multiplier |
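A small numpy sketch of the KL term for a single (target, input) attention row, with hypothetical logits (the softmax runs over input positions):
import numpy as np

def softmax(x):
  e = np.exp(x - x.max())
  return e / e.sum()

expected_logits = np.array([2.0, 0.5, -1.0])
actual_logits = np.array([1.5, 1.0, -0.5])
p, q = softmax(expected_logits), softmax(actual_logits)
kl = np.sum(p * (np.log(p) - np.log(q)))  # KL(p || q) for this row
print(kl)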
<SYSTEM_TASK:>
Gets a bunch of sinusoids of different frequencies.
<END_TASK>
<USER_TASK:>
Description:
def get_timing_signal_1d(length,
channels,
min_timescale=1.0,
max_timescale=1.0e4,
start_index=0):
"""Gets a bunch of sinusoids of different frequencies.
Each channel of the input Tensor is incremented by a sinusoid of a different
frequency and phase.
This allows attention to learn to use absolute and relative positions.
Timing signals should be added to some precursors of both the query and the
memory inputs to attention.
The use of relative position is possible because sin(x+y) and cos(x+y) can be
expressed in terms of y, sin(x) and cos(x).
In particular, we use a geometric sequence of timescales starting with
min_timescale and ending with max_timescale. The number of different
timescales is equal to channels / 2. For each timescale, we
generate the two sinusoidal signals sin(timestep/timescale) and
cos(timestep/timescale). All of these sinusoids are concatenated in
the channels dimension.
Args:
length: scalar, length of timing signal sequence.
channels: scalar, size of timing embeddings to create. The number of
different timescales is equal to channels / 2.
min_timescale: a float
max_timescale: a float
start_index: index of first position
Returns:
a Tensor of timing signals [1, length, channels]
""" |
position = tf.to_float(tf.range(length) + start_index)
num_timescales = channels // 2
log_timescale_increment = (
math.log(float(max_timescale) / float(min_timescale)) /
tf.maximum(tf.to_float(num_timescales) - 1, 1))
inv_timescales = min_timescale * tf.exp(
tf.to_float(tf.range(num_timescales)) * -log_timescale_increment)
scaled_time = tf.expand_dims(position, 1) * tf.expand_dims(inv_timescales, 0)
signal = tf.concat([tf.sin(scaled_time), tf.cos(scaled_time)], axis=1)
signal = tf.pad(signal, [[0, 0], [0, tf.mod(channels, 2)]])
signal = tf.reshape(signal, [1, length, channels])
return signal |
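For reference, the same signal can be reproduced in a few lines of numpy (a sketch assuming channels is even; handy for checking shapes and values):
import math
import numpy as np

def timing_signal_1d_np(length, channels, min_timescale=1.0, max_timescale=1.0e4):
  position = np.arange(length, dtype=np.float32)
  num_timescales = channels // 2
  log_timescale_increment = (
      math.log(max_timescale / min_timescale) / max(num_timescales - 1, 1))
  inv_timescales = min_timescale * np.exp(
      np.arange(num_timescales, dtype=np.float32) * -log_timescale_increment)
  scaled_time = position[:, None] * inv_timescales[None, :]
  signal = np.concatenate([np.sin(scaled_time), np.cos(scaled_time)], axis=1)
  return signal[None, :, :]  # [1, length, channels]

print(timing_signal_1d_np(10, 8).shape)  # (1, 10, 8)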
<SYSTEM_TASK:>
Adds sinusoids of diff frequencies to a Tensor, with timing position given.
<END_TASK>
<USER_TASK:>
Description:
def add_timing_signal_1d_given_position(x,
position,
min_timescale=1.0,
max_timescale=1.0e4):
"""Adds sinusoids of diff frequencies to a Tensor, with timing position given.
Args:
x: a Tensor with shape [batch, length, channels]
position: a Tensor with shape [batch, length]
min_timescale: a float
max_timescale: a float
Returns:
a Tensor the same shape as x.
""" |
channels = common_layers.shape_list(x)[2]
num_timescales = channels // 2
log_timescale_increment = (
math.log(float(max_timescale) / float(min_timescale)) /
(tf.to_float(num_timescales) - 1))
inv_timescales = min_timescale * tf.exp(
tf.to_float(tf.range(num_timescales)) * -log_timescale_increment)
scaled_time = (
tf.expand_dims(tf.to_float(position), 2) * tf.expand_dims(
tf.expand_dims(inv_timescales, 0), 0))
signal = tf.concat([tf.sin(scaled_time), tf.cos(scaled_time)], axis=2)
signal = tf.pad(signal, [[0, 0], [0, 0], [0, tf.mod(channels, 2)]])
signal = common_layers.cast_like(signal, x)
return x + signal |
<SYSTEM_TASK:>
Adds positional embedding.
<END_TASK>
<USER_TASK:>
Description:
def add_positional_embedding(x, max_length, name=None, positions=None):
"""Adds positional embedding.
Args:
x: Tensor with shape [batch, length, depth].
max_length: int representing static maximum size of any dimension.
name: str representing name of the embedding tf.Variable.
positions: Tensor with shape [batch, length].
Returns:
Tensor of same shape as x.
""" |
with tf.name_scope("add_positional_embedding"):
_, length, depth = common_layers.shape_list(x)
var = tf.cast(tf.get_variable(name, [max_length, depth]), x.dtype)
if positions is None:
pad_length = tf.maximum(0, length - max_length)
sliced = tf.cond(
tf.less(length, max_length),
lambda: tf.slice(var, [0, 0], [length, -1]),
lambda: tf.pad(var, [[0, pad_length], [0, 0]]))
return x + tf.expand_dims(sliced, 0)
else:
return x + tf.gather(var, tf.to_int32(positions)) |
<SYSTEM_TASK:>
Adds n-dimensional positional embedding.
<END_TASK>
<USER_TASK:>
Description:
def add_positional_embedding_nd(x, max_length, name=None):
"""Adds n-dimensional positional embedding.
The embeddings add to all positional dimensions of the tensor.
Args:
x: Tensor with shape [batch, p1 ... pn, depth]. It has n positional
dimensions, i.e., 1 for text, 2 for images, 3 for video, etc.
max_length: int representing static maximum size of any dimension.
name: str representing name of the embedding tf.Variable.
Returns:
Tensor of same shape as x.
""" |
with tf.name_scope("add_positional_embedding_nd"):
x_shape = common_layers.shape_list(x)
num_dims = len(x_shape) - 2
depth = x_shape[-1]
base_shape = [1] * (num_dims + 1) + [depth]
base_start = [0] * (num_dims + 2)
base_size = [-1] + [1] * num_dims + [depth]
for i in range(num_dims):
shape = base_shape[:]
start = base_start[:]
size = base_size[:]
shape[i + 1] = max_length
size[i + 1] = x_shape[i + 1]
var = tf.get_variable(
name + "_%d" % i,
shape,
initializer=tf.random_normal_initializer(0, depth**-0.5))
var = var * depth**0.5
x += tf.slice(var, start, size)
return x |
<SYSTEM_TASK:>
Gets edge vectors for the edge types in the adjacency matrix.
<END_TASK>
<USER_TASK:>
Description:
def make_edge_vectors(adjacency_matrix, num_edge_types, depth, name=None):
"""Gets edge vectors for the edge types in the adjacency matrix.
Args:
adjacency_matrix: A [batch, num_nodes, num_nodes] tensor of ints.
num_edge_types: Number of different edge types
depth: Number of channels
name: a string
Returns:
A [batch, num_nodes, num_nodes, depth] vector of tensors
""" |
with tf.variable_scope(name, default_name="edge_vectors"):
att_adj_vectors_shape = [num_edge_types, depth]
adjacency_matrix_shape = common_layers.shape_list(adjacency_matrix)
adj_vectors = (
tf.get_variable(
"adj_vectors",
att_adj_vectors_shape,
initializer=tf.random_normal_initializer(0, depth**-0.5)) *
(depth**0.5))
# Avoiding gathers so that it works on TPUs
# adjacency_matrix_one_hot has shape
# [batch, num_nodes, num_nodes, num_edge_types]
adjacency_matrix_one_hot = tf.one_hot(adjacency_matrix, num_edge_types)
att_adj_vectors = tf.matmul(
tf.reshape(tf.to_float(adjacency_matrix_one_hot), [-1, num_edge_types]),
adj_vectors)
return tf.reshape(att_adj_vectors,
[adjacency_matrix_shape[0], adjacency_matrix_shape[1],
adjacency_matrix_shape[2], depth]) |
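The one-hot matmul is just a TPU-friendly gather; a small numpy check with a hypothetical 2-node adjacency matrix:
import numpy as np

num_edge_types, depth = 3, 4
adj = np.array([[0, 1], [2, 0]])              # [num_nodes, num_nodes]
adj_vectors = np.random.randn(num_edge_types, depth)
one_hot = np.eye(num_edge_types)[adj]         # [n, n, num_edge_types]
via_matmul = one_hot.reshape(-1, num_edge_types).dot(adj_vectors).reshape(2, 2, depth)
via_gather = adj_vectors[adj]                 # the gather this replaces
assert np.allclose(via_matmul, via_gather)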
<SYSTEM_TASK:>
Calculate the length of mask based on padding.
<END_TASK>
<USER_TASK:>
Description:
def padding_to_length(padding):
"""Calculate the length of mask based on padding.
Args:
padding: a Tensor with shape [..., length].
Returns:
a Tensor with shape [...].
""" |
non_padding = 1.0 - padding
return tf.to_int32(tf.reduce_sum(non_padding, axis=-1)) |
<SYSTEM_TASK:>
Create a bias tensor for prepend_mode="prepend_inputs_full_attention".
<END_TASK>
<USER_TASK:>
Description:
def attention_bias_prepend_inputs_full_attention(padding):
"""Create a bias tensor for prepend_mode="prepend_inputs_full_attention".
See prepend_inputs in common_hparams.py.
Produces a bias tensor to be used in self-attention.
This bias tensor allows for full connectivity in the "inputs" part of
the sequence and masked connectivity in the targets part.
Args:
padding: a float `Tensor` with shape [batch, length] with
ones in positions corresponding to padding. In each row, a single
padding position separates the input part from the target part.
Returns:
a `Tensor` with shape [batch, 1, length, length].
""" |
# Everything past the first padding position is part of the target.
# This Tensor has zeros for the source portion and separator,
# and ones for the target portion.
in_target = tf.cumsum(padding, axis=1, exclusive=True)
# The position within the target, or 0 if part of the source.
target_pos = tf.cumsum(in_target, axis=1)
# A position with a lesser target_pos cannot see a position with greater
# target_pos.
illegal_connections = tf.greater(
tf.expand_dims(target_pos, 1), tf.expand_dims(target_pos, 2))
bias = tf.to_float(illegal_connections) * -1e9
bias = tf.expand_dims(bias, 1)
return bias |
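A worked numpy example of the cumsum trick, for one hypothetical sequence of length 6 with the padding separator at position 3:
import numpy as np

padding = np.array([[0., 0., 0., 1., 0., 0.]])      # [batch, length]
in_target = np.cumsum(padding, axis=1) - padding    # exclusive cumsum -> [0 0 0 0 1 1]
target_pos = np.cumsum(in_target, axis=1)           # [0 0 0 0 1 2]
illegal = target_pos[:, None, :] > target_pos[:, :, None]
# Query position 4 (first target) may attend to the inputs and separator
# (positions 0-3) but not to the later target at position 5.
print(illegal[0, 4])  # [False False False False False  True]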
<SYSTEM_TASK:>
Bias for self-attention to encourage attention to close positions.
<END_TASK>
<USER_TASK:>
Description:
def attention_bias_proximal(length):
"""Bias for self-attention to encourage attention to close positions.
Args:
length: an integer scalar.
Returns:
a Tensor with shape [1, 1, length, length]
""" |
r = tf.to_float(tf.range(length))
diff = tf.expand_dims(r, 0) - tf.expand_dims(r, 1)
return tf.expand_dims(tf.expand_dims(-tf.log1p(tf.abs(diff)), 0), 0) |
<SYSTEM_TASK:>
Generate a mask that prevents batch elements from attending to each other.
<END_TASK>
<USER_TASK:>
Description:
def attention_bias_batch(batch_coordinates_q,
batch_coordinates_k=None,
condition_fn=None):
"""Generate a mask to prevent the batch to attend to each others.
Args:
batch_coordinates_q: Int-like Tensor of shape [length_q, 1] containing the
coordinates of the batches
batch_coordinates_k: Int-like Tensor of shape [length_k, 1] containing the
coordinates of the batches. If None, do self-attention.
condition_fn: Callable defining the attention mask.
Returns:
Float-like Tensor of shape [length_q, length_k] containing either 0 or
-infinity (-1e9).
""" |
if batch_coordinates_k is None:
batch_coordinates_k = batch_coordinates_q
# Convert to float first because of b/25387198.
def to_float(bc):
bc = tf.squeeze(bc, 1)
bc = tf.to_float(bc)
return bc
# Broadcast to create [length_q, length_k] mask.
bc_v = tf.expand_dims(to_float(batch_coordinates_q), 1)
bc_h = tf.expand_dims(to_float(batch_coordinates_k), 0)
bias_batch = bc_h - bc_v
bias_batch = condition_fn(bias_batch)
bias_batch *= -1e9
return bias_batch |
<SYSTEM_TASK:>
Reshape x so that the last dimension becomes two dimensions.
<END_TASK>
<USER_TASK:>
Description:
def split_last_dimension(x, n):
"""Reshape x so that the last dimension becomes two dimensions.
The first of these two dimensions is n.
Args:
x: a Tensor with shape [..., m]
n: an integer.
Returns:
a Tensor with shape [..., n, m/n]
""" |
x_shape = common_layers.shape_list(x)
m = x_shape[-1]
if isinstance(m, int) and isinstance(n, int):
assert m % n == 0
return tf.reshape(x, x_shape[:-1] + [n, m // n]) |
<SYSTEM_TASK:>
Reshape x so that the last two dimensions become one.
<END_TASK>
<USER_TASK:>
Description:
def combine_last_two_dimensions(x):
"""Reshape x so that the last two dimension become one.
Args:
x: a Tensor with shape [..., a, b]
Returns:
a Tensor with shape [..., ab]
""" |
x_shape = common_layers.shape_list(x)
a, b = x_shape[-2:]
return tf.reshape(x, x_shape[:-2] + [a * b]) |
<SYSTEM_TASK:>
Reshape x so that the first two dimensions become one.
<END_TASK>
<USER_TASK:>
Description:
def combine_first_two_dimensions(x):
"""Reshape x so that the first two dimension become one.
Args:
x: a Tensor with shape [a, b, ...]
Returns:
a Tensor with shape [ab, ...]
""" |
ret = tf.reshape(x, tf.concat([[-1], common_layers.shape_list(x)[2:]], 0))
old_shape = x.get_shape().dims
a, b = old_shape[:2]
new_shape = [a * b if a and b else None] + old_shape[2:]
ret.set_shape(new_shape)
return ret |
<SYSTEM_TASK:>
Compute color image summary.
<END_TASK>
<USER_TASK:>
Description:
def attention_image_summary(attn, image_shapes=None):
"""Compute color image summary.
Args:
attn: a Tensor with shape [batch, num_heads, query_length, memory_length]
image_shapes: optional tuple of integer scalars.
If the query positions and memory positions represent the
pixels of flattened images, then pass in their dimensions:
(query_rows, query_cols, memory_rows, memory_cols).
If the query positions and memory positions represent the
pixels x channels of flattened images, then pass in their dimensions:
(query_rows, query_cols, query_channels,
memory_rows, memory_cols, memory_channels).
""" |
attn = tf.cast(attn, tf.float32)
num_heads = common_layers.shape_list(attn)[1]
# [batch, query_length, memory_length, num_heads]
image = tf.transpose(attn, [0, 2, 3, 1])
image = tf.pow(image, 0.2) # for high-dynamic-range
# Each head will correspond to one of RGB.
# pad the heads to be a multiple of 3
image = tf.pad(image, [[0, 0], [0, 0], [0, 0], [0, tf.mod(-num_heads, 3)]])
image = split_last_dimension(image, 3)
image = tf.reduce_max(image, 4)
if image_shapes is not None:
if len(image_shapes) == 4:
q_rows, q_cols, m_rows, m_cols = list(image_shapes)
image = tf.reshape(image, [-1, q_rows, q_cols, m_rows, m_cols, 3])
image = tf.transpose(image, [0, 1, 3, 2, 4, 5])
image = tf.reshape(image, [-1, q_rows * m_rows, q_cols * m_cols, 3])
else:
assert len(image_shapes) == 6
q_rows, q_cols, q_channels, m_rows, m_cols, m_channels = list(
image_shapes)
image = tf.reshape(
image,
[-1, q_rows, q_cols, q_channels, m_rows, m_cols, m_channels, 3])
image = tf.transpose(image, [0, 1, 4, 3, 2, 5, 6, 7])
image = tf.reshape(
image,
[-1, q_rows * m_rows * q_channels, q_cols * m_cols * m_channels, 3])
tf.summary.image("attention", image, max_outputs=1) |
<SYSTEM_TASK:>
Keep attention weights non-zero only for the top hard_attention_k positions.
<END_TASK>
<USER_TASK:>
Description:
def harden_attention_weights(weights, hard_attention_k):
"""Make attention weights non-0 only on the top-hard_attention_k ones.""" |
# Subtract the top-kth weight and zero-out all lower ones.
# Note that currently in case of numerical ties it will retain more
# than k elements. In the future, we may want to avoid this.
weights -= common_layers.top_kth_iterative(weights, hard_attention_k)
weights = tf.nn.relu(weights)
# Re-normalize the weights.
weights_sum = tf.reduce_sum(weights, axis=-1, keep_dims=True)
weights_sum = tf.maximum(weights_sum, 1e-6) # Avoid division by 0.
weights /= weights_sum
return weights |
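A simplified numpy sketch of the same hardening; it keeps the top hard_attention_k weights directly instead of subtracting the iteratively estimated threshold from common_layers.top_kth_iterative (the weight row and k are hypothetical):
import numpy as np

weights = np.array([0.05, 0.4, 0.1, 0.3, 0.15])
k = 2
kth = np.sort(weights)[-k]                     # value of the k-th largest weight
hard = np.where(weights >= kth, weights, 0.0)  # zero out everything smaller
hard /= max(hard.sum(), 1e-6)                  # re-normalize
print(hard)  # [0.  0.571...  0.  0.428...  0.]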
<SYSTEM_TASK:>
Generates matrix of relative positions between inputs.
<END_TASK>
<USER_TASK:>
Description:
def _generate_relative_positions_matrix(length_q, length_k,
max_relative_position,
cache=False):
"""Generates matrix of relative positions between inputs.""" |
if not cache:
if length_q == length_k:
range_vec_q = range_vec_k = tf.range(length_q)
else:
range_vec_k = tf.range(length_k)
range_vec_q = range_vec_k[-length_q:]
distance_mat = range_vec_k[None, :] - range_vec_q[:, None]
else:
distance_mat = tf.expand_dims(tf.range(-length_k+1, 1, 1), 0)
distance_mat_clipped = tf.clip_by_value(distance_mat, -max_relative_position,
max_relative_position)
# Shift values to be >= 0. Each integer still uniquely identifies a relative
# position difference.
final_mat = distance_mat_clipped + max_relative_position
return final_mat |
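A worked numpy example for length_q == length_k == 4 and max_relative_position == 2:
import numpy as np

r = np.arange(4)
distance = r[None, :] - r[:, None]   # distance[i, j] = j - i
clipped = np.clip(distance, -2, 2)
final = clipped + 2                  # shift into [0, 2 * max_relative_position]
print(final)
# [[2 3 4 4]
#  [1 2 3 4]
#  [0 1 2 3]
#  [0 0 1 2]]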
<SYSTEM_TASK:>
Relative position-aware dot-product attention inner calculation.
<END_TASK>
<USER_TASK:>
Description:
def _relative_attention_inner(x, y, z, transpose):
"""Relative position-aware dot-product attention inner calculation.
This batches matrix multiply calculations to avoid unnecessary broadcasting.
Args:
x: Tensor with shape [batch_size, heads, length or 1, length or depth].
y: Tensor with shape [batch_size, heads, length or 1, depth].
z: Tensor with shape [length or 1, length, depth].
transpose: Whether to transpose inner matrices of y and z. Should be true if
last dimension of x is depth, not length.
Returns:
A Tensor with shape [batch_size, heads, length, length or depth].
""" |
batch_size = tf.shape(x)[0]
heads = x.get_shape().as_list()[1]
length = tf.shape(x)[2]
# xy_matmul is [batch_size, heads, length or 1, length or depth]
xy_matmul = tf.matmul(x, y, transpose_b=transpose)
# x_t is [length or 1, batch_size, heads, length or depth]
x_t = tf.transpose(x, [2, 0, 1, 3])
# x_t_r is [length or 1, batch_size * heads, length or depth]
x_t_r = tf.reshape(x_t, [length, heads * batch_size, -1])
# x_tz_matmul is [length or 1, batch_size * heads, length or depth]
x_tz_matmul = tf.matmul(x_t_r, z, transpose_b=transpose)
# x_tz_matmul_r is [length or 1, batch_size, heads, length or depth]
x_tz_matmul_r = tf.reshape(x_tz_matmul, [length, batch_size, heads, -1])
# x_tz_matmul_r_t is [batch_size, heads, length or 1, length or depth]
x_tz_matmul_r_t = tf.transpose(x_tz_matmul_r, [1, 2, 0, 3])
return xy_matmul + x_tz_matmul_r_t |
<SYSTEM_TASK:>
Helper to dot_product_self_attention_relative_v2.
<END_TASK>
<USER_TASK:>
Description:
def _relative_position_to_absolute_position_masked(x):
"""Helper to dot_product_self_attention_relative_v2.
Rearrange an attention logits or weights Tensor.
The dimensions of the input represent:
[batch, heads, query_position, memory_position - query_position + length - 1]
The dimensions of the output represent:
[batch, heads, query_position, memory_position]
Only works with masked_attention. Undefined behavior for regions of the
input where memory_position > query_position.
Args:
x: a Tensor with shape [batch, heads, length, length]
Returns:
a Tensor with shape [batch, heads, length, length]
""" |
batch, heads, length, _ = common_layers.shape_list(x)
x = tf.pad(x, [[0, 0], [0, 0], [0, 0], [1, 0]])
x = tf.reshape(x, [batch, heads, 1 + length, length])
x = tf.slice(x, [0, 0, 1, 0], [-1, -1, -1, -1])
return x |
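A numpy walk-through of the pad/reshape/slice trick for length == 3, with the batch and heads dimensions dropped for readability (the logit values are hypothetical):
import numpy as np

length = 3
# rel[i, r] holds the logit for query i and relative position r - (length - 1).
rel = np.array([[10, 11, 12],
                [20, 21, 22],
                [30, 31, 32]])
padded = np.pad(rel, [[0, 0], [1, 0]])              # prepend one zero per row
absolute = padded.reshape(length + 1, length)[1:, :]
print(absolute)
# [[12  0 20]   query 0: memory 0 is 12; the rest is the undefined upper triangle
#  [21 22  0]   query 1: memory 0 is 21, memory 1 is 22
#  [30 31 32]]  query 2: memory 0, 1, 2 are 30, 31, 32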
<SYSTEM_TASK:>
Helper function for dot_product_unmasked_self_attention_relative_v2.
<END_TASK>
<USER_TASK:>
Description:
def _absolute_position_to_relative_position_unmasked(x):
"""Helper function for dot_product_unmasked_self_attention_relative_v2.
Rearrange an attention logits or weights Tensor.
The dimensions of the input represent:
[batch, heads, query_position, memory_position]
The dimensions of the output represent:
[batch, heads, query_position, memory_position - query_position + length - 1]
Only works with unmasked_attention.
Args:
x: a Tensor with shape [batch, heads, length, length]
Returns:
a Tensor with shape [batch, heads, length, 2*length-1]
""" |
batch, heads, length, _ = common_layers.shape_list(x)
# pad along the column (memory position) dimension
x = tf.pad(x, [[0, 0], [0, 0], [0, 0], [0, length-1]])
x_flat = tf.reshape(x, [batch, heads, length**2 + length*(length -1)])
# add 0's in the beginning that will skew the elements after reshape
x_flat = tf.pad(x_flat, [[0, 0], [0, 0], [length, 0]])
x = tf.reshape(x_flat, [batch, heads, length, 2*length])
x = tf.slice(x, [0, 0, 0, 1], [batch, heads, length,
2*length -1])
return x |
<SYSTEM_TASK:>
Instantiate or retrieve relative embeddings, sliced according to length.
<END_TASK>
<USER_TASK:>
Description:
def get_relative_embeddings_left_right(max_relative_position, length, depth,
num_heads,
heads_share_relative_embedding,
name):
"""Instantiate or retrieve relative embeddings, sliced according to length.
Use for unmasked case where the relative attention looks both left and right.
Args:
max_relative_position: an Integer for the number of entries in the relative
embedding, which corresponds to the max relative distance that is
considered.
length: an Integer, specifies the length of the input sequence for which
this relative embedding is retrieved for.
depth: an Integer, specifies the depth for relative embeddings.
num_heads: an Integer, specifies the number of heads.
heads_share_relative_embedding: a Boolean specifying if the relative
embedding is shared across heads.
name: a string giving the name of the embedding variables.
Returns:
a Tensor with shape [2 * length - 1, depth], or
[num_heads, 2 * length - 1, depth] if the embedding is not shared across
heads.
""" |
initializer_stddev = depth**-0.5
max_relative_position_unmasked = 2 * max_relative_position - 1
if heads_share_relative_embedding:
embedding_shape = (max_relative_position_unmasked, depth)
else:
embedding_shape = (num_heads, max_relative_position_unmasked, depth)
relative_embeddings = tf.get_variable(
name=name, shape=embedding_shape,
initializer=tf.random_normal_initializer(stddev=initializer_stddev))
# Pad first before slice to avoid using tf.cond.
pad_length = tf.maximum(length - max_relative_position, 0)
slice_start_position = tf.maximum(max_relative_position-length, 0)
if heads_share_relative_embedding:
padded_relative_embeddings = tf.pad(
relative_embeddings,
[[pad_length, pad_length], [0, 0]])
used_relative_embeddings = tf.slice(
padded_relative_embeddings,
[slice_start_position, 0], [2 * length - 1, -1])
else:
padded_relative_embeddings = tf.pad(
relative_embeddings,
[[0, 0], [pad_length, pad_length], [0, 0]])
used_relative_embeddings = tf.slice(
padded_relative_embeddings,
[0, slice_start_position, 0], [-1, 2 * length - 1, -1])
return used_relative_embeddings |
<SYSTEM_TASK:>
Helper function. Assumes that memory_flange is half of query sizes.
<END_TASK>
<USER_TASK:>
Description:
def _get_left_right_blocks(x):
"""Helper function. Assumes that memory_flange is half of query sizes.
This function splits the tensor of width 'n' into two halves, where the
first half gets the width indices 0, 2, 4.. and the second half gets the
width indices 3, 5, ... We also fuse two blocks along the h dimension.
Args:
x: a 6-d tensor.
Returns:
x_left_blocks, x_right_blocks: Two 6-d tensors
""" |
(_, x_num_outer_h_blocks, x_num_outer_w_blocks, x_memory_flange_h,
x_memory_flange_w, depth) = common_layers.shape_list(x)
x_left_right_blocks = tf.slice(x,
[0, 1, 0, 0, 0, 0],
[-1, x_num_outer_h_blocks-2, -1, -1,
-1, -1])
num_blocks_h = (x_num_outer_h_blocks-2)//2
x_left_right_blocks = tf.reshape(x_left_right_blocks,
[-1,
num_blocks_h,
2, x_num_outer_w_blocks,
x_memory_flange_h,
x_memory_flange_w, depth])
x_left_right_blocks = tf.transpose(x_left_right_blocks,
[0, 1, 3, 2, 4, 5, 6])
x_left_right_blocks = tf.reshape(x_left_right_blocks,
[-1, num_blocks_h,
x_num_outer_w_blocks, 2*x_memory_flange_h,
x_memory_flange_w, depth])
# get it ready for splitting the left and right memory blocks
x_left_blocks, x_right_blocks = _split_along_width(x_left_right_blocks)
return x_left_blocks, x_right_blocks |
<SYSTEM_TASK:>
Stitches together the local 2d memory blocks.
<END_TASK>
<USER_TASK:>
Description:
def get_2d_local_memory(x, query_shape, memory_flange):
"""Stitches together the local 2d memory blocks.
Args:
x: a [batch, height, width, depth tensor]
query_shape: 2-d integer list of query shape
memory_flange: 2-d integer list of memory flanges
Returns:
x: A [batch, num_h_blocks, num_w_blocks,
query_shape[0]+2*memory_flange[0],query_shape[1]+2*memory_flange[1]]
tensor.
""" |
(_, height, width, depth_x) = common_layers.shape_list(x)
x_center_blocks = _extract_blocks(x, query_shape[0], query_shape[1])
# add extra padding to x so that we can extract the memory region
# around the center
paddings = [[0, 0], [memory_flange[0], memory_flange[0]],
[memory_flange[1], memory_flange[1]], [0, 0]]
padded_x = tf.pad(x, paddings)
padded_x.set_shape([None, height+2*memory_flange[0],
width+2*memory_flange[1], depth_x])
x_outer_memory_blocks = _extract_blocks(padded_x,
memory_flange[0], memory_flange[1])
# We'll extract left and right memory blocks, top and bottom memory blocks,
# and then the corner memory blocks
# Each of these after will have shape
# [batch, num_h_blocks, num_w_blocks, query_shape[0],
# memory_flange[1], depth]
x_left_blocks, x_right_blocks = _get_left_right_blocks(
x_outer_memory_blocks)
t_hw_block = lambda x: tf.transpose(x, [0, 2, 1, 4, 3, 5])
# now to get top and bottom blocks, we should just transpose the outer
# blocks, call the same function and transpose back to get shape
# [batch, num_h_blocks, num_w_blocks, memory_flange[0],
# query_shape[1], depth]
x_top_center_blocks, x_bottom_center_blocks = (
map(t_hw_block, _get_left_right_blocks(
t_hw_block(x_outer_memory_blocks))))
# now to get the corner blocks
x_left_corner_blocks, x_right_corner_blocks = _split_along_width(
x_outer_memory_blocks)
# now to extract top and bottom for both k and v
# we need to transpose because _split_along_width separates along
# the width
# each of these should have shape [batch, num_h_blocks,
# num_w_blocks, memory_flange[0], memory_flange[1], depth]
t_hw = lambda x: tf.transpose(x, [0, 2, 1, 3, 4, 5])
x_top_left_corner_blocks, x_bottom_left_corner_blocks = (
map(t_hw, _split_along_width(t_hw(x_left_corner_blocks))))
x_top_right_corner_blocks, x_bottom_right_corner_blocks = (
map(t_hw, _split_along_width(t_hw(x_right_corner_blocks))))
# The memory is top_left top_center top_right
# left_center middle right_center
# bottom_left bottom_center bottom_right
# Assembling the above row by row
# first [x_top_left, x_top, x_top_right]
# to get [batch, num_h_blocks, num_w_blocks, memory_flange[0],
# query_shape[1]+2*memory_flange[1], depth]
# then [x_left, x_center, x_right]
# then [x_bottom_left, x_bottom, x_bottom_right]
x_top_memory = tf.concat(
[x_top_left_corner_blocks,
x_top_center_blocks,
x_top_right_corner_blocks], axis=4)
x_middle_memory = tf.concat(
[x_left_blocks, x_center_blocks, x_right_blocks], axis=4)
x_bottom_memory = tf.concat(
[x_bottom_left_corner_blocks,
x_bottom_center_blocks,
x_bottom_right_corner_blocks], axis=4)
# concat along height
x = tf.concat([x_top_memory, x_middle_memory, x_bottom_memory], axis=3)
return x |
<SYSTEM_TASK:>
Gathers memory blocks around query blocks; the flange is half of the query size.
<END_TASK>
<USER_TASK:>
Description:
def get_2d_local_memory_v2(x, query_shape, memory_flange):
"""Gathering memory blocks around query blocks. flange is half of query .
Only works if memory flanges are half of query sizes.
Args:
x: a [batch, height, width, depth tensor]
query_shape: 2-d integer list of query shape
memory_flange: 2-d integer list of memory flanges
Returns:
x: A [batch, num_h_blocks, num_w_blocks,
query_shape[0]+2*memory_flange[0],query_shape[1]+2*memory_flange[1]]
tensor.
""" |
(_, height, width, depth_x) = common_layers.shape_list(x)
# add extra padding to x so that we can extract the memory region
# around the center
paddings = [[0, 0], [memory_flange[0], memory_flange[0]],
[memory_flange[1], memory_flange[1]], [0, 0]]
padded_x = tf.pad(x, paddings)
padded_x.set_shape([None, height+2*memory_flange[0],
width+2*memory_flange[1], depth_x])
num_h_memory_blocks = height//query_shape[0] + 1
num_w_memory_blocks = width//query_shape[1] + 1
x_memory_blocks = _extract_blocks(padded_x,
query_shape[0], query_shape[1])
x_width_blocks = tf.split(x_memory_blocks, num_w_memory_blocks,
2)
x_left_width = tf.concat(x_width_blocks[:num_w_memory_blocks - 1], axis=2)
x_right_width = tf.concat(x_width_blocks[1:], axis=2)
x_memory_blocks = tf.concat([x_left_width, x_right_width], axis=4)
x_height_blocks = tf.split(x_memory_blocks, num_h_memory_blocks, 1)
x_top_height = tf.concat(x_height_blocks[:num_h_memory_blocks - 1], axis=1)
x_bottom_height = tf.concat(x_height_blocks[1:], axis=1)
x = tf.concat([x_top_height, x_bottom_height], axis=3)
return x |
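# Illustrative example (added; not part of the original source): a minimal
# shape check for get_2d_local_memory_v2, assuming the TF1-style graph code
# used throughout this file. With a 4x4 query and a 2x2 flange (half the
# query), an 8x8 input becomes a 2x2 grid of 8x8 memory blocks.
def _example_get_2d_local_memory_v2():
  x = tf.zeros([1, 8, 8, 16])
  mem = get_2d_local_memory_v2(x, query_shape=(4, 4), memory_flange=(2, 2))
  # mem: [1, 2, 2, 8, 8, 16] == [batch, num_h_blocks, num_w_blocks,
  #   query_shape[0] + 2 * memory_flange[0],
  #   query_shape[1] + 2 * memory_flange[1], depth]
  return mem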
<SYSTEM_TASK:>
Calculate simple unmasked dot-product local self-attention 2d on tpu.
<END_TASK>
<USER_TASK:>
Description:
def dot_product_unmasked_attention_local_2d_tpu_simple(
x, bias, total_key_depth, total_value_depth, num_heads,
query_shape=(8, 8),
dropout_rate=0.0, image_shapes=None, make_image_summary=False,
dropout_broadcast_dims=None):
"""Calculate simple unmasked dot-product local self-attention 2d on tpu.
The query, key, and value blocks are the same. We do not do a second linear
transformation after computing the values
Args:
x: a Tensor with shape [batch, height, width, depth].
bias: bias Tensor.
total_key_depth: the dimensions of the keys
total_value_depth: the dimensions of the values
num_heads: number of heads
query_shape: a two tuple indicating query shape
dropout_rate: a floating point number.
image_shapes: optional tuple of integer scalars.
make_image_summary: Whether to make an attention image summary.
dropout_broadcast_dims: an optional list of integers less than 4
specifying in which dimensions to broadcast the dropout decisions.
saves memory.
Returns:
ret: [batch, height, width, total_value_depth] tensor,
the output of attention.
    q: [batch * num_blocks, num_heads, query_elements, key_depth_per_head]
      query tensor (blocked and split into heads)
    k: key tensor with the same shape as q
    v: [batch * num_blocks, num_heads, query_elements, value_depth_per_head]
      value tensor (blocked and split into heads)
""" |
# This calculation only works for self attention.
# q, k and v must therefore have the same shape.
orig_x_shape = common_layers.shape_list(x)
# Pad query, key, value to ensure multiple of corresponding lengths if
# necessary
is_padded = False
if (orig_x_shape[1]%query_shape[0]) != 0 or (
orig_x_shape[2]%query_shape[1]) != 0:
x = pad_to_multiple_2d(x, query_shape)
is_padded = True
_, height, width, depth = common_layers.shape_list(x)
assert depth%num_heads == 0
num_h_blocks = height//query_shape[0]
num_w_blocks = width//query_shape[1]
# Extract center queries, keys, and values
x_blocks = _extract_blocks(x, query_shape[0], query_shape[1])
x_blocks = tf.reshape(x_blocks, [-1, query_shape[0]*query_shape[1], depth])
q, k, v = compute_qkv(x_blocks, None, total_key_depth, total_value_depth)
hsplit = lambda x: split_heads(x, num_heads)
q, k, v = map(hsplit, [q, k, v])
logits = tf.matmul(q, k, transpose_b=True)
if bias is not None:
logits += bias
weights = tf.nn.softmax(logits, name="attention_weights")
# Dropping out the attention links for each of the heads
weights = common_layers.dropout_with_broadcast_dims(
weights, 1.0 - dropout_rate, broadcast_dims=dropout_broadcast_dims)
if common_layers.should_generate_summaries() and make_image_summary:
attention_image_summary(weights, image_shapes)
output = tf.matmul(weights, v)
output = combine_heads(output)
  # we need to get it back to shape [batch, height, width, total_value_depth]
ret = tf.reshape(output, [-1, num_h_blocks, num_w_blocks,
query_shape[0], query_shape[1], total_value_depth])
ret = tf.transpose(ret, [0, 1, 3, 2, 4, 5])
ret = tf.reshape(ret, [-1, num_h_blocks*query_shape[0],
num_w_blocks*query_shape[1], total_value_depth])
# slice if padding was introduced
if is_padded:
ret = tf.slice(ret, [0, 0, 0, 0], [-1, orig_x_shape[1],
orig_x_shape[2], -1])
return ret, q, k, v |
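# Illustrative usage (added; not part of the original source): a sketch of the
# simple TPU-local 2d attention, assuming a TF1 variable scope in which the
# q/k/v projections can be created. Attention is computed independently inside
# each 8x8 block.
def _example_local_2d_tpu_simple():
  x = tf.random_normal([2, 16, 16, 64])
  ret, q, k, v = dot_product_unmasked_attention_local_2d_tpu_simple(
      x, bias=None, total_key_depth=64, total_value_depth=64, num_heads=4,
      query_shape=(8, 8))
  # ret: [2, 16, 16, 64]; q, k, v are the blocked, head-split tensors of shape
  # [batch * num_blocks, num_heads, 64, depth_per_head].
  return ret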
<SYSTEM_TASK:>
Attention to the source and a neighborhood to the left within a block.
<END_TASK>
<USER_TASK:>
Description:
def masked_within_block_local_attention_1d(q, k, v, block_length=64, name=None):
"""Attention to the source and a neighborhood to the left within a block.
The sequence is divided into blocks of length block_length. Attention for a
given query position can only see memory positions less than or equal to the
query position in the corresponding block.
Args:
q: a Tensor with shape [batch, heads, length, depth_k]
k: a Tensor with shape [batch, heads, length, depth_k]
v: a Tensor with shape [batch, heads, length, depth_v]
block_length: an integer
name: an optional string
Returns:
a Tensor of shape [batch, heads, length, depth_v]
""" |
with tf.variable_scope(
name, default_name="within_local_attention_1d", values=[q, k, v]):
batch, heads, length, depth_k = common_layers.shape_list(q)
depth_v = common_layers.shape_list(v)[-1]
if isinstance(block_length, tf.Tensor):
const = tf.contrib.util.constant_value(block_length)
if const is not None:
block_length = int(const)
# Pad query, key, value to ensure multiple of block length.
original_length = length
padding_size = tf.mod(-length, block_length)
length += padding_size
padding = [[0, 0], [0, 0], [0, padding_size], [0, 0]]
q = tf.pad(q, padding)
k = tf.pad(k, padding)
v = tf.pad(v, padding)
# Compute attention for all subsequent query blocks.
num_blocks = tf.div(length, block_length)
q = tf.reshape(q, [batch, heads, num_blocks, block_length, depth_k])
k = tf.reshape(k, [batch, heads, num_blocks, block_length, depth_k])
v = tf.reshape(v, [batch, heads, num_blocks, block_length, depth_v])
# [batch, heads, num_blocks, block_length, block_length]
attention = tf.matmul(q, k, transpose_b=True)
attention += tf.reshape(attention_bias_lower_triangle(block_length),
[1, 1, 1, block_length, block_length])
attention = tf.nn.softmax(attention)
# [batch, heads, num_blocks, block_length, depth_v]
output = tf.matmul(attention, v)
output = tf.reshape(output, [batch, heads, -1, depth_v])
# Remove the padding if introduced.
output = tf.slice(output, [0, 0, 0, 0], [-1, -1, original_length, -1])
output.set_shape([None if isinstance(dim, tf.Tensor) else dim for dim in
(batch, heads, length, depth_v)])
return output |
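# Illustrative usage (added; not part of the original source): block-local
# causal attention on a length-70 sequence with block_length=32. The inputs
# are padded to 96 internally and the padding is sliced off again.
def _example_masked_within_block_local_attention_1d():
  q = tf.random_normal([2, 4, 70, 16])  # [batch, heads, length, depth_k]
  k = tf.random_normal([2, 4, 70, 16])
  v = tf.random_normal([2, 4, 70, 32])  # depth_v may differ from depth_k
  out = masked_within_block_local_attention_1d(q, k, v, block_length=32)
  # out: [2, 4, 70, 32]; position i attends only to positions j <= i that lie
  # in the same 32-token block.
  return out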
<SYSTEM_TASK:>
Converts tensor from relative to absolute indexing for local attention.
<END_TASK>
<USER_TASK:>
Description:
def _relative_position_to_absolute_position_unmasked(x):
"""Converts tensor from relative to aboslute indexing for local attention.
Args:
x: a Tensor of shape [batch (or batch*num_blocks), heads,
length, 2 * length - 1]
Returns:
    A Tensor of shape [batch (or batch*num_blocks), heads, length, length]
""" |
x_shape = common_layers.shape_list(x)
batch = x_shape[0]
heads = x_shape[1]
length = x_shape[2]
# Concat columns of pad to shift from relative to absolute indexing.
col_pad = tf.zeros((batch, heads, length, 1))
x = tf.concat([x, col_pad], axis=3)
  # Concat extra elements so as to add up to shape (len+1, 2*len-1).
flat_x = tf.reshape(x, [batch, heads, length * 2 * length])
flat_pad = tf.zeros((batch, heads, length-1))
flat_x_padded = tf.concat([flat_x, flat_pad], axis=2)
# Reshape and slice out the padded elements.
final_x = tf.reshape(flat_x_padded, [batch, heads, length+1, 2*length-1])
final_x = final_x[:, :, :, length-1:]
final_x = final_x[:, :, :length, :]
return final_x |
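# Worked example (added; not part of the original source): for length = 3 the
# input holds, per query position, scores at relative offsets [-2, -1, 0, 1, 2]
# and the output re-indexes them to absolute key positions [0, 1, 2].
def _example_relative_to_absolute_unmasked():
  rel = tf.reshape(tf.range(15, dtype=tf.float32), [1, 1, 3, 5])
  abs_scores = _relative_position_to_absolute_position_unmasked(rel)
  # abs_scores: [1, 1, 3, 3]; row i equals rel[0, 0, i, 2 - i : 5 - i], i.e.
  # query 0 keeps offsets [0, 1, 2], query 1 keeps [-1, 0, 1], and
  # query 2 keeps [-2, -1, 0].
  return abs_scores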
<SYSTEM_TASK:>
Helper function to create a local version of the keys or values for 1d.
<END_TASK>
<USER_TASK:>
Description:
def _make_local_block(x, depth, batch, heads, num_blocks, block_length):
"""Helper function to create a local version of the keys or values for 1d.""" |
prev_block = tf.slice(x, [0, 0, 0, 0, 0],
[-1, -1, num_blocks - 1, -1, -1])
cur_block = tf.slice(x, [0, 0, 1, 0, 0], [-1, -1, -1, -1, -1])
local_block = tf.concat([prev_block, cur_block], 3)
return tf.reshape(local_block,
[batch, heads, num_blocks - 1, block_length * 2, depth]) |
<SYSTEM_TASK:>
Reshapes input by splitting its length over blocks of memory_block_size.
<END_TASK>
<USER_TASK:>
Description:
def reshape_by_blocks(x, x_shape, memory_block_size):
"""Reshapes input by splitting its length over blocks of memory_block_size.
Args:
x: a Tensor with shape [batch, heads, length, depth]
x_shape: tf.TensorShape of x.
memory_block_size: Integer which divides length.
Returns:
Tensor with shape
[batch, heads, length // memory_block_size, memory_block_size, depth].
""" |
x = tf.reshape(x, [
x_shape[0], x_shape[1], x_shape[2] // memory_block_size,
memory_block_size, x_shape[3]
])
return x |
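# Illustrative example (added; not part of the original source): splitting a
# length-12 sequence into memory blocks of size 3.
def _example_reshape_by_blocks():
  x = tf.zeros([2, 4, 12, 8])
  blocked = reshape_by_blocks(x, common_layers.shape_list(x),
                              memory_block_size=3)
  # blocked: [2, 4, 4, 3, 8] == [batch, heads, num_blocks, block_size, depth]
  return blocked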
<SYSTEM_TASK:>
Gathers blocks with gaps in between.
<END_TASK>
<USER_TASK:>
Description:
def gather_dilated_memory_blocks(x,
num_memory_blocks,
gap_size,
query_block_size,
memory_block_size,
gather_indices,
direction="left"):
"""Gathers blocks with gaps in between.
Args:
x: Tensor of shape [length, batch, heads, depth]
num_memory_blocks: how many memory blocks to look in "direction". Each will
be separated by gap_size.
gap_size: an integer indicating the gap size
query_block_size: an integer indicating size of query block
memory_block_size: an integer indicating the size of a memory block.
gather_indices: The indices to gather from.
direction: left or right
Returns:
Tensor of shape [batch, heads, blocks, block_length, depth]
""" |
gathered_blocks = []
# gathering memory blocks
for block_id in range(num_memory_blocks):
block_end_index = -(query_block_size + gap_size *
(block_id + 1) + memory_block_size * block_id)
block_start_index = (
(memory_block_size + gap_size) * (num_memory_blocks - (block_id + 1)))
if direction != "left":
[block_end_index,
block_start_index] = [-block_start_index, -block_end_index]
if block_end_index == 0:
x_block = x[block_start_index:]
else:
x_block = x[block_start_index:block_end_index]
def gather_dilated_1d_blocks(x, gather_indices):
x_new = tf.gather(x, gather_indices)
# [batch, heads, blocks, block_length, dim]
return tf.transpose(x_new, [2, 3, 0, 1, 4])
gathered_blocks.append(gather_dilated_1d_blocks(x_block, gather_indices))
return tf.concat(gathered_blocks, 3) |
<SYSTEM_TASK:>
Making sure x is a multiple of shape.
<END_TASK>
<USER_TASK:>
Description:
def pad_to_multiple_2d(x, block_shape):
"""Making sure x is a multiple of shape.
Args:
x: a [batch, heads, h, w, depth] or [batch, h, w, depth] tensor
block_shape: a 2-d list of integer shapes
Returns:
padded_x: a [batch, heads, h, w, depth] or [batch, h, w, depth] tensor
""" |
old_shape = x.get_shape().dims
last = old_shape[-1]
if len(old_shape) == 4:
height_padding = -common_layers.shape_list(x)[1] % block_shape[0]
width_padding = -common_layers.shape_list(x)[2] % block_shape[1]
paddings = [[0, 0], [0, height_padding], [0, width_padding], [0, 0]]
elif len(old_shape) == 5:
height_padding = -common_layers.shape_list(x)[2] % block_shape[0]
width_padding = -common_layers.shape_list(x)[3] % block_shape[1]
paddings = [[0, 0], [0, 0], [0, height_padding], [0, width_padding], [0, 0]]
padded_x = tf.pad(x, paddings)
padded_shape = padded_x.get_shape().as_list()
padded_shape = padded_shape[:-1] + [last]
padded_x.set_shape(padded_shape)
return padded_x |
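# Illustrative example (added; not part of the original source): padding the
# spatial dimensions up to the next multiple of the block shape.
def _example_pad_to_multiple_2d():
  x = tf.zeros([2, 5, 7, 16])
  padded = pad_to_multiple_2d(x, block_shape=(4, 4))
  # padded: [2, 8, 8, 16]; 3 rows and 1 column of zeros are appended on the
  # bottom/right so both spatial dims are divisible by 4.
  return padded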
<SYSTEM_TASK:>
Reshapes a tensor between dimensions i and j.
<END_TASK>
<USER_TASK:>
Description:
def reshape_range(tensor, i, j, shape):
"""Reshapes a tensor between dimensions i and j.""" |
t_shape = common_layers.shape_list(tensor)
target_shape = t_shape[:i] + shape + t_shape[j:]
return tf.reshape(tensor, target_shape) |
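# Illustrative example (added; not part of the original source): merging
# dimensions 1 and 2 of a rank-4 tensor into a single dimension.
def _example_reshape_range():
  t = tf.zeros([2, 3, 4, 5])
  merged = reshape_range(t, 1, 3, [3 * 4])
  # merged: [2, 12, 5]; dimensions i..j-1 (here 1 and 2) are replaced by shape.
  return merged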
<SYSTEM_TASK:>
Gathers flattened blocks from x.
<END_TASK>
<USER_TASK:>
Description:
def gather_blocks_2d(x, indices):
"""Gathers flattened blocks from x.""" |
x_shape = common_layers.shape_list(x)
x = reshape_range(x, 2, 4, [tf.reduce_prod(x_shape[2:4])])
# [length, batch, heads, dim]
x_t = tf.transpose(x, [2, 0, 1, 3])
x_new = tf.gather(x_t, indices)
# returns [batch, heads, num_blocks, block_length ** 2, dim]
return tf.transpose(x_new, [2, 3, 0, 1, 4]) |
<SYSTEM_TASK:>
scatters blocks from x into shape with indices.
<END_TASK>
<USER_TASK:>
Description:
def scatter_blocks_2d(x, indices, shape):
"""scatters blocks from x into shape with indices.""" |
x_shape = common_layers.shape_list(x)
# [length, batch, heads, dim]
x_t = tf.transpose(
tf.reshape(x, [x_shape[0], x_shape[1], -1, x_shape[-1]]), [2, 0, 1, 3])
x_t_shape = common_layers.shape_list(x_t)
indices = tf.reshape(indices, [-1, 1])
scattered_x = tf.scatter_nd(indices, x_t, x_t_shape)
scattered_x = tf.transpose(scattered_x, [1, 2, 0, 3])
return tf.reshape(scattered_x, shape) |
<SYSTEM_TASK:>
Getting gather indices.
<END_TASK>
<USER_TASK:>
Description:
def gather_indices_2d(x, block_shape, block_stride):
"""Getting gather indices.""" |
# making an identity matrix kernel
kernel = tf.eye(block_shape[0] * block_shape[1])
kernel = reshape_range(kernel, 0, 1, [block_shape[0], block_shape[1], 1])
  # making indices [1, h, w, 1] to apply convs
x_shape = common_layers.shape_list(x)
indices = tf.range(x_shape[2] * x_shape[3])
indices = tf.reshape(indices, [1, x_shape[2], x_shape[3], 1])
indices = tf.nn.conv2d(
tf.cast(indices, tf.float32),
kernel,
strides=[1, block_stride[0], block_stride[1], 1],
padding="VALID")
# making indices [num_blocks, dim] to gather
dims = common_layers.shape_list(indices)[:3]
if all([isinstance(dim, int) for dim in dims]):
num_blocks = functools.reduce(operator.mul, dims, 1)
else:
num_blocks = tf.reduce_prod(dims)
indices = tf.reshape(indices, [num_blocks, -1])
return tf.cast(indices, tf.int32) |
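# Worked example (added; not part of the original source): for a 4x4 spatial
# grid split into non-overlapping 2x2 blocks, the returned indices enumerate
# each block's flattened positions.
def _example_gather_indices_2d():
  x = tf.zeros([1, 1, 4, 4, 8])  # only the spatial dims (4, 4) matter here
  idx = gather_indices_2d(x, block_shape=(2, 2), block_stride=(2, 2))
  # idx: [4, 4] ==
  # [[0, 1, 4, 5], [2, 3, 6, 7], [8, 9, 12, 13], [10, 11, 14, 15]]
  return idx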
<SYSTEM_TASK:>
Creates a mask for 2d block raster scan.
<END_TASK>
<USER_TASK:>
Description:
def make_2d_block_raster_mask(query_shape, memory_flange):
"""Creates a mask for 2d block raster scan.
The query mask can look to the left, top left, top, and top right, but
not to the right. Inside the query, we have the standard raster scan
masking.
Args:
query_shape: A tuple of ints (query_height, query_width)
memory_flange: A tuple of ints
(memory_flange_height, memory_flange_width)
Returns:
    A tensor of shape [query_size, memory_size]
""" |
# mask inside the query block
query_triangle = common_layers.ones_matrix_band_part(
np.prod(query_shape), np.prod(query_shape), -1, 0)
split_query_masks = tf.split(query_triangle, query_shape[0], axis=1)
# adding mask for left and right
mask_pieces = [
tf.concat( # pylint: disable=g-complex-comprehension
[tf.ones([np.prod(query_shape), memory_flange[1]]),
split_query_masks[i],
tf.zeros([np.prod(query_shape), memory_flange[1]])],
axis=1) for i in range(query_shape[0])
]
# adding mask for top
final_mask = tf.concat(
[
tf.ones([
np.prod(query_shape),
(query_shape[1] + 2 * memory_flange[1]) * memory_flange[0]
]),
tf.concat(mask_pieces, axis=1)
],
axis=1)
# 0.0 is visible location, 1.0 is masked.
return 1. - final_mask |
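# Illustrative example (added; not part of the original source): for a 2x2
# query block with a 1x1 memory flange the mask has one row per query position
# and one column per memory position: a top strip of width 4 plus, per query
# row, a left flange, the raster-scan triangle and a fully masked right flange.
def _example_make_2d_block_raster_mask():
  mask = make_2d_block_raster_mask(query_shape=(2, 2), memory_flange=(1, 1))
  # mask: [4, 12]; 0.0 marks visible memory positions, 1.0 marks masked ones.
  return mask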
<SYSTEM_TASK:>
Get the memory regions that surround a 2d query.
<END_TASK>
<USER_TASK:>
Description:
def get_memory_region(x, query_block_shape, memory_flange, q_indices):
"""Get the memory regions that surround a 2d query.
  The memory regions are to the left of and above the query block (top-left,
  top, and top-right).
Args:
x: A tensor with shape [batch, heads, height, width, depth]
query_block_shape: a 2-d tuple of integers
memory_flange: a 2-d tuple of integers
q_indices: a tensor of indices for each of the center blocks.
[num_blocks, block_length]
Returns:
    x_flange: A tensor of shape [batch, heads, #blocks, block_length, depth]
    x_center: A tensor of the gathered query blocks, with shape
      [batch, heads, #blocks, query_block_length, depth]
""" |
# Padding x to be multiple of query_shape and then
# extracting the memory blocks from the same regions as the query blocks
x_query_padded = pad_to_multiple_2d(x, query_block_shape)
x_center = gather_blocks_2d(x_query_padded, q_indices)
# Then padding the flange region
paddings = [[0, 0], [0, 0], [memory_flange[0], 0],
[memory_flange[1], memory_flange[1]], [0, 0]]
x_memory_padded = tf.pad(x_query_padded, paddings)
left_x = None
top_x = None
# Extracting the memory regions around the query block. left_x_region extends
# to the left and the top_x_region is the combination of top left, top, and
# top right of the query block
  # only if there is a left memory region
if memory_flange[1] > 0:
left_x_region = x_memory_padded[:, :, memory_flange[
0]:, :-(query_block_shape[1] + memory_flange[1]), :]
left_memory_shape = (query_block_shape[0], memory_flange[1])
left_indices = gather_indices_2d(left_x_region, left_memory_shape,
query_block_shape)
left_x = gather_blocks_2d(left_x_region, left_indices)
  # only if there is a top memory region
if memory_flange[0] > 0:
top_x_region = x_memory_padded[:, :, :-query_block_shape[0], :, :]
top_memory_shape = (memory_flange[0],
query_block_shape[1] + 2 * memory_flange[1])
top_indices = gather_indices_2d(top_x_region, top_memory_shape,
query_block_shape)
top_x = gather_blocks_2d(top_x_region, top_indices)
x_flange = None
if top_x is not None and left_x is not None:
x_flange = tf.concat([top_x, left_x], axis=3)
else:
x_flange = top_x if top_x is not None else left_x
return x_flange, x_center |
<SYSTEM_TASK:>
Get right shifted blocks for masked local attention 2d.
<END_TASK>
<USER_TASK:>
Description:
def get_shifted_center_blocks(x, indices):
"""Get right shifted blocks for masked local attention 2d.
Args:
x: A tensor with shape [batch, heads, height, width, depth]
indices: The indices to gather blocks
Returns:
x_shifted: a tensor of extracted blocks, each block right shifted along
length.
""" |
center_x = gather_blocks_2d(x, indices)
# Shift right along the length dimension
def shift_right_2d_blocks(x):
"""Shift the second to last dimension of x right by one."""
shifted_targets = (
tf.pad(x, [[0, 0], [0, 0], [0, 0], [1, 0], [0, 0]])[:, :, :, :-1, :])
return shifted_targets
x_shifted = shift_right_2d_blocks(center_x)
return x_shifted |
<SYSTEM_TASK:>
Right shifts once in every block.
<END_TASK>
<USER_TASK:>
Description:
def right_shift_blockwise(x, query_shape, name=None):
"""Right shifts once in every block.
Args:
x: a tensor of shape [batch, height, width, depth]
query_shape: A 2d tuple of ints
name: a string
Returns:
output: a tensor of the same shape as x
""" |
with tf.variable_scope(
name, default_name="right_shift_blockwise", values=[x]):
x_list_shape = x.get_shape().as_list()
x_shape = common_layers.shape_list(x)
# Add a dummy dimension for heads.
x = tf.expand_dims(x, axis=1)
x = pad_to_multiple_2d(x, query_shape)
padded_x_shape = common_layers.shape_list(x)
# Set up q blocks.
x_indices = gather_indices_2d(x, query_shape, query_shape)
x_new = get_shifted_center_blocks(x, x_indices)
# Put representations back into original shapes.
output = scatter_blocks_2d(x_new, x_indices, padded_x_shape)
# Remove the dummy head dimension.
output = tf.squeeze(output, axis=1)
# Remove the padding if introduced.
output = tf.slice(output, [0, 0, 0, 0], [-1, x_shape[1], x_shape[2], -1])
output.set_shape(x_list_shape)
return output |
<SYSTEM_TASK:>
Computes query, key and value.
<END_TASK>
<USER_TASK:>
Description:
def compute_qkv(query_antecedent,
memory_antecedent,
total_key_depth,
total_value_depth,
q_filter_width=1,
kv_filter_width=1,
q_padding="VALID",
kv_padding="VALID",
vars_3d_num_heads=0,
layer_collection=None):
"""Computes query, key and value.
Args:
query_antecedent: a Tensor with shape [batch, length_q, channels]
memory_antecedent: a Tensor with shape [batch, length_m, channels]
total_key_depth: an integer
total_value_depth: an integer
q_filter_width: An integer specifying how wide you want the query to be.
kv_filter_width: An integer specifying how wide you want the keys and values
to be.
q_padding: One of "VALID", "SAME" or "LEFT". Default is VALID: No padding.
kv_padding: One of "VALID", "SAME" or "LEFT". Default is VALID: No padding.
    vars_3d_num_heads: an optional integer; the number of heads to use for
      3d variables (0 means regular 2d variables are used).
layer_collection: A tensorflow_kfac.LayerCollection. Only used by the
KFAC optimizer. Default is None.
Returns:
q, k, v : [batch, length, depth] tensors
""" |
if memory_antecedent is None:
memory_antecedent = query_antecedent
q = compute_attention_component(
query_antecedent,
total_key_depth,
q_filter_width,
q_padding,
"q",
vars_3d_num_heads=vars_3d_num_heads,
layer_collection=layer_collection)
k = compute_attention_component(
memory_antecedent,
total_key_depth,
kv_filter_width,
kv_padding,
"k",
vars_3d_num_heads=vars_3d_num_heads,
layer_collection=layer_collection)
v = compute_attention_component(
memory_antecedent,
total_value_depth,
kv_filter_width,
kv_padding,
"v",
vars_3d_num_heads=vars_3d_num_heads,
layer_collection=layer_collection)
return q, k, v |
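# Illustrative usage (added; not part of the original source), assuming a TF1
# variable scope in which the q/k/v projection weights can be created.
def _example_compute_qkv():
  antecedent = tf.zeros([2, 10, 64])
  q, k, v = compute_qkv(antecedent, None,
                        total_key_depth=64, total_value_depth=128)
  # Self-attention case (memory_antecedent=None): q, k: [2, 10, 64] and
  # v: [2, 10, 128].
  return q, k, v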
<SYSTEM_TASK:>
Self-attention feedforward layer.
<END_TASK>
<USER_TASK:>
Description:
def ffn_self_attention_layer(x,
filter_depth,
output_depth,
num_parts,
dropout_rate,
share_kv=False,
name=None):
"""Self-attention feedforward layer.
We use self-attention to do feedforward computations. We apply this function
positionwise where for each position, we linearly transform the output to have
depth filter_depth, and break up the result depth-wise into num_parts
contiguous parts. The parts self-attend, we concatenate the results
depth-wise, and we linearly transform to a depth of output_depth. The goal is
to get multiplicative interactions between components of a representation.
Args:
x: a Tensor with shape [batch, length, channels]
filter_depth: an integer
output_depth: an integer
num_parts: an integer dividing filter depth
dropout_rate: a floating point number
share_kv: Share the key value transform
name: an optional string
Returns:
A Tensor with shape [batch, length, output_depth].
""" |
with tf.variable_scope(
name, default_name="feedforward_self_attention", values=[x]):
x_shape = common_layers.shape_list(x)
part_depth = filter_depth // num_parts
if not share_kv:
combined = common_layers.dense(
x, filter_depth * 3, use_bias=False, name="qkv_transform")
combined = tf.expand_dims(combined, axis=2)
q, k, v = tf.split(combined, 3, axis=3)
else:
q = tf.expand_dims(
common_layers.dense(
x, filter_depth, use_bias=False, name="q_transform"),
axis=2)
kv_combined = tf.expand_dims(
common_layers.dense(
tf.concat([x, x], axis=1),
filter_depth,
use_bias=False,
name="kv_transform"),
axis=2)
k, v = tf.split(kv_combined, [x_shape[1], x_shape[1]], axis=1)
batch_q = tf.reshape(q, [-1, 1, num_parts, part_depth])
batch_k = tf.reshape(k, [-1, 1, num_parts, part_depth])
batch_v = tf.reshape(v, [-1, 1, num_parts, part_depth])
batch_q *= part_depth**-0.5
# non-masked bias
bias = None
x = dot_product_attention(batch_q, batch_k, batch_v, bias, dropout_rate)
x = tf.reshape(x, [x_shape[0], x_shape[1], filter_depth])
x = common_layers.dense(
x, output_depth, use_bias=False, name="output_transform")
return x |
<SYSTEM_TASK:>
Attention over parameters.
<END_TASK>
<USER_TASK:>
Description:
def parameter_attention(x,
total_key_depth,
total_value_depth,
output_depth,
memory_rows,
num_heads,
dropout_rate,
name=None):
"""Attention over parameters.
We use the same multi-headed attention as in the other layers, but the memory
  keys and values are model parameters. There is no linear transformation on
the keys or values.
We are also a bit more careful about memory usage, since the number of
memory positions may be very large.
Args:
x: a Tensor with shape [batch, length_q, channels]
total_key_depth: an integer
total_value_depth: an integer
output_depth: an integer
memory_rows: an integer
num_heads: an integer dividing total_key_depth and total_value_depth
dropout_rate: a floating point number
name: an optional string
Returns:
A Tensor with shape [batch, length_q, output_depth].
""" |
with tf.variable_scope(name, default_name="parameter_attention", values=[x]):
head_size_k = total_key_depth // num_heads
head_size_v = total_value_depth // num_heads
var_shape_k = [num_heads, memory_rows, head_size_k]
var_shape_v = [num_heads, memory_rows, head_size_v]
k = tf.get_variable(
"k",
var_shape_k,
initializer=tf.random_normal_initializer(
0, output_depth**-0.5 * (num_heads**0.5)))
v = tf.get_variable(
"v",
var_shape_v,
initializer=tf.random_normal_initializer(
0, output_depth**-0.5 * (output_depth**0.5)))
batch_size = common_layers.shape_list(x)[0]
length = common_layers.shape_list(x)[1]
q = common_layers.dense(
x, total_key_depth, use_bias=False, name="q_transform")
if dropout_rate:
      # This is a cheaper form of attention dropout where we use the same
      # dropout decisions across batch elements and query positions,
# but different decisions across heads and memory positions.
v = tf.nn.dropout(
v, 1.0 - dropout_rate, noise_shape=[num_heads, memory_rows, 1])
# query is [batch, length, hidden_size]
# reshape and transpose it to [heads, batch * length, head_size]
q = tf.reshape(q, [batch_size, length, num_heads, head_size_k])
q = tf.transpose(q, [2, 0, 1, 3])
q = tf.reshape(q, [num_heads, batch_size * length, head_size_k])
weights = tf.matmul(q, k, transpose_b=True)
weights = tf.nn.softmax(weights)
y = tf.matmul(weights, v)
y = tf.reshape(y, [num_heads, batch_size, length, head_size_v])
y = tf.transpose(y, [1, 2, 0, 3])
y = tf.reshape(y, [batch_size, length, total_value_depth])
y.set_shape([None, None, total_value_depth])
y = common_layers.dense(
y, output_depth, use_bias=False, name="output_transform")
return y |
<SYSTEM_TASK:>
Return a tensor with given shape containing coordinate along given axis.
<END_TASK>
<USER_TASK:>
Description:
def coordinate_tensor(shape, axis):
"""Return a tensor with given shape containing coordinate along given axis.
Args:
shape: a Tensor representing the shape of the output Tensor
axis: an integer
Returns:
    A tensor with the given shape and type tf.int32, where each element is its
    coordinate along the given axis.
""" |
if axis < 0:
    axis = tf.size(shape) + axis  # Convert to positive for the one_hot index
r = tf.range(shape[axis])
r_shape = tf.one_hot(
axis, tf.size(shape), on_value=-1, off_value=1, dtype=tf.int32)
return tf.zeros(shape, dtype=tf.int32) + tf.reshape(r, r_shape) |
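# Worked example (added; not part of the original source): coordinates along
# axis 1 of a [2, 3] shape.
def _example_coordinate_tensor():
  coords = coordinate_tensor(tf.constant([2, 3]), axis=1)
  # coords == [[0, 1, 2],
  #            [0, 1, 2]]  (int32)
  return coords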
<SYSTEM_TASK:>
Implementing attention that runs inside each expert.
<END_TASK>
<USER_TASK:>
Description:
def self_attention_expert(x,
batch_coordinate,
mask_right=True,
split_batch=False,
attention_num_head=1,
attention_kq_size=None,
attention_v_size=None):
"""Implementing attention that runs inside each expert.
Args:
x: A tensor of shape[batch, depth]. Contains representations from
different positions, which are lexicographically ordered.
batch_coordinate: A tensor of shape [batch, 1] containing the batch
coordinate of each element in x. This is needed to make sure that
positions from different sequences don't attend to each other.
mask_right: A bool. If true, we will not attend to positions on the right,
just as decoder self attention.
split_batch (bool): If True, each sequence of the batch is processed
individually on a loop. If False, the sequences are processed all at
      once and a mask is applied to isolate the sequences from each other.
attention_num_head (int): number of attention heads
attention_kq_size (int): dimension used for the attention key, and query
attention_v_size (int): dimension used for the attention value
Returns:
out: A tensor of shape [batch, depth].
example use:
expert_utils.local_moe(
...
expert_fn=functools.partial(self_attention_expert, mask_right=)
)
""" |
depth = x.get_shape().as_list()[-1]
length = common_layers.shape_list(batch_coordinate)[0]
  # Print a warning message if one of the experts isn't used (useful at
  # inference where summaries aren't used and the gating function doesn't add
  # noise)
global _expert_count # Hack to make each expert have a unique id
_expert_count += 1
length = tf.cond(
tf.equal(length, 0),
lambda: tf.Print( # pylint: disable=g-long-lambda
length, [length], "Expert {} empty: ".format(_expert_count)),
lambda: length,
)
tf.summary.scalar("batch_size", length, family="experts_stats_batch_size")
attention_kq_size = attention_kq_size or depth
attention_v_size = attention_v_size or depth
def length_not_null(x, batch_coordinate):
"""Branch of the graph only evaluated when length isn't null."""
# Mask between the sequences (not used if map_ids is used)
bias_batch = attention_bias_coordinates(batch_coordinate)
def add_or_set_if(prev_bias, new_bias, condition):
"""Add the bias together while considering the None case."""
if not condition:
return prev_bias
if prev_bias is None:
return new_bias
return prev_bias + new_bias
def mask_and_call_attention(x):
"""Function applied once for each sequence of the batch."""
      # Mask to prevent sequences from attending to the future
length = common_layers.shape_list(x)[1] # x has shape [1, length,...]
bias_past = tf.reshape(
attention_bias_lower_triangle(length), [length, length])
# bias has shape [length, length]
bias = None
bias = add_or_set_if(bias, bias_past, mask_right)
bias = add_or_set_if(bias, bias_batch, not split_batch)
bias = tf.reshape(bias, [1, 1, length, length])
return multihead_attention(
x,
None,
bias,
total_key_depth=attention_kq_size,
total_value_depth=attention_v_size,
output_depth=depth,
num_heads=attention_num_head,
dropout_rate=0.0)
if split_batch:
out = expert_utils.map_ids(x, batch_coordinate, mask_and_call_attention)
else:
x = tf.reshape(x, [1, length, depth])
out = mask_and_call_attention(x)
out = tf.squeeze(out, 0)
return out
# If the length is empty, just forward an empty tensor (avoid having to
# evaluate multihead_attention with tensor having dim equal to zeros)
out = tf.cond(
tf.equal(length, 0),
lambda: tf.zeros(shape=[0, depth], dtype=tf.float32, name="empty_out"),
lambda: length_not_null(x, batch_coordinate),
)
return out |
<SYSTEM_TASK:>
Attention using a mixture of experts.
<END_TASK>
<USER_TASK:>
Description:
def local_expert_attention(x,
k,
loss_coef,
attention_num_experts,
train=True,
batch_coordinate=None,
**kwargs):
"""Attention using a mixture of experts.
Positions sent to the same expert can attend to each other.
The mixture of experts is "local" in that it is replicated on each
datashard.
  local_moe flattens all batches, so to avoid problems with padding (e.g. all
  padding going to the same expert, self-attention attending to non-null
  padding tokens, ...), the padding should be removed beforehand.
Args:
x: a Tensor with shape [batch, length, depth] or [1, batch*length, depth]
k: The number of experts to dispatch each example to
loss_coef: a scalar. A multiplier for the expert loss
attention_num_experts: The number of experts to use
train: a boolean for the current mode
batch_coordinate (tf.Tensor): int32 tensor of shape [1, batch*length, 1]
containing the batch ids. If None, deduced from first dim of x.
**kwargs: Arguments to forward to self_attention_expert
Returns:
y: a Tensor with shape [batch, length, depth]
loss: a Scalar
""" |
if batch_coordinate is None:
batch_coordinate = tf.expand_dims(
coordinate_tensor(common_layers.shape_list(x)[:-1], axis=0), axis=-1)
with tf.variable_scope("local_expert_attention"):
additional_dispatch_params = {"batch_coordinate": batch_coordinate}
return expert_utils.local_moe(
x,
train,
functools.partial(self_attention_expert, **kwargs),
attention_num_experts,
k=k,
loss_coef=loss_coef,
pass_x=True,
pass_gates=False,
additional_dispatch_params=additional_dispatch_params,
) |
<SYSTEM_TASK:>
Perform dot product on a subset of the sequence.
<END_TASK>
<USER_TASK:>
Description:
def expert_dot_product(q, k, v, info_q, info_k):
"""Perform dot product on a subset of the sequence.
  Can add a mask to the attention to prevent sequences from attending to each
  other and to prevent attention to the future.
Args:
q (tf.Tensor): Queries of shape [length_expert_q, depth_k]
k (tf.Tensor): Keys of shape [length_expert_k, depth_k]
v (tf.Tensor): Values of shape [length_expert_k, depth_v]
info_q (BatchInfo): Batch info for queries. If None, no mask is added
info_k (BatchInfo): Batch info for keys
Returns:
tf.Tensor: dot product attention output ([length_expert_q, depth_v])
""" |
length_q = common_layers.shape_list(q)[0]
length_k = common_layers.shape_list(k)[0]
depth_v = v.get_shape().as_list()[-1]
# Create the mask
bias = attention_bias_coordinates(info_q.coordinates, info_k.coordinates)
if info_k.order is not None:
bias += attention_bias_future(info_q.order, info_k.order)
# Restore batch and head dimension
q, k, v = [tf.expand_dims(tf.expand_dims(t, 0), 0) for t in (q, k, v)]
def is_zero():
zeros = tf.zeros(shape=[1, 1, length_q, depth_v], dtype=tf.float32)
zeros = tf.Print(zeros, [length_k, length_q], "length_k/length_q: ")
return zeros
def is_not_zero():
return dot_product_attention(
q,
k,
v,
bias=bias,
# No image summary to avoid "Retval[0] does not have value" (because
# inside a condition)
make_image_summary=False,
)
# TODO(epot): Should make sure a query gets at least one key. Because the
# different sequences of a batch are merged, it's possible that a
  # query from a sequence only receives memory from another sequence, so
# with the mask, the query will perform a softmax on -infinity values.
# A hack could be to add at least one sequence of each batch on each group so
# the query can attend to at least one element.
# Softmax(Q.K)*V
v_out = tf.cond(
tf.logical_or(tf.equal(length_q, 0), tf.equal(length_k, 0)),
is_zero,
is_not_zero,
)
# Remove batch and head dimension
v_out = tf.squeeze(v_out, axis=0)
v_out = tf.squeeze(v_out, axis=0)
return v_out |
<SYSTEM_TASK:>
Construct the graph with either tf.map_fn or a python for loop.
<END_TASK>
<USER_TASK:>
Description:
def map_fn_switch(fn, elems, use_map_fn=True, **kwargs):
"""Construct the graph with either tf.map_fn or a python for loop.
  This function is mainly for benchmarking purposes.
  tf.map_fn is dynamic but is much slower than creating a static graph with
  a for loop. However, having a for loop makes the graph much longer to build
  and can consume too much RAM in a distributed setting.
Args:
    fn (fct): same as for tf.map_fn, but for now can only return a single
      tensor value (instead of a tuple of tensors in the general case)
    elems (tuple): same as for tf.map_fn
    use_map_fn (bool): If True, tf.map_fn is used; if False, a python for loop
      is used instead
**kwargs: Additional tf.map_fn arguments (ignored if use_map_fn is False)
Returns:
tf.Tensor: the output of tf.map_fn
""" |
if use_map_fn:
return tf.map_fn(fn, elems, **kwargs)
elems_unpacked = (tf.unstack(e) for e in elems)
out_unpacked = [fn(e) for e in zip(*elems_unpacked)]
out = tf.stack(out_unpacked)
return out |
<SYSTEM_TASK:>
Increase the length and change the dimensionality.
<END_TASK>
<USER_TASK:>
Description:
def deconv_elems_1d(x, factor, out_depth=None):
"""Increase the length and change the dimensionality.
  Expand/project each position of dim depth of the input into
  factor tokens of dim out_depth.
Args:
x (tf.Tensor): shape [batch_size, length, depth]
factor (int): Multiplicative factor of each tokens.
out_depth (int): Output depth (if None, keep depth constant)
Returns:
tf.Tensor: shape [batch_size, length*factor, out_depth]
""" |
out_depth = out_depth or x.get_shape().as_list()[-1]
x = tf.expand_dims(x, 1) # [batch_size, 1, length, depth]
x = layers().Conv2DTranspose(
filters=out_depth,
kernel_size=(1, factor),
strides=(1, factor),
padding="valid",
data_format="channels_last",
)(x) # [batch_size, 1, length*factor, out_depth]
x = tf.squeeze(x, 1) # [batch_size, length*factor, depth]
return x |
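# Illustrative example (added; not part of the original source): expanding
# every position into 3 positions while projecting the depth down to 16.
def _example_deconv_elems_1d():
  x = tf.zeros([2, 6, 32])
  expanded = deconv_elems_1d(x, factor=3, out_depth=16)
  # expanded: [2, 18, 16]
  return expanded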
<SYSTEM_TASK:>
Decrease the length and change the dimensionality.
<END_TASK>
<USER_TASK:>
Description:
def conv_elems_1d(x, factor, out_depth=None):
"""Decrease the length and change the dimensionality.
  Merge/restore/compress factor positions of dim depth of the input into
a single position of dim out_depth.
This is basically just a strided convolution without overlap
between each strides. The original length has to be divided by factor.
Args:
x (tf.Tensor): shape [batch_size, length, depth]
factor (int): Length compression factor.
out_depth (int): Output depth
Returns:
tf.Tensor: shape [batch_size, length//factor, out_depth]
""" |
out_depth = out_depth or x.get_shape().as_list()[-1]
# with tf.control_dependencies( # Dynamic assertion
# [tf.assert_equal(tf.shape(x)[1] % factor, 0)]):
x = tf.expand_dims(x, 1) # [batch_size, 1, length, depth]
x = layers().Conv2D(
filters=out_depth,
kernel_size=(1, factor),
strides=(1, factor),
padding="valid",
data_format="channels_last",
)(x) # [batch_size, 1, length//factor, out_depth]
x = tf.squeeze(x, 1) # [batch_size, length//factor, depth]
return x |
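# Illustrative example (added; not part of the original source): the reverse
# style of operation, compressing every 3 positions into a single one.
def _example_conv_elems_1d():
  x = tf.zeros([2, 18, 16])
  compressed = conv_elems_1d(x, factor=3, out_depth=32)
  # compressed: [2, 6, 32]; the length (18) must be divisible by factor (3).
  return compressed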
<SYSTEM_TASK:>
Reduce the length dimension using self attention.
<END_TASK>
<USER_TASK:>
Description:
def local_reduction_attention(x, block_length, multihead_params):
"""Reduce the length dimension using self attention.
Args:
x (tf.Tensor): float32 of shape [batch, length, depth]
block_length (int): Block length for local attention (Compression factor)
multihead_params (dict): parameters for multihead attention
Returns:
tf.Tensor: Compressed tensor of shape [batch, length // factor, depth]
""" |
@expert_utils.add_name_scope()
def dot_product_self_local_attention_flattened(q, k, v):
"""Strided block local self-attention.
No overlap between the blocks.
Args:
q (tf.Tensor): shape [batch, heads, length, depth_k]
k (tf.Tensor): shape [batch, heads, length, depth_k]
v (tf.Tensor): shape [batch, heads, length, depth_v]
Returns:
tf.Tensor: shape [batch, heads, length, depth_v]
"""
_, num_head, _, depth = q.get_shape().as_list()
# Extract the blocks
def pad_and_reshape(x):
"""Split the length dim into [num_block, block_length]."""
length_x = common_layers.shape_list(x)[2]
# Add some padding, but won't matter as the last block will never be
# attended by the query (after compression)
x = tf.pad(x, [[0, 0], [0, 0], [0, -length_x % block_length], [0, 0]])
x = tf.reshape(
x,
[
common_layers.shape_list(x)[0], # Batch
num_head, # Head
common_layers.shape_list(x)[2] // block_length, # Num blocks
block_length, # Block length
depth, # Depth
])
return x
q, k, v = [pad_and_reshape(t) for t in (q, k, v)]
# Perform attention on the flattened dot product
logits = tf.matmul(q, k, transpose_b=True)
logits = tf.reshape(
logits,
[
common_layers.shape_list(logits)[0], # Batch
num_head, # Head
common_layers.shape_list(logits)[2], # Num blocks
block_length**2, # Flatten last dimension
])
weights = tf.nn.softmax(logits)
weights = tf.reshape(
weights,
[
common_layers.shape_list(weights)[0], # Batch
num_head, # Head
common_layers.shape_list(weights)[2], # Num blocks
block_length,
block_length, # Restore the block length dimension
])
weights = tf.reduce_sum(weights, axis=3, keep_dims=True) # Compress block
v_out = tf.matmul(weights, v) # [1, block_length] @ [block_length, depth]
v_out = tf.squeeze(v_out, axis=3)
return v_out
return multihead_attention(
x,
None,
bias=None,
output_depth=x.get_shape().as_list()[-1],
attention_type=dot_product_self_local_attention_flattened,
**multihead_params) |
<SYSTEM_TASK:>
Reduce the length dimension by compressing with conv.
<END_TASK>
<USER_TASK:>
Description:
def multihead_self_attention_reduced(
x,
memory_antecedent=None,
bias=None,
factor=None,
multihead_params=None,
nonlinearity="none",
reduction_type="conv",
add_mask=True,
):
"""Reduce the length dimension by compressing with conv.
Args:
x (tf.Tensor): float32 of shape [batch, length, depth]
memory_antecedent (tf.Tensor): Unsupported for now
bias (tf.Tensor): Ignored
factor (int): compression factor for the memory sequence
multihead_params (dict): parameters for multihead attention
nonlinearity (str): Add some non-linearity after the memory block
reduction_type (str): type of compression
add_mask (bool): If True, add the bias to prevent attention to the future
Returns:
(tf.Tensor): float32 of shape [batch, length, depth]
Raises:
ValueError: If reduction_type or nonlinearity is invalid
""" |
if not factor or not multihead_params:
raise ValueError("factor and multihead_params should be set")
if memory_antecedent is not None:
raise NotImplementedError(
"multihead_self_attention_reduced only works with self-attention")
depth = x.get_shape().as_list()[-1]
  # Could try to have some overlap between the blocks, but that would create
  # conv artifacts, make it difficult to not attend to the future within one
  # group, and require special handling of the padding.
# Reduce the memory dimension
if reduction_type == "attention":
memory_x = local_reduction_attention(x, factor, multihead_params)
elif reduction_type == "conv":
# With valid padding, the last block won't be computed (not attended anyway)
memory_x = conv_elems_1d(x, factor)
else:
raise ValueError("Unknown reduction type {}".format(reduction_type))
if nonlinearity == "silu":
memory_x *= tf.nn.sigmoid(memory_x)
elif nonlinearity != "none":
raise ValueError("Unknown non linearity {}".format(nonlinearity))
memory_x = tf.concat(
# Add the first elem to make it attendable by everyone (otherwise the
# first block cannot attend to anything)
[x[:, :1, :], memory_x],
axis=1,
)
# Construct the bias
@expert_utils.add_name_scope()
def construct_bias_vectors(t, axis):
length = tf.to_float(common_layers.shape_list(t)[1])
length_coordinates = tf.range(length, dtype=tf.float32)
length_coordinates = tf.expand_dims(length_coordinates, axis=axis)
# [1, length_k] or [length_q, 1]
return length_coordinates
if add_mask: # Create mask to prevent attention to the future
bias = tf.to_float(
tf.greater(
            # Because we add the first elem to the memory block and it can be
            # attended by anyone, we don't need to add +1 anymore to prevent
            # self-attention. Use * factor to make sure the last tokens of a
            # block cannot attend the block
construct_bias_vectors(memory_x, 0) * factor,
# +epsilon to avoid float equality
construct_bias_vectors(x, 1) + 1e-3,
)) * -1e9
bias = tf.expand_dims(bias, axis=0)
bias = tf.expand_dims(bias, axis=0) # [1, 1, length_k, length_q]
else:
bias = None
return multihead_attention(
query_antecedent=x,
memory_antecedent=memory_x,
bias=bias,
output_depth=depth,
**multihead_params) |
<SYSTEM_TASK:>
Scaled dot-product attention. One head. One spatial dimension.
<END_TASK>
<USER_TASK:>
Description:
def scaled_dot_product_attention_simple(q, k, v, bias, name=None):
"""Scaled dot-product attention. One head. One spatial dimension.
Args:
q: a Tensor with shape [batch, length_q, depth_k]
k: a Tensor with shape [batch, length_kv, depth_k]
v: a Tensor with shape [batch, length_kv, depth_v]
bias: optional Tensor broadcastable to [batch, length_q, length_kv]
name: an optional string
Returns:
A Tensor.
""" |
with tf.variable_scope(
name, default_name="scaled_dot_product_attention_simple"):
scalar = tf.rsqrt(tf.to_float(common_layers.shape_list(q)[2]))
logits = tf.matmul(q * scalar, k, transpose_b=True)
if bias is not None:
logits += bias
weights = tf.nn.softmax(logits, name="attention_weights")
if common_layers.should_generate_summaries():
tf.summary.image(
"attention", tf.expand_dims(tf.pow(weights, 0.2), 3), max_outputs=1)
return tf.matmul(weights, v) |
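# Illustrative usage (added; not part of the original source): single-head
# attention between a query sequence of length 5 and a memory of length 7.
def _example_scaled_dot_product_attention_simple():
  q = tf.random_normal([2, 5, 16])
  k = tf.random_normal([2, 7, 16])
  v = tf.random_normal([2, 7, 32])
  out = scaled_dot_product_attention_simple(q, k, v, bias=None)
  # out: [2, 5, 32]
  return out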
<SYSTEM_TASK:>
Convert a group index to its bit representation.
<END_TASK>
<USER_TASK:>
Description:
def _idx_to_bits(self, i):
"""Convert an group index to its bit representation.""" |
bits = bin(i)[2:].zfill(self.nb_hyperplanes) # Pad the bits str with 0
return [-1.0 if b == "0" else 1.0 for b in bits] |
<SYSTEM_TASK:>
Return the bucket id of the given tensor.
<END_TASK>
<USER_TASK:>
Description:
def get_gates(self, x):
"""Return the bucket id of the given tensor.
Args:
x (tf.Tensor): float32 of shape [length, depth]
Returns:
tf.Tensor: One-hot vector int64 of shape [heads, length, nb_buckets]
containing the id of the bucket
""" |
  # The balance loss doesn't propagate to the rest of the network
x = tf.stop_gradient(x)
# [length, depth] * [depth, nb_vectors * replicat]
x = tf.matmul(x, self.t_vectors)
# [length, nb_vector * replicat]
x = tf.sign(x) # Get on which side of the hyperplane the keys are.
# x = tf.reshape(x, [-1, nb_replicat, nb_vector])
# [length, replicat, nb_vector] * [nb_vector, 2^nb_vector - 1]
x = tf.matmul(x, self.t_group, transpose_b=True) / self.nb_hyperplanes
# We get a similarity score for each of the group between [-1, 1]
# [length, (replicat,) 2^nb_vector - 1]
# Do an argmax to get the most likely group for each replicat
x = tf.argmax(x, axis=-1)
# [length(, replicat)]
# One-hot for compatibility with the sparse dispatcher
x = tf.one_hot(x, self.nb_buckets)
# TODO(epot): Use a loss to force an even distribution
return x |
<SYSTEM_TASK:>
The image encoder for the VAN.
<END_TASK>
<USER_TASK:>
Description:
def van_image_enc_2d(x, first_depth, reuse=False, hparams=None):
"""The image encoder for the VAN.
Similar architecture as Ruben's paper
(http://proceedings.mlr.press/v70/villegas17a/villegas17a.pdf).
Args:
x: The image to encode.
first_depth: The depth of the first layer. Depth is increased in subsequent
layers.
reuse: To reuse in variable scope or not.
hparams: The python hparams.
Returns:
The encoded image.
""" |
with tf.variable_scope('van_image_enc', reuse=reuse):
enc_history = [x]
enc = tf.layers.conv2d(
x, first_depth, 3, padding='same', activation=tf.nn.relu, strides=1)
enc = tf.contrib.layers.layer_norm(enc)
enc = tf.layers.conv2d(
enc, first_depth, 3, padding='same', activation=tf.nn.relu, strides=1)
enc = tf.nn.max_pool(enc, [1, 2, 2, 1], [1, 2, 2, 1], 'SAME')
enc = tf.nn.dropout(enc, hparams.van_keep_prob)
enc = tf.contrib.layers.layer_norm(enc)
enc_history.append(enc)
enc = tf.layers.conv2d(
enc,
first_depth * 2,
3,
padding='same',
activation=tf.nn.relu,
strides=1)
enc = tf.layers.conv2d(
enc,
first_depth * 2,
3,
padding='same',
activation=tf.nn.relu,
strides=1)
enc = tf.nn.max_pool(enc, [1, 2, 2, 1], [1, 2, 2, 1], 'SAME')
enc = tf.nn.dropout(enc, hparams.van_keep_prob)
enc = tf.contrib.layers.layer_norm(enc)
enc_history.append(enc)
enc = tf.layers.conv2d(
enc,
first_depth * 4,
3,
padding='same',
activation=tf.nn.relu,
strides=1)
enc = tf.layers.conv2d(
enc,
first_depth * 4,
3,
padding='same',
activation=tf.nn.relu,
strides=1)
enc = tf.layers.conv2d(
enc,
first_depth * 4,
3,
padding='same',
activation=tf.nn.relu,
strides=1)
enc = tf.nn.max_pool(enc, [1, 2, 2, 1], [1, 2, 2, 1], 'SAME')
return enc, enc_history |
<SYSTEM_TASK:>
The higher level structure encoder for the VAN.
<END_TASK>
<USER_TASK:>
Description:
def van_enc_2d(x, first_depth, reuse=False):
"""The higher level structure encoder for the VAN.
The high level structure is a vector instead of an image.
Args:
x: The higher level structure to encode.
first_depth: The depth of the first layer. Depth is increased in subsequent
layers.
reuse: To reuse in variable scope or not.
Returns:
The encoded image.
""" |
with tf.variable_scope('van_enc', reuse=reuse):
a = 4 # depends on the inputs size
b = 4
# a, b = 4,4
enc = tf.nn.relu(x)
enc = tf.layers.dense(enc, first_depth * a * b, tf.nn.relu)
enc = tf.contrib.layers.layer_norm(enc)
enc = tf.reshape(enc, [-1, a, b, first_depth])
enc = tf.layers.conv2d_transpose(
enc, first_depth, 3, padding='same', activation=tf.nn.relu, strides=1)
enc = tf.contrib.layers.layer_norm(enc)
enc = tf.layers.conv2d_transpose(
enc,
first_depth * 2,
3,
padding='same',
activation=tf.nn.relu,
strides=2)
van_higher_level_2 = tf.reshape(enc, [-1, a * 2 * b * 2 * first_depth * 2])
enc = tf.layers.conv2d_transpose(
enc,
first_depth * 2,
3,
padding='same',
activation=tf.nn.relu,
strides=1)
enc = tf.contrib.layers.layer_norm(enc)
enc = tf.layers.conv2d_transpose(
enc,
first_depth * 4,
3,
padding='same',
activation=tf.nn.relu,
strides=1)
van_higher_level_4 = tf.reshape(enc, [-1, a * 2 * b * 2 * first_depth * 4])
van_higher_level = tf.concat([x, van_higher_level_2, van_higher_level_4], 1)
return enc, van_higher_level |
<SYSTEM_TASK:>
The VAN decoder.
<END_TASK>
<USER_TASK:>
Description:
def van_dec_2d(x, skip_connections, output_shape, first_depth, hparams=None):
"""The VAN decoder.
Args:
x: The analogy information to decode.
skip_connections: The encoder layers which can be used as skip connections.
output_shape: The shape of the desired output image.
first_depth: The depth of the first layer of the van image encoder.
hparams: The python hparams.
Returns:
The decoded image prediction.
""" |
with tf.variable_scope('van_dec'):
dec = tf.layers.conv2d_transpose(
x, first_depth * 4, 3, padding='same', activation=tf.nn.relu, strides=2)
dec = tf.nn.dropout(dec, hparams.van_keep_prob)
dec = tf.contrib.layers.layer_norm(dec)
dec = tf.layers.conv2d_transpose(
dec,
first_depth * 4,
3,
padding='same',
activation=tf.nn.relu,
strides=1)
dec = tf.nn.dropout(dec, hparams.van_keep_prob)
dec = tf.layers.conv2d_transpose(
dec,
first_depth * 2,
3,
padding='same',
activation=tf.nn.relu,
strides=1)
dec = tf.nn.dropout(dec, hparams.van_keep_prob)
dec = tf.contrib.layers.layer_norm(dec)
dec = tf.layers.conv2d_transpose(
dec,
first_depth * 2,
3,
padding='same',
activation=tf.nn.relu,
strides=2)
dec = tf.nn.dropout(dec, hparams.van_keep_prob)
dec = tf.layers.conv2d_transpose(
dec, first_depth, 3, padding='same', activation=tf.nn.relu, strides=1)
dec = tf.nn.dropout(dec, hparams.van_keep_prob)
dec = tf.contrib.layers.layer_norm(dec)
dec = tf.layers.conv2d_transpose(
dec,
output_shape[3] + 1,
3,
padding='same',
activation=tf.nn.relu,
strides=2)
dec = tf.nn.dropout(dec, hparams.van_keep_prob)
out_mask = tf.layers.conv2d_transpose(
dec, output_shape[3] + 1, 3, strides=1, padding='same', activation=None)
mask = tf.nn.sigmoid(out_mask[:, :, :, 3:4])
out = out_mask[:, :, :, :3]
return out * mask + skip_connections[0] * (1 - mask) |
<SYSTEM_TASK:>
Implements the deep analogy computation.
<END_TASK>
<USER_TASK:>
Description:
def analogy_computation_2d(f_first_enc,
f_first_frame,
f_current_enc,
first_depth):
"""Implements the deep analogy computation.""" |
with tf.variable_scope('analogy_computation'):
frame_enc_diff = f_first_frame - f_first_enc
frame_enc_diff_enc = tf.layers.conv2d(
frame_enc_diff,
first_depth * 4,
3,
padding='same',
activation=tf.nn.relu,
strides=1)
f_current_enc_enc = tf.layers.conv2d(
f_current_enc,
first_depth * 4,
3,
padding='same',
activation=tf.nn.relu,
strides=1)
analogy = tf.concat([frame_enc_diff_enc, f_current_enc_enc], 3)
analogy = tf.layers.conv2d(
analogy,
first_depth * 4,
3,
padding='same',
activation=tf.nn.relu,
strides=1)
analogy = tf.contrib.layers.layer_norm(analogy)
analogy = tf.layers.conv2d(
analogy,
first_depth * 4,
3,
padding='same',
activation=tf.nn.relu,
strides=1)
return tf.layers.conv2d(
analogy,
first_depth * 4,
3,
padding='same',
activation=tf.nn.relu,
strides=1) |
<SYSTEM_TASK:>
Implements a VAN.
<END_TASK>
<USER_TASK:>
Description:
def van(first_enc,
first_frame,
current_enc,
gt_image,
reuse=False,
scope_prefix='',
hparams=None):
"""Implements a VAN.
Args:
first_enc: The first encoding.
first_frame: The first ground truth frame.
current_enc: The encoding of the frame to generate.
gt_image: The ground truth image, only used for regularization.
reuse: To reuse in variable scope or not.
scope_prefix: The prefix before the scope name.
hparams: The python hparams.
Returns:
The generated image.
""" |
with tf.variable_scope(scope_prefix + 'van', reuse=reuse):
output_shape = first_frame.get_shape().as_list()
output_shape[0] = -1
first_depth = 64
f_first_enc, _ = van_enc_2d(first_enc, first_depth)
f_first_frame, image_enc_history = van_image_enc_2d(
first_frame, first_depth, hparams=hparams)
f_current_enc, van_higher_level = van_enc_2d(
current_enc, first_depth, reuse=True)
f_gt_image, _ = van_image_enc_2d(gt_image, first_depth, True,
hparams=hparams)
analogy_t = analogy_computation_2d(
f_first_enc, f_first_frame, f_current_enc, first_depth)
enc_img = f_current_enc + analogy_t
img = van_dec_2d(
enc_img, image_enc_history, output_shape, first_depth, hparams=hparams)
batch_size = tf.to_float(tf.shape(first_enc)[0])
r_loss = tf.nn.l2_loss(f_gt_image - f_current_enc - analogy_t) / batch_size
return img, r_loss, van_higher_level |
<SYSTEM_TASK:>
VGG network to use as encoder without the top few layers.
<END_TASK>
<USER_TASK:>
Description:
def encoder_vgg(x, enc_final_size, reuse=False, scope_prefix='', hparams=None,
is_training=True):
"""VGG network to use as encoder without the top few layers.
Can be pretrained.
Args:
x: The image to encode. In the range 0 to 1.
enc_final_size: The desired size of the encoding.
reuse: To reuse in variable scope or not.
scope_prefix: The prefix before the scope name.
hparams: The python hparams.
is_training: boolean value indicating if training is happening.
Returns:
The generated image.
""" |
with tf.variable_scope(scope_prefix + 'encoder', reuse=reuse):
# Preprocess input
x *= 256
x = x - COLOR_NORMALIZATION_VECTOR
with arg_scope(vgg.vgg_arg_scope()):
# Padding because vgg_16 accepts images of size at least VGG_IMAGE_SIZE.
x = tf.pad(x, [[0, 0], [0, VGG_IMAGE_SIZE - IMG_WIDTH],
[0, VGG_IMAGE_SIZE - IMG_HEIGHT], [0, 0]])
_, end_points = vgg.vgg_16(
x,
num_classes=enc_final_size,
is_training=is_training)
pool5_key = [key for key in end_points.keys() if 'pool5' in key]
assert len(pool5_key) == 1
enc = end_points[pool5_key[0]]
# Undoing padding.
enc = tf.slice(enc, [0, 0, 0, 0], [-1, 2, 2, -1])
enc_shape = enc.get_shape().as_list()
enc_shape[0] = -1
enc_size = enc_shape[1] * enc_shape[2] * enc_shape[3]
enc_flat = tf.reshape(enc, (-1, enc_size))
enc_flat = tf.nn.dropout(enc_flat, hparams.enc_keep_prob)
enc_flat = tf.layers.dense(
enc_flat,
enc_final_size,
kernel_initializer=tf.truncated_normal_initializer(stddev=1e-4,))
if hparams.enc_pred_use_l2norm:
enc_flat = tf.nn.l2_normalize(enc_flat, 1)
return enc_flat |
<SYSTEM_TASK:>
Constructs the tensorflow graph of the hierarchical model.
<END_TASK>
<USER_TASK:>
Description:
def construct_model(images,
actions=None,
context_frames=2,
hparams=None,
is_training=True):
"""Constructs the tensorflow graph of the hierarchical model.""" |
pred_depth = 20
enc_out_all, pred_out_all, van_out_all, van_on_enc_all = [], [], [], []
lstm_states = [None] * (pred_depth + 2)
enc_out = encoder_vgg(
images[0], hparams.enc_size, False, scope_prefix='timestep/',
hparams=hparams, is_training=is_training)
enc_out = tf.identity(enc_out, 'enc_out')
enc_out_all.append(enc_out)
num_timesteps = len(actions) - 1
sum_freq = int(num_timesteps / 4 + 1)
reuse = False
for timestep, action in zip(range(len(actions) - 1), actions[:-1]):
done_warm_start = timestep > context_frames - 1
with tf.variable_scope('timestep', reuse=reuse):
if done_warm_start:
pred_input = pred_out_all[-1]
else:
pred_input = enc_out_all[-1]
pred_out = predictor(
pred_input, action, lstm_states, pred_depth, False, hparams=hparams)
pred_out = tf.identity(pred_out, 'pred_out')
if timestep % sum_freq == 0: # and not hparams.use_tpu:
tf.summary.histogram('pred_out', pred_out)
pred_out_all.append(pred_out)
if timestep % sum_freq == 0: # and not hparams.use_tpu:
tf.summary.histogram('lstm_state', lstm_states[0])
van_out, _, _ = van(
enc_out_all[0],
images[0],
pred_out,
images[timestep + 1],
tf.AUTO_REUSE,
hparams=hparams)
van_out = tf.identity(van_out, 'van_out')
van_out_all.append(van_out)
enc_out = encoder_vgg(
images[timestep + 1], hparams.enc_size, True, hparams=hparams,
is_training=is_training)
enc_out = tf.identity(enc_out, 'enc_out')
if timestep % sum_freq == 0: # and not hparams.use_tpu:
tf.summary.histogram('enc_out', enc_out)
enc_out_all.append(enc_out)
van_input = images[0]
enc_noise = tf.zeros_like(enc_out)
if timestep % sum_freq == 0: # and not hparams.use_tpu:
tf.summary.histogram('enc_noise', enc_noise)
van_on_enc, _, _ = van(
enc_out_all[0],
van_input,
enc_out + enc_noise,
images[timestep + 1],
tf.AUTO_REUSE,
hparams=hparams)
van_on_enc = tf.identity(van_on_enc, 'van_on_enc')
van_on_enc_all.append(van_on_enc)
reuse = True
return enc_out_all, pred_out_all, van_out_all, van_on_enc_all |
<SYSTEM_TASK:>
Image quality metric based on maximal signal power vs. power of the noise.
<END_TASK>
<USER_TASK:>
Description:
def peak_signal_to_noise_ratio(true, pred):
"""Image quality metric based on maximal signal power vs. power of the noise.
Args:
true: the ground truth image.
pred: the predicted image.
Returns:
peak signal to noise ratio (PSNR)
""" |
return 10.0 * tf.log(1.0 / mean_squared_error(true, pred)) / tf.log(10.0) |
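# Worked example (added; not part of the original source): for images scaled to
# [0, 1], a mean squared error of 0.01 corresponds to
# 10 * log10(1 / 0.01) = 20 dB.
def _example_peak_signal_to_noise_ratio():
  true = tf.zeros([1, 4, 4, 3])
  pred = tf.fill([1, 4, 4, 3], 0.1)  # constant error of 0.1 -> MSE of 0.01
  psnr = peak_signal_to_noise_ratio(true, pred)
  # psnr evaluates to ~20.0 (the helper is only called at runtime, after
  # mean_squared_error below is defined)
  return psnr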
<SYSTEM_TASK:>
L2 distance between tensors true and pred.
<END_TASK>
<USER_TASK:>
Description:
def mean_squared_error(true, pred):
"""L2 distance between tensors true and pred.
Args:
true: the ground truth image.
pred: the predicted image.
Returns:
mean squared error between ground truth and predicted image.
""" |
result = tf.reduce_sum(
tf.squared_difference(true, pred)) / tf.to_float(tf.size(pred))
return result |
<SYSTEM_TASK:>
L1 distance between tensors true and pred.
<END_TASK>
<USER_TASK:>
Description:
def l1_error(true, pred):
"""L1 distance between tensors true and pred.""" |
return tf.reduce_sum(tf.abs(true - pred)) / tf.to_float(tf.size(pred)) |
<SYSTEM_TASK:>
Calculates loss and psnr for predictions over multiple timesteps.
<END_TASK>
<USER_TASK:>
Description:
def calc_loss_psnr(gen_images, images, name, hparams=None, use_l1_loss=False):
"""Calculates loss and psnr for predictions over multiple timesteps.""" |
del hparams
with tf.name_scope(name):
loss, error, psnr_all = 0.0, 0.0, 0.0
for _, x, gx in zip(range(len(gen_images)), images, gen_images):
recon_cost = mean_squared_error(x, gx)
if use_l1_loss:
recon_cost = l1_error(x, gx)
error_i = l1_error(x, gx)
psnr_i = peak_signal_to_noise_ratio(x, gx)
psnr_all += psnr_i
error += error_i
loss += recon_cost
psnr_all /= tf.to_float(len(gen_images))
loss /= tf.to_float(len(gen_images))
error /= tf.to_float(len(gen_images))
# if not hparams.use_tpu:
tf.summary.scalar('psnr_all', psnr_all)
tf.summary.scalar('loss', loss)
return loss, psnr_all |
<SYSTEM_TASK:>
SV2P model for atari with softmax.
<END_TASK>
<USER_TASK:>
Description:
def next_frame_sv2p_atari_softmax():
"""SV2P model for atari with softmax.""" |
hparams = next_frame_sv2p_atari()
hparams.bottom = {}
hparams.loss = {}
hparams.top = {}
hparams.internal_loss = True
return hparams |
<SYSTEM_TASK:>
SV2P model with additional cutoff in L2 loss for environments like pong.
<END_TASK>
<USER_TASK:>
Description:
def next_frame_sv2p_cutoff():
"""SV2P model with additional cutoff in L2 loss for environments like pong.""" |
hparams = next_frame_sv2p()
hparams.video_modality_loss_cutoff = 0.4
hparams.video_num_input_frames = 4
hparams.video_num_target_frames = 1
return hparams |
<SYSTEM_TASK:>
Download and extract MSCOCO datasets to directory unless it is there.
<END_TASK>
<USER_TASK:>
Description:
def _get_mscoco(directory):
"""Download and extract MSCOCO datasets to directory unless it is there.""" |
for url in _MSCOCO_URLS:
filename = os.path.basename(url)
download_url = os.path.join(_MSCOCO_ROOT_URL, url)
path = generator_utils.maybe_download(directory, filename, download_url)
    unzip_dir = os.path.join(directory, filename[:-len(".zip")])
if not tf.gfile.Exists(unzip_dir):
zipfile.ZipFile(path, "r").extractall(directory) |
<SYSTEM_TASK:>
Image generator for MSCOCO captioning problem with token-wise captions.
<END_TASK>
<USER_TASK:>
Description:
def mscoco_generator(data_dir,
tmp_dir,
training,
how_many,
start_from=0,
eos_list=None,
vocab_filename=None):
"""Image generator for MSCOCO captioning problem with token-wise captions.
Args:
data_dir: path to the data directory.
tmp_dir: path to temporary storage directory.
training: a Boolean; if true, we use the train set, otherwise the test set.
how_many: how many images and labels to generate.
start_from: from which image to start.
eos_list: optional list of end of sentence tokens, otherwise use default
value `1`.
vocab_filename: file within `tmp_dir` to read vocabulary from.
Yields:
A dictionary representing the images with the following fields:
* image/encoded: the string encoding the image as JPEG,
* image/format: the string "jpeg" representing image format,
* image/class/label: a list of integers representing the caption,
* image/height: an integer representing the height,
* image/width: an integer representing the width.
Every field is actually a list of the corresponding type.
""" |
eos_list = [1] if eos_list is None else eos_list
def get_vocab():
"""Get vocab for caption text encoder."""
if data_dir is not None and vocab_filename is not None:
vocab_filepath = os.path.join(data_dir, vocab_filename)
if tf.gfile.Exists(vocab_filepath):
tf.logging.info("Found vocab file: %s", vocab_filepath)
vocab_symbolizer = text_encoder.SubwordTextEncoder(vocab_filepath)
return vocab_symbolizer
else:
raise ValueError("Vocab file does not exist: %s" % vocab_filepath)
return None
vocab_symbolizer = get_vocab()
_get_mscoco(tmp_dir)
caption_filepath = (
_MSCOCO_TRAIN_CAPTION_FILE if training else _MSCOCO_EVAL_CAPTION_FILE)
caption_filepath = os.path.join(tmp_dir, caption_filepath)
prefix = _MSCOCO_TRAIN_PREFIX if training else _MSCOCO_EVAL_PREFIX
caption_file = io.open(caption_filepath)
caption_json = json.load(caption_file)
# Dictionary from image_id to ((filename, height, width), captions).
image_dict = {}
for image in caption_json["images"]:
image_dict[image["id"]] = [(image["file_name"], image["height"],
image["width"]), []]
annotations = caption_json["annotations"]
annotation_count = len(annotations)
image_count = len(image_dict)
tf.logging.info("Processing %d images and %d labels\n" % (image_count,
annotation_count))
for annotation in annotations:
image_id = annotation["image_id"]
image_dict[image_id][1].append(annotation["caption"])
data = list(image_dict.values())[start_from:start_from + how_many]
random.shuffle(data)
for image_info, labels in data:
image_filename = image_info[0]
image_filepath = os.path.join(tmp_dir, prefix, image_filename)
with tf.gfile.Open(image_filepath, "rb") as f:
encoded_image_data = f.read()
height, width = image_info[1], image_info[2]
for label in labels:
if vocab_filename is None or vocab_symbolizer is None:
label = [ord(c) for c in label] + eos_list
else:
label = vocab_symbolizer.encode(label) + eos_list
yield {
"image/encoded": [encoded_image_data],
"image/format": ["jpeg"],
"image/class/label": label,
"image/height": [height],
"image/width": [width]
} |
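A hedged usage sketch: generating a handful of training examples with character-level captions (no vocab file). The two paths below are hypothetical; the first call downloads and extracts MSCOCO into tmp_dir, which takes a while.

for example in mscoco_generator(
    data_dir="/tmp/t2t_data", tmp_dir="/tmp/t2t_tmp", training=True,
    how_many=100):
  print(example["image/height"], example["image/width"],
        len(example["image/class/label"]))
  break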
<SYSTEM_TASK:>
Convert FLAGS to list of args suitable for passing on cmd line.
<END_TASK>
<USER_TASK:>
Description:
def flags_as_args():
"""Convert FLAGS to list of args suitable for passing on cmd line.""" |
if hasattr(FLAGS, "flag_values_dict"):
args_dict = FLAGS.flag_values_dict()
else:
args_dict = dict(FLAGS.__dict__["__flags"])
del args_dict["cloud_mlengine"]
# Configured later
del args_dict["t2t_usr_dir"]
args_dict.pop("h", None)
args_dict.pop("helpfull", None)
args_dict.pop("helpshort", None)
args_dict.pop("help", None)
args = []
for name, val in args_dict.items():
if val is None:
continue
if name.startswith("autotune"):
continue
args.extend(["--%s=%s" % (name, str(val))])
return args |
<SYSTEM_TASK:>
Decorator for Layers, overriding add_weight for trainable initializers.
<END_TASK>
<USER_TASK:>
Description:
def add_weight(cls):
"""Decorator for Layers, overriding add_weight for trainable initializers.""" |
@functools.wraps(cls.add_weight)
def _add_weight(self,
name=None,
shape=None,
dtype=None,
initializer=None,
regularizer=None,
**kwargs):
"""Adds weight."""
if isinstance(initializer, tf.keras.layers.Layer):
weight = initializer(shape, dtype)
self._trainable_weights.extend(initializer.trainable_weights) # pylint: disable=protected-access
self._non_trainable_weights.extend(initializer.non_trainable_weights) # pylint: disable=protected-access
if regularizer is not None:
# TODO(trandustin): Replace need for this with
# Layer._handle_weight_regularization. For Eager compatibility, random
# variable __init__s cannot apply TF ops (cl/220898007).
def loss_fn():
"""Creates a regularization loss `Tensor`."""
with tf.name_scope(name + '/Regularizer'):
return regularizer(initializer(shape, dtype))
self.add_loss(loss_fn)
return weight
return super(cls, self).add_weight(name=name,
shape=shape,
dtype=dtype,
initializer=initializer,
regularizer=regularizer,
**kwargs)
cls.add_weight = _add_weight
return cls |
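A rough usage sketch, not from the library: OnesInitializer and Scale below are hypothetical toy classes illustrating the intended pattern, where the initializer is itself a Layer carrying trainable weights that the decorated layer picks up through the patched add_weight.

import tensorflow as tf

class OnesInitializer(tf.keras.layers.Layer):
  """Toy trainable initializer: the initial value is `scale * ones(shape)`."""

  def __init__(self):
    super(OnesInitializer, self).__init__()
    self.scale = self.add_weight(
        'scale', shape=[], initializer=tf.ones_initializer())

  def __call__(self, shape, dtype=None):
    return self.scale * tf.ones(shape, dtype=dtype or tf.float32)

@add_weight
class Scale(tf.keras.layers.Layer):
  """Toy layer whose single weight is created through the patched add_weight."""

  def build(self, input_shape):
    self.w = self.add_weight(
        'w', shape=[int(input_shape[-1])], initializer=OnesInitializer())
    self.built = True

  def call(self, inputs):
    return inputs * self.w

layer = Scale()
outputs = layer(tf.zeros([2, 3]))
print(layer.trainable_weights)  # includes the initializer's `scale` variable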
<SYSTEM_TASK:>
Get the KL multiplier, either dynamically or schedule based.
<END_TASK>
<USER_TASK:>
Description:
def get_beta(self, kl_loss=0.0):
"""Get the KL multiplier, either dynamically or schedule based.
    If hparams.latent_loss_multiplier_dynamic is set to true, then beta
    is adjusted to keep the KL term under
    hparams.latent_loss_multiplier_epsilon. To do so, beta is updated at
    each iteration with steps of size hparams.latent_loss_multiplier_alpha.
    The same formulation can be derived by solving the Lagrangian with
    KL < epsilon as a constraint.
Args:
kl_loss: KL loss. Only used for dynamic adjustment.
Returns:
beta: the final value of beta.
""" |
if self.hparams.latent_loss_multiplier_dynamic:
beta = tf.Variable(self.hparams.latent_loss_multiplier,
trainable=False, dtype=tf.float32)
alpha = self.hparams.latent_loss_multiplier_alpha
epsilon = self.hparams.latent_loss_multiplier_epsilon
shadow_beta = beta + alpha * (kl_loss - epsilon)
      # Capping beta between 0 and 1. May need to change this later on.
shadow_beta = tf.maximum(shadow_beta, 0.0)
shadow_beta = tf.minimum(shadow_beta, 1.0)
update_op = tf.assign(beta, shadow_beta)
else:
beta = common_video.beta_schedule(
schedule=self.hparams.latent_loss_multiplier_schedule,
global_step=self.get_iteration_num(),
final_beta=self.hparams.latent_loss_multiplier,
decay_start=(self.hparams.num_iterations_1st_stage +
self.hparams.num_iterations_2nd_stage),
decay_end=self.hparams.anneal_end)
update_op = tf.identity(beta) # fake update for regular beta.
with tf.control_dependencies([update_op]):
tf.summary.scalar("beta", beta)
return beta |
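A plain-Python sketch of the dynamic update above, with hypothetical values for the alpha and epsilon hyperparameters: each step nudges beta by alpha times the amount the KL exceeds epsilon and clips the result to [0, 1].

alpha, epsilon = 1e-5, 1.0   # hypothetical hparams values
beta, kl = 0.001, 1.5
beta = min(max(beta + alpha * (kl - epsilon), 0.0), 1.0)  # -> 0.001005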
<SYSTEM_TASK:>
Get KL loss for all the predicted Gaussians.
<END_TASK>
<USER_TASK:>
Description:
def get_kl_loss(self, means, log_vars, means_p=None, log_vars_p=None):
"""Get KL loss for all the predicted Gaussians.""" |
kl_loss = 0.0
if means_p is None:
means_p = tf.unstack(tf.zeros_like(means))
if log_vars_p is None:
log_vars_p = tf.unstack(tf.zeros_like(log_vars))
enumerated_inputs = enumerate(zip(means, log_vars, means_p, log_vars_p))
if self.is_training and self.hparams.stochastic_model:
for i, (mean, log_var, mean_p, log_var_p) in enumerated_inputs:
kl_loss += common_layers.kl_divergence(mean, log_var, mean_p, log_var_p)
tf.summary.histogram("posterior_mean_%d" % i, mean)
tf.summary.histogram("posterior_log_var_%d" % i, log_var)
tf.summary.histogram("prior_mean_%d" % i, mean_p)
tf.summary.histogram("prior_log_var_%d" % i, log_var_p)
tf.summary.scalar("kl_raw", tf.reduce_mean(kl_loss))
beta = self.get_beta(kl_loss)
# information capacity from "Understanding disentangling in beta-VAE"
if self.hparams.information_capacity > 0.0:
kl_loss = tf.abs(kl_loss - self.hparams.information_capacity)
return beta * kl_loss |
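For reference, a small NumPy sketch of the closed-form KL divergence between diagonal Gaussians that `common_layers.kl_divergence` evaluates for each (mean, log_var) pair above; the exact batch reduction inside that helper may differ, so treat this only as the underlying formula.

import numpy as np

def diag_gaussian_kl(mean_q, log_var_q, mean_p, log_var_p):
  # KL(q || p) for diagonal Gaussians, summed over dimensions.
  return 0.5 * np.sum(
      log_var_p - log_var_q
      + (np.exp(log_var_q) + (mean_q - mean_p) ** 2) / np.exp(log_var_p)
      - 1.0)

print(diag_gaussian_kl(np.zeros(8), np.zeros(8), np.zeros(8), np.zeros(8)))  # 0.0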
<SYSTEM_TASK:>
Encode transformer inputs.
<END_TASK>
<USER_TASK:>
Description:
def transformer_encode(encoder_function, inputs, target_space, hparams,
attention_weights=None, features=None, losses=None,
**kwargs):
"""Encode transformer inputs.
Args:
encoder_function: the encoder function
inputs: Transformer inputs [batch_size, input_length, 1, hidden_dim] which
will be flattened along the two spatial dimensions.
target_space: scalar, target space ID.
hparams: hyperparameters for model.
    attention_weights: dictionary in which to store attention weights.
features: optionally pass the entire features dictionary as well. This is
needed now for "packed" datasets.
losses: optional list onto which to append extra training losses
**kwargs: additional arguments to pass to encoder_function
Returns:
Tuple of:
encoder_output: Encoder representation.
[batch_size, input_length, hidden_dim]
encoder_decoder_attention_bias: Bias and mask weights for
encoder-decoder attention. [batch_size, input_length]
""" |
inputs = common_layers.flatten4d3d(inputs)
encoder_input, self_attention_bias, encoder_decoder_attention_bias = (
transformer_prepare_encoder(
inputs, target_space, hparams, features=features))
mlperf_log.transformer_print(
key=mlperf_log.MODEL_HP_LAYER_POSTPROCESS_DROPOUT,
value=hparams.layer_prepostprocess_dropout,
hparams=hparams)
encoder_input = tf.nn.dropout(encoder_input,
1.0 - hparams.layer_prepostprocess_dropout)
attn_bias_for_padding = None
# Otherwise the encoder will just use encoder_self_attention_bias.
if hparams.unidirectional_encoder:
attn_bias_for_padding = encoder_decoder_attention_bias
encoder_output = encoder_function(
encoder_input,
self_attention_bias,
hparams,
nonpadding=features_to_nonpadding(features, "inputs"),
save_weights_to=attention_weights,
make_image_summary=not common_layers.is_xla_compiled(),
losses=losses,
attn_bias_for_padding=attn_bias_for_padding,
**kwargs)
return encoder_output, encoder_decoder_attention_bias |
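A hedged usage sketch (TF 1.x graph mode). transformer_encoder and transformer_base are assumed to be the standard encoder stack and hparams set defined elsewhere in this file; shapes are illustrative.

hparams = transformer_base()
inputs = tf.random_uniform([8, 50, 1, hparams.hidden_size])  # already-embedded inputs
target_space = tf.constant(1)  # scalar target-space id
encoder_output, encdec_bias = transformer_encode(
    transformer_encoder, inputs, target_space, hparams)
# encoder_output: [8, 50, hidden_size]; encdec_bias masks padded positions.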
<SYSTEM_TASK:>
Decode Transformer outputs from encoder representation.
<END_TASK>
<USER_TASK:>
Description:
def transformer_decode(decoder_function,
decoder_input,
encoder_output,
encoder_decoder_attention_bias,
decoder_self_attention_bias,
hparams,
attention_weights=None,
cache=None,
decode_loop_step=None,
nonpadding=None,
losses=None,
**kwargs):
"""Decode Transformer outputs from encoder representation.
Args:
decoder_function: the decoder function
decoder_input: inputs to bottom of the model. [batch_size, decoder_length,
hidden_dim]
encoder_output: Encoder representation. [batch_size, input_length,
hidden_dim]
encoder_decoder_attention_bias: Bias and mask weights for encoder-decoder
attention. [batch_size, input_length]
decoder_self_attention_bias: Bias and mask weights for decoder
self-attention. [batch_size, decoder_length]
hparams: hyperparameters for model.
    attention_weights: dictionary in which to store attention weights.
cache: dict, containing tensors which are the results of previous
attentions, used for fast decoding.
decode_loop_step: An integer, step number of the decoding loop. Only used
for inference on TPU.
nonpadding: optional Tensor with shape [batch_size, decoder_length]
losses: optional list onto which to append extra training losses
**kwargs: additional arguments to pass to decoder_function
Returns:
Final decoder representation. [batch_size, decoder_length, hidden_dim]
""" |
mlperf_log.transformer_print(
key=mlperf_log.MODEL_HP_LAYER_POSTPROCESS_DROPOUT,
value=hparams.layer_prepostprocess_dropout,
hparams=hparams)
decoder_input = tf.nn.dropout(decoder_input,
1.0 - hparams.layer_prepostprocess_dropout)
decoder_output = decoder_function(
decoder_input,
encoder_output,
decoder_self_attention_bias,
encoder_decoder_attention_bias,
hparams,
cache=cache,
decode_loop_step=decode_loop_step,
nonpadding=nonpadding,
save_weights_to=attention_weights,
losses=losses,
**kwargs)
if (common_layers.is_xla_compiled() and
hparams.mode == tf.estimator.ModeKeys.TRAIN):
# TPU does not react kindly to extra dimensions.
# TODO(noam): remove this once TPU is more forgiving of extra dims.
return decoder_output
else:
# Expand since t2t expects 4d tensors.
return tf.expand_dims(decoder_output, axis=2) |
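A hedged sketch continuing the encoder example above: embedded targets are shifted and given a causal bias by transformer_prepare_decoder (assumed to be defined earlier in this file), then decoded against the encoder output.

targets = common_layers.flatten4d3d(
    tf.random_uniform([8, 20, 1, hparams.hidden_size]))  # embedded targets
decoder_input, decoder_self_attention_bias = transformer_prepare_decoder(
    targets, hparams)
decoder_output = transformer_decode(
    transformer_decoder, decoder_input, encoder_output, encdec_bias,
    decoder_self_attention_bias, hparams)  # [8, 20, 1, hidden_size]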
<SYSTEM_TASK:>
Create the initial cache for Transformer fast decoding.
<END_TASK>
<USER_TASK:>
Description:
def _init_transformer_cache(cache, hparams, batch_size, attention_init_length,
encoder_output, encoder_decoder_attention_bias,
scope_prefix):
"""Create the initial cache for Transformer fast decoding.""" |
key_channels = hparams.attention_key_channels or hparams.hidden_size
value_channels = hparams.attention_value_channels or hparams.hidden_size
num_layers = hparams.num_decoder_layers or hparams.num_hidden_layers
vars_3d_num_heads = (
hparams.num_heads if hparams.get("attention_variables_3d") else 0)
if cache is None:
cache = {}
cache.update({
"layer_%d" % layer: { # pylint: disable=g-complex-comprehension
"k":
common_attention.split_heads(
tf.zeros([batch_size,
attention_init_length,
key_channels]), hparams.num_heads),
"v":
common_attention.split_heads(
tf.zeros([batch_size,
attention_init_length,
value_channels]), hparams.num_heads),
} for layer in range(num_layers)
})
  # If `ffn_layer` is "dense_relu_dense" or "conv_hidden_relu", then the cache
  # key "f" won't be used, which means that the shape of `cache["f"]` won't be
  # changed to `[beam_size*batch_size, decode_length, hparams.hidden_size]`,
  # which may cause an error when applying the `nest.map` reshape function
  # to it.
if hparams.ffn_layer not in ["dense_relu_dense", "conv_hidden_relu"]:
for layer in range(num_layers):
cache["layer_%d" % layer]["f"] = tf.zeros(
[batch_size, 0, hparams.hidden_size])
if encoder_output is not None:
for layer in range(num_layers):
layer_name = "layer_%d" % layer
with tf.variable_scope(
"%sdecoder/%s/encdec_attention/multihead_attention" %
(scope_prefix, layer_name)):
k_encdec = common_attention.compute_attention_component(
encoder_output,
key_channels,
name="k",
vars_3d_num_heads=vars_3d_num_heads)
k_encdec = common_attention.split_heads(k_encdec, hparams.num_heads)
v_encdec = common_attention.compute_attention_component(
encoder_output,
value_channels,
name="v",
vars_3d_num_heads=vars_3d_num_heads)
v_encdec = common_attention.split_heads(v_encdec, hparams.num_heads)
cache[layer_name]["k_encdec"] = k_encdec
cache[layer_name]["v_encdec"] = v_encdec
cache["encoder_output"] = encoder_output
cache["encoder_decoder_attention_bias"] = encoder_decoder_attention_bias
return cache |
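A hedged, decoder-only sketch of the cache this helper builds; transformer_base is again assumed to be the usual hparams set, and the printed shape is that of the zero-length key tensor after split_heads.

hparams = transformer_base()
hparams.num_decoder_layers = 2
cache = _init_transformer_cache(
    cache=None, hparams=hparams, batch_size=4, attention_init_length=0,
    encoder_output=None, encoder_decoder_attention_bias=None, scope_prefix="")
print(sorted(cache.keys()))         # ['layer_0', 'layer_1']
print(cache["layer_0"]["k"].shape)  # (4, num_heads, 0, hidden_size // num_heads)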