INSTRUCTION | RESPONSE
---|---|
Download filename from uri unless it's already in directory.
Copies a remote file to local if that local file does not already exist. If
the local file pre-exists this function call, it does not check that the local
file is a copy of the remote.
Remote filenames can be filepaths, any URI readable by tensorflow.gfile, or a
URL.
Args:
directory: path to the directory that will be used.
filename: name of the file to download to (do nothing if it already exists).
uri: URI to copy (or download) from.
Returns:
The path to the downloaded file. | def maybe_download(directory, filename, uri):
"""Download filename from uri unless it's already in directory.
Copies a remote file to local if that local file does not already exist. If
the local file pre-exists this function call, it does not check that the local
file is a copy of the remote.
Remote filenames can be filepaths, any URI readable by tensorflow.gfile, or a
URL.
Args:
directory: path to the directory that will be used.
filename: name of the file to download to (do nothing if it already exists).
uri: URI to copy (or download) from.
Returns:
The path to the downloaded file.
"""
tf.gfile.MakeDirs(directory)
filepath = os.path.join(directory, filename)
if tf.gfile.Exists(filepath):
tf.logging.info("Not downloading, file already found: %s" % filepath)
return filepath
tf.logging.info("Downloading %s to %s" % (uri, filepath))
try:
tf.gfile.Copy(uri, filepath)
except tf.errors.UnimplementedError:
if uri.startswith("http"):
inprogress_filepath = filepath + ".incomplete"
inprogress_filepath, _ = urllib.urlretrieve(
uri, inprogress_filepath, reporthook=download_report_hook)
# Print newline to clear the carriage return from the download progress
print()
tf.gfile.Rename(inprogress_filepath, filepath)
else:
      raise ValueError("Unrecognized URI: " + uri)
statinfo = os.stat(filepath)
tf.logging.info("Successfully downloaded %s, %s bytes." %
(filename, statinfo.st_size))
return filepath |
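A minimal usage sketch (the directory, filename, and URL below are hypothetical; it assumes the TensorFlow 1.x tf.gfile APIs used above are available):

corpus_path = maybe_download(
    "/tmp/t2t_datagen",                     # directory, created if missing
    "corpus-en-de.tgz",                     # local filename; skipped if already present
    "http://example.com/corpus-en-de.tgz")  # remote URI or URL to copy from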
Download filename from Google drive unless it's already in directory.
Args:
directory: path to the directory that will be used.
filename: name of the file to download to (do nothing if it already exists).
url: URL to download from.
Returns:
The path to the downloaded file. | def maybe_download_from_drive(directory, filename, url):
"""Download filename from Google drive unless it's already in directory.
Args:
directory: path to the directory that will be used.
filename: name of the file to download to (do nothing if it already exists).
url: URL to download from.
Returns:
The path to the downloaded file.
"""
if not tf.gfile.Exists(directory):
tf.logging.info("Creating directory %s" % directory)
tf.gfile.MakeDirs(directory)
filepath = os.path.join(directory, filename)
if tf.gfile.Exists(filepath):
tf.logging.info("Not downloading, file already found: %s" % filepath)
return filepath
  # Since the file is big, Drive will scan it for viruses and return a
  # warning page. We find the confirm token on this page and append it to the
  # URL to start the download.
confirm_token = None
session = requests.Session()
response = session.get(url, stream=True)
for k, v in response.cookies.items():
if k.startswith("download_warning"):
confirm_token = v
if confirm_token:
url = url + "&confirm=" + confirm_token
tf.logging.info("Downloading %s to %s" % (url, filepath))
response = session.get(url, stream=True)
# Now begin the download.
chunk_size = 16 * 1024
with open(filepath, "wb") as f:
for chunk in response.iter_content(chunk_size):
if chunk:
f.write(chunk)
# Print newline to clear the carriage return from the download progress
print()
statinfo = os.stat(filepath)
tf.logging.info("Successfully downloaded %s, %s bytes." % (filename,
statinfo.st_size))
return filepath |
Unzips from gz_path into new_path.
Args:
gz_path: path to the zipped file.
new_path: path to where the file will be unzipped. | def gunzip_file(gz_path, new_path):
"""Unzips from gz_path into new_path.
Args:
gz_path: path to the zipped file.
new_path: path to where the file will be unzipped.
"""
if tf.gfile.Exists(new_path):
tf.logging.info("File %s already exists, skipping unpacking" % new_path)
return
tf.logging.info("Unpacking %s to %s" % (gz_path, new_path))
# We may be unpacking into a newly created directory, add write mode.
  mode = stat.S_IRWXU | stat.S_IXGRP | stat.S_IRGRP | stat.S_IROTH
os.chmod(os.path.dirname(new_path), mode)
with gzip.open(gz_path, "rb") as gz_file:
with tf.gfile.GFile(new_path, mode="wb") as new_file:
for line in gz_file:
new_file.write(line) |
Inner implementation for vocab generators.
Args:
data_dir: The base directory where data and vocab files are stored. If None,
then do not save the vocab even if it doesn't exist.
vocab_filename: relative filename where vocab file is stored
vocab_size: target size of the vocabulary constructed by SubwordTextEncoder
generator: a generator that produces tokens from the vocabulary
max_subtoken_length: an optional integer. Set this to a finite value to
avoid quadratic costs during vocab building.
reserved_tokens: List of reserved tokens. `text_encoder.RESERVED_TOKENS`
should be a prefix of `reserved_tokens`. If `None`, defaults to
`RESERVED_TOKENS`.
Returns:
A SubwordTextEncoder vocabulary object. | def get_or_generate_vocab_inner(data_dir, vocab_filename, vocab_size,
generator, max_subtoken_length=None,
reserved_tokens=None):
"""Inner implementation for vocab generators.
Args:
data_dir: The base directory where data and vocab files are stored. If None,
then do not save the vocab even if it doesn't exist.
vocab_filename: relative filename where vocab file is stored
vocab_size: target size of the vocabulary constructed by SubwordTextEncoder
generator: a generator that produces tokens from the vocabulary
max_subtoken_length: an optional integer. Set this to a finite value to
avoid quadratic costs during vocab building.
reserved_tokens: List of reserved tokens. `text_encoder.RESERVED_TOKENS`
should be a prefix of `reserved_tokens`. If `None`, defaults to
`RESERVED_TOKENS`.
Returns:
A SubwordTextEncoder vocabulary object.
"""
if data_dir and vocab_filename:
vocab_filepath = os.path.join(data_dir, vocab_filename)
if tf.gfile.Exists(vocab_filepath):
tf.logging.info("Found vocab file: %s", vocab_filepath)
return text_encoder.SubwordTextEncoder(vocab_filepath)
else:
vocab_filepath = None
tf.logging.info("Generating vocab file: %s", vocab_filepath)
vocab = text_encoder.SubwordTextEncoder.build_from_generator(
generator, vocab_size, max_subtoken_length=max_subtoken_length,
reserved_tokens=reserved_tokens)
if vocab_filepath:
tf.gfile.MakeDirs(data_dir)
vocab.store_to_file(vocab_filepath)
return vocab |
Generate a vocabulary from the datasets in sources. | def get_or_generate_vocab(data_dir, tmp_dir, vocab_filename, vocab_size,
sources, file_byte_budget=1e6,
max_subtoken_length=None):
"""Generate a vocabulary from the datasets in sources."""
vocab_generator = generate_lines_for_vocab(tmp_dir, sources, file_byte_budget)
return get_or_generate_vocab_inner(data_dir, vocab_filename, vocab_size,
vocab_generator, max_subtoken_length) |
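The sources argument above is a list of (archive URL, files inside the archive) pairs, as read by generate_lines_for_vocab below; a hedged sketch with hypothetical URLs and filenames:

_SAMPLE_SOURCES = [
    # (URL of a tar/tgz archive, list of text files inside it to read)
    ("http://example.com/training-parallel.tgz",
     ["training/corpus.de", "training/corpus.en"]),
]
vocab = get_or_generate_vocab(
    data_dir="/tmp/t2t_data",
    tmp_dir="/tmp/t2t_datagen",
    vocab_filename="vocab.ende.32768",
    vocab_size=2**15,
    sources=_SAMPLE_SOURCES)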
Generate lines for vocabulary generation. | def generate_lines_for_vocab(tmp_dir, sources, file_byte_budget=1e6):
"""Generate lines for vocabulary generation."""
tf.logging.info("Generating vocab from: %s", str(sources))
for source in sources:
url = source[0]
filename = os.path.basename(url)
compressed_file = maybe_download(tmp_dir, filename, url)
for lang_file in source[1]:
tf.logging.info("Reading file: %s" % lang_file)
filepath = os.path.join(tmp_dir, lang_file)
# Extract from tar if needed.
if not tf.gfile.Exists(filepath):
read_type = "r:gz" if filename.endswith("tgz") else "r"
with tarfile.open(compressed_file, read_type) as corpus_tar:
corpus_tar.extractall(tmp_dir)
# For some datasets a second extraction is necessary.
if lang_file.endswith(".gz"):
new_filepath = os.path.join(tmp_dir, lang_file[:-3])
if tf.gfile.Exists(new_filepath):
tf.logging.info(
"Subdirectory %s already exists, skipping unpacking" % filepath)
else:
tf.logging.info("Unpacking subdirectory %s" % filepath)
gunzip_file(filepath, new_filepath)
filepath = new_filepath
with tf.gfile.GFile(filepath, mode="r") as source_file:
file_byte_budget_ = file_byte_budget
counter = 0
countermax = int(source_file.size() / file_byte_budget_ / 2)
for line in source_file:
if counter < countermax:
counter += 1
else:
if file_byte_budget_ <= 0:
break
line = line.strip()
file_byte_budget_ -= len(line)
counter = 0
yield line |
r"""Generate a vocabulary from a tabbed source file.
The source is a file of source, target pairs, where each line contains
a source string and a target string, separated by a tab ('\t') character.
The index parameter specifies 0 for the source or 1 for the target.
Args:
data_dir: path to the data directory.
tmp_dir: path to the temporary directory.
source_filename: the name of the tab-separated source file.
index: 0 for the source column or 1 for the target column.
vocab_filename: the name of the vocabulary file.
vocab_size: vocabulary size.
Returns:
The vocabulary. | def get_or_generate_tabbed_vocab(data_dir, tmp_dir, source_filename,
index, vocab_filename, vocab_size):
r"""Generate a vocabulary from a tabbed source file.
The source is a file of source, target pairs, where each line contains
a source string and a target string, separated by a tab ('\t') character.
The index parameter specifies 0 for the source or 1 for the target.
Args:
data_dir: path to the data directory.
tmp_dir: path to the temporary directory.
source_filename: the name of the tab-separated source file.
    index: 0 for the source column or 1 for the target column.
vocab_filename: the name of the vocabulary file.
vocab_size: vocabulary size.
Returns:
The vocabulary.
"""
def generate():
filepath = os.path.join(tmp_dir, source_filename)
tf.logging.info("Generating vocab from %s", filepath)
with tf.gfile.GFile(filepath, mode="r") as source_file:
for line in source_file:
line = line.strip()
if line and "\t" in line:
parts = line.split("\t", 1)
part = parts[index].strip()
yield part
return get_or_generate_vocab_inner(data_dir, vocab_filename, vocab_size,
generate()) |
Generate a vocabulary from txt files with example-per-line. | def get_or_generate_txt_vocab(data_dir, vocab_filename, vocab_size,
filepatterns):
"""Generate a vocabulary from txt files with example-per-line."""
if isinstance(filepatterns, str):
filepatterns = [filepatterns]
def generate():
tf.logging.info("Generating vocab from %s", filepatterns)
for filepattern in filepatterns:
for filename in tf.gfile.Glob(filepattern):
with tf.gfile.GFile(filename, mode="r") as source_file:
for line in source_file:
yield line.strip()
return get_or_generate_vocab_inner(data_dir, vocab_filename, vocab_size,
generate()) |
Shuffle a single file of records.
Args:
fname: a string
extra_fn: an optional function from list of TFRecords to list of TFRecords
to be called after shuffling. | def _shuffle_single(fname, extra_fn=None):
"""Shuffle a single file of records.
Args:
fname: a string
extra_fn: an optional function from list of TFRecords to list of TFRecords
to be called after shuffling.
"""
records = read_records(fname)
random.shuffle(records)
if extra_fn is not None:
records = extra_fn(records)
out_fname = fname.replace(UNSHUFFLED_SUFFIX, "")
write_records(records, out_fname)
tf.gfile.Remove(fname) |
Shuffles the dataset.
Args:
filenames: a list of strings
extra_fn: an optional function from list of records to list of records
to be called after shuffling a file. | def shuffle_dataset(filenames, extra_fn=None):
"""Shuffles the dataset.
Args:
filenames: a list of strings
extra_fn: an optional function from list of records to list of records
to be called after shuffling a file.
"""
if outputs_exist(filenames):
tf.logging.info("Skipping shuffle because output files exist")
return
tf.logging.info("Shuffling data...")
for filename in filenames:
_shuffle_single(filename, extra_fn=extra_fn)
tf.logging.info("Data shuffled.") |
Pack examples into longer examples.
If has_inputs=False, we are packing single-sequence examples with
targets only and no inputs.
In this case, we concatenate the targets from several examples to form
each new example. We insert a number of zeros for spacing between the
original sequences. This is to help the sequences stay separate
under convolutions. If chop_long_sequences is set, then any input sequence
longer than packed_length gets chopped up into multiple examples. Otherwise,
long sequences are emitted as singletons.
If has_inputs=True, then we are packing sequence-to-sequence
examples. We combine several examples by concatenating the inputs
(as above) and concatenating the targets (as above). Chopping of
long sequences is not supported.
The packed examples are represented as dictionaries containing:
"inputs", "targets": the packed sequences described above
"inputs_segmentation", "targets_segmentation":
Sequences aligned with "inputs", "targets" specifying to which original
sequence each position belongs. Numbering starts from 1, and 0 is used
for spacing. This information is useful for preventing attention across
segments.
e.g. [1 1 1 1 1 1 0 0 2 2 2 0 0 3 3 3 3 3 0 0 4 4 4]
"inputs_position", "targets_position":
Sequences aligned with "inputs", "targets" specifying position within
the original sequence. This is useful for positional encodings.
e.g. [0 1 2 3 4 5 0 0 0 1 2 0 0 0 1 2 3 4 0 0 0 1 2]
Args:
examples: a generator returning feature dictionaries.
has_inputs: a boolean
packed_length: an integer
spacing: an integer
queue_size: an integer
chop_long_sequences: a boolean
Yields:
feature dictionaries. | def pack_examples(examples,
has_inputs,
packed_length=256,
spacing=2,
queue_size=10,
chop_long_sequences=False):
"""Pack examples into longer examples.
If has_inputs=False, we are packing single-sequence examples with
targets only and no inputs.
In this case, we concatenate the targets from several examples to form
each new example. We insert a number of zeros for spacing between the
original sequences. This is to help the sequences stay separate
under convolutions. If chop_long_sequences is set, then any input sequence
longer than packed_length gets chopped up into multiple examples. Otherwise,
long sequences are emitted as singletons.
If has_inputs=True, then we are packing sequence-to-sequence
examples. We combine several examples by concatenating the inputs
(as above) and concatenating the targets (as above). Chopping of
long sequences is not supported.
The packed examples are represented as dictionaries containing:
"inputs", "targets": the packed sequences described above
"inputs_segmentation", "targets_segmentation":
Sequences aligned with "inputs", "targets" specifying to which original
sequence each position belongs. Numbering starts from 1, and 0 is used
for spacing. This information is useful for preventing attention across
segments.
e.g. [1 1 1 1 1 1 0 0 2 2 2 0 0 3 3 3 3 3 0 0 4 4 4]
"inputs_position", "targets_position":
Sequences aligned with "inputs", "targets" specifying position within
the original sequence. This is useful for positional encodings.
e.g. [0 1 2 3 4 5 0 0 0 1 2 0 0 0 1 2 3 4 0 0 0 1 2]
Args:
examples: a generator returning feature dictionaries.
has_inputs: a boolean
packed_length: an integer
spacing: an integer
queue_size: an integer
chop_long_sequences: a boolean
Yields:
feature dictionaries.
"""
packer = SequencePairPacker if has_inputs else SequencePacker
combined = []
for example in examples:
x = ((example["inputs"], example["targets"])
if has_inputs else example["targets"])
if chop_long_sequences and len(x) > packed_length:
assert not has_inputs
num_fragments = len(x) // packed_length
for i in range(num_fragments):
yield packer(
x[packed_length * i:packed_length * (i + 1)], spacing).to_dict()
x = x[packed_length * num_fragments:]
added = False
for c in combined:
if c.can_fit(x, packed_length):
c.add(x)
added = True
break
if not added:
if len(combined) == queue_size:
yield combined[0].to_dict()
combined = combined[1:]
combined.append(packer(x, spacing))
for c in combined:
yield c.to_dict() |
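A toy sketch of consuming the packing generator (the short lists stand in for token id sequences; SequencePacker/SequencePairPacker are assumed to be defined elsewhere in this module):

toy_examples = iter([
    {"targets": [3, 4, 5]},
    {"targets": [6, 7]},
    {"targets": [8, 9, 10, 11]},
])
for packed in pack_examples(toy_examples, has_inputs=False,
                            packed_length=16, spacing=2):
  # Each dict carries "targets", "targets_segmentation" and "targets_position".
  print(packed)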
Helper-function for packing a dataset which has already been batched.
See pack_dataset()
Relies on custom ops which require a custom compiled binary.
Faster than _pack_with_tf_ops(), and denser packing.
Args:
dataset: a dataset containing padded batches of examples.
keys: a list of strings (must have length 2)
length: an integer
Returns:
a dataset. | def _pack_with_custom_ops(dataset, keys, length):
"""Helper-function for packing a dataset which has already been batched.
See pack_dataset()
Relies on custom ops which require a custom compiled binary.
Faster than _pack_with_tf_ops(), and denser packing.
Args:
dataset: a dataset containing padded batches of examples.
keys: a list of strings (must have length 2)
length: an integer
Returns:
a dataset.
"""
from tensor2tensor.data_generators.ops import pack_sequences_ops # pylint: disable=g-import-not-at-top
# faster and better packing but requires custom-built binary.
k1, k2 = keys
def map_fn_custom(x):
"""Map-function."""
    (k1_packed, k1_segmentation, k1_position,
k2_packed, k2_segmentation, k2_position) = (
pack_sequences_ops.pack_sequences2(x[k1], x[k2], length))
packed = {
k1: k1_packed,
k1 + "_segmentation": k1_segmengation,
k1 + "_position": k1_position,
k2: k2_packed,
k2 + "_segmentation": k2_segmentation,
k2 + "_position": k2_position,
}
return tf.data.Dataset.from_tensor_slices(packed)
dataset = dataset.flat_map(map_fn_custom)
return dataset |
Make a temporary directory. | def make_tmp_dir(suffix="", prefix="tmp", dir=None): # pylint: disable=redefined-builtin
"""Make a temporary directory."""
if dir is None:
return tempfile.mkdtemp(suffix, prefix, dir)
else:
while True:
rand_term = random.randint(1, 9999)
tmp_dir = os.path.join(dir, "%s%d%s" % (prefix, rand_term, suffix))
if tf.gfile.Exists(tmp_dir):
continue
tf.gfile.MakeDirs(tmp_dir)
break
return tmp_dir |
Iterate over the records on disk for the Problem. | def tfrecord_iterator_for_problem(problem, data_dir,
dataset_split=tf.estimator.ModeKeys.TRAIN):
"""Iterate over the records on disk for the Problem."""
filenames = tf.gfile.Glob(problem.filepattern(data_dir, mode=dataset_split))
example_spec = problem.example_reading_spec()[0]
return tfrecord_iterator(filenames, example_spec=example_spec) |
Yields records from TFRecord files.
Args:
filenames: list<str>, list of TFRecord filenames to read from.
gzipped: bool, whether the TFRecord files are gzip-encoded.
example_spec: dict<str feature name, tf.VarLenFeature/tf.FixedLenFeature>,
if provided, will parse each record as a tensorflow.Example proto.
Yields:
Records (or parsed Examples, if example_spec is provided) from files. | def tfrecord_iterator(filenames, gzipped=False, example_spec=None):
"""Yields records from TFRecord files.
Args:
filenames: list<str>, list of TFRecord filenames to read from.
gzipped: bool, whether the TFRecord files are gzip-encoded.
example_spec: dict<str feature name, tf.VarLenFeature/tf.FixedLenFeature>,
if provided, will parse each record as a tensorflow.Example proto.
Yields:
Records (or parsed Examples, if example_spec is provided) from files.
"""
with tf.Graph().as_default():
dataset = tf.data.Dataset.from_tensor_slices(filenames)
def _load_records(filename):
return tf.data.TFRecordDataset(
filename,
compression_type=tf.constant("GZIP") if gzipped else None,
buffer_size=16 * 1000 * 1000)
dataset = dataset.flat_map(_load_records)
def _parse_example(ex_ser):
return tf.parse_single_example(ex_ser, example_spec)
if example_spec:
dataset = dataset.map(_parse_example, num_parallel_calls=32)
dataset = dataset.prefetch(100)
record_it = dataset.make_one_shot_iterator().get_next()
with tf.Session() as sess:
while True:
try:
ex = sess.run(record_it)
yield ex
except tf.errors.OutOfRangeError:
break |
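A hedged usage sketch (the shard path is hypothetical); without example_spec the iterator yields raw serialized records:

filenames = ["/tmp/t2t_data/my_problem-train-00000-of-00001"]
for i, raw_record in enumerate(tfrecord_iterator(filenames)):
  print("record %d: %d bytes" % (i, len(raw_record)))
  if i >= 2:  # peek at the first few records only
    break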
Create a fill-in-the-blanks training example from text.
Split on spaces, then cut into segments at random points. Alternate segments
are assigned to the two output strings. separator_symbol separates segments
within each of the outputs.
example:
text="The quick brown fox jumps over the lazy dog."
returns: ("X quick brown X the lazy X", "The X fox jumps over X dog.")
The two outputs can also be reversed to yield an instance of the same problem.
Args:
text: a string
separator_symbol: a string
Returns:
a pair of strings | def random_deinterleave(text, separator_symbol="X"):
"""Create a fill-in-the-blanks training example from text.
Split on spaces, then cut into segments at random points. Alternate segments
are assigned to the two output strings. separator_symbol separates segments
within each of the outputs.
example:
text="The quick brown fox jumps over the lazy dog."
returns: ("X quick brown X the lazy X", "The X fox jumps over X dog.")
The two outputs can also be reversed to yield an instance of the same problem.
Args:
text: a string
separator_symbol: a string
Returns:
a pair of strings
"""
words = text.strip().split(" ")
n = len(words)
if n <= 1:
return text, ""
cut = [False] * n
cut[0] = True
num_cuts = int(math.exp(random.uniform(0, math.log(n))))
for _ in range(num_cuts):
    cut[random.randint(1, n - 1)] = True
out = [[], []]
part = random.randint(0, 1)
for i in range(n):
if cut[i]:
out[part].append(separator_symbol)
part = 1 - part
out[part].append(words[i])
return " ".join(out[0]), " ".join(out[1]) |
The core Neural GPU. | def neural_gpu_body(inputs, hparams, name=None):
"""The core Neural GPU."""
with tf.variable_scope(name, "neural_gpu"):
def step(state, inp): # pylint: disable=missing-docstring
x = tf.nn.dropout(state, 1.0 - hparams.dropout)
for layer in range(hparams.num_hidden_layers):
x = common_layers.conv_gru(
x, (hparams.kernel_height, hparams.kernel_width),
hparams.hidden_size,
name="cgru_%d" % layer)
      # Padding input is zeroed-out in the modality; we check this by summing.
padding_inp = tf.less(tf.reduce_sum(tf.abs(inp), axis=[1, 2]), 0.00001)
new_state = tf.where(padding_inp, state, x) # No-op where inp is padding.
return new_state
return tf.foldl(
step,
tf.transpose(inputs, [1, 0, 2, 3]),
initializer=inputs,
parallel_iterations=1,
swap_memory=True) |
Improved Neural GPU as in https://arxiv.org/abs/1702.08727. | def diagonal_neural_gpu(inputs, hparams, name=None):
"""Improved Neural GPU as in https://arxiv.org/abs/1702.08727."""
with tf.variable_scope(name, "diagonal_neural_gpu"):
def step(state_tup, inp):
"""Single step of the improved Neural GPU."""
state, _ = state_tup
x = state
for layer in range(hparams.num_hidden_layers):
x, new_loss = common_layers.diagonal_conv_gru(
x, (hparams.kernel_height, hparams.kernel_width),
hparams.hidden_size,
dropout=hparams.dropout,
name="dcgru_%d" % layer)
      # Padding input is zeroed-out in the modality; we check this by summing.
padding_inp = tf.less(tf.reduce_sum(tf.abs(inp), axis=[1, 2]), 0.00001)
new_state = tf.where(padding_inp, state, x) # No-op where inp is padding.
return new_state, new_loss
final_state, losses = tf.scan(
step,
tf.transpose(inputs, [1, 0, 2, 3]),
initializer=(inputs, tf.constant(0.0)),
parallel_iterations=1,
swap_memory=True)
return final_state[0, :, :, :, :], 2.0 * tf.reduce_mean(losses) |
Helper to determine the shape of reorder output. | def _reorder_shape(input_shape, output=None): # pylint: disable=invalid-name
"""Helper to determine the shape of reorder output."""
if output is None:
return input_shape
return base.nested_map(output, lambda i: input_shape[i]) |
Reorder a tuple into another tuple.
For example, we can re-order (x, y) into (y, x) or even (y, (x, y), y).
The output argument specifies how to re-order, using integers that refer
to indices in the input tuple. For example, if
input = (x, y, z)
then
Reorder(input, output=(1, 0, 2)) = (y, x, z)
Reorder(input, output=(0, 0)) = (x, x)
Reorder(input, output=(0, (1, 1))) = (x, (y, y))
Reorder(input, output=((2, 0), (1, 1))) = ((z, x), (y, y))
By default (if no output is given) Reorder does nothing (Identity).
Args:
x: the input tuple to re-order.
params: layer parameters (unused).
output: the specification of the output tuple: a nested tuple of ints.
**kwargs: other arguments (unused).
Returns:
The re-ordered tuple with the same shape as output. | def Reorder(x, params, output=None, **kwargs):
"""Reorder a tuple into another tuple.
For example, we can re-order (x, y) into (y, x) or even (y, (x, y), y).
The output argument specifies how to re-order, using integers that refer
to indices in the input tuple. For example, if
input = (x, y, z)
then
Reorder(input, output=(1, 0, 2)) = (y, x, z)
Reorder(input, output=(0, 0)) = (x, x)
Reorder(input, output=(0, (1, 1))) = (x, (y, y))
Reorder(input, output=((2, 0), (1, 1))) = ((z, x), (y, y))
By default (if no output is given) Reorder does nothing (Identity).
Args:
x: the input tuple to re-order.
params: layer parameters (unused).
output: the specification of the output tuple: a nested tuple of ints.
**kwargs: other arguments (unused).
Returns:
The re-ordered tuple with the same shape as output.
"""
del params, kwargs
if output is None:
return x
return base.nested_map(output, lambda i: x[i]) |
Helper: sum a list of arrays or nested arrays. | def _nested_op(inputs, op): # pylint: disable=invalid-name
"""Helper: sum a list of arrays or nested arrays."""
# First the simple non-nested case.
if not isinstance(inputs[0], (list, tuple)):
return op(inputs)
# In the nested case, sum on each axis separately.
result_list = []
for i in range(len(inputs[0])):
result_list.append(_nested_op([x[i] for x in inputs], op=op))
if isinstance(inputs[0], list):
return result_list
return tuple(result_list) |
Implements a gating function on a (memory, gate, candidate) tuple.
Final update is memory * gate + (1-gate) * candidate
This gating equation may also be referred to as Highway Network.
Highway Networks: https://arxiv.org/abs/1505.00387
Args:
x: A tuple of (memory, gate, candidate)
Returns:
The result of applying gating. | def GateBranches(x, **unused_kwargs):
"""Implements a gating function on a (memory, gate, candidate) tuple.
Final update is memory * gate + (1-gate) * candidate
This gating equation may also be referred to as Highway Network.
Highway Networks: https://arxiv.org/abs/1505.00387
Args:
x: A tuple of (memory, gate, candidate)
Returns:
The result of applying gating.
"""
assert len(x) == 3, x
state, gate, candidate = x
return gate * state + (1.0 - gate) * candidate |
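A quick numeric check of the gating equation with plain numpy arrays (values are arbitrary):

import numpy as np

state = np.array([1.0, 2.0])
gate = np.array([0.25, 0.75])
candidate = np.array([0.0, 0.0])
# gate * state + (1 - gate) * candidate -> [0.25, 1.5]
print(GateBranches((state, gate, candidate)))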
Helper to determine the shape of Concatenate output. | def _concatenate_shape(input_shape, axis=-1): # pylint: disable=invalid-name
"""Helper to determine the shape of Concatenate output."""
ax = axis % len(input_shape[0])
concat_size = sum(shape[ax] for shape in input_shape)
out_shape = input_shape[0][:ax] + (concat_size,) + input_shape[0][ax+1:]
return out_shape |
Constructs a residual version of layers, summing input to layers output. | def Residual(*layers, **kwargs):
"""Constructs a residual version of layers, summing input to layers output."""
shortcut = kwargs.get('shortcut', Identity()) # pylint: disable=no-value-for-parameter
if len(layers) > 1:
return Serial(
Branch(), # pylint: disable=no-value-for-parameter
Parallel(Serial(*layers), shortcut),
SumBranches() # pylint: disable=no-value-for-parameter
)
elif len(layers) == 1:
return Serial(
Branch(), # pylint: disable=no-value-for-parameter
Parallel(layers[0], shortcut),
SumBranches() # pylint: disable=no-value-for-parameter
)
else:
raise ValueError('Empty residual combinator.') |
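A hedged sketch of wrapping layers in a residual connection; Dense and Relu are assumed to be available from the same combinator-style layers library as Serial, Branch and Parallel above:

# Residual block computing x + Relu(Dense(x)); the default shortcut is Identity.
res_block = Residual(
    Dense(512),
    Relu(),
)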
Train. | def train(
self,
env_fn,
hparams,
simulated,
save_continuously,
epoch,
sampling_temp=1.0,
num_env_steps=None,
env_step_multiplier=1,
eval_env_fn=None,
report_fn=None
):
"""Train."""
raise NotImplementedError() |
Adds default hparams for all of the variants of the Universal Transformer.
Args:
hparams: default hparams (usually one of the standard hparams from
transformer model (like "transformer_base")
Returns:
hparams with default values for Universal Transformers hyper-parameters | def update_hparams_for_universal_transformer(hparams):
"""Adds default hparams for all of the variants of the Universal Transformer.
Args:
hparams: default hparams (usually one of the standard hparams from
transformer model (like "transformer_base")
Returns:
hparams with default values for Universal Transformers hyper-parameters
"""
hparams.daisy_chain_variables = False # Breaks multi-gpu in while loops.
# If not None, mixes vanilla transformer with Universal Transformer.
# Options: None, "before_ut", and "after_ut".
hparams.add_hparam("mix_with_transformer", None)
  # Number of vanilla transformer layers to mix in with the Universal Transformer.
hparams.add_hparam("num_mixedin_layers", 2)
# Number of transformer layers within the recurrent block (default is 1).
hparams.add_hparam("num_inrecurrence_layers", 1)
# Type of recurrency:
# basic, highway, skip, dwa, act, rnn, gru, lstm.
hparams.add_hparam("recurrence_type", "basic")
  # Number of steps (equivalent to the number of layers in a vanilla transformer).
hparams.add_hparam("num_rec_steps", hparams.num_hidden_layers)
  # Add the positional embedding at each step (horizontal timing).
hparams.add_hparam("add_position_timing_signal", True)
if hparams.add_position_timing_signal:
hparams.pos = None
# Logic of position shifting when using timing signal:
# None, "random", "step"
hparams.add_hparam("position_start_index", None)
  # Add a step embedding at each step (vertical timing).
hparams.add_hparam("add_step_timing_signal", True)
# Either "learned" or "sinusoid"
hparams.add_hparam("step_timing_signal_type", "learned")
# Add or concat the timing signal (applied both on position and step timing).
# Options: "add" and "concat".
hparams.add_hparam("add_or_concat_timing_signal", "add")
# Add SRU at the beginning of each Universal Transformer step.
# This can be considered as a position timing signal
hparams.add_hparam("add_sru", False)
  # Type of ffn layer used inside the Universal Transformer block.
  # Options: "fc" and "sepconv".
hparams.add_hparam("transformer_ffn_type", "fc")
# Transform bias (in models with highway or skip connection).
hparams.add_hparam("transform_bias_init", -1.0)
hparams.add_hparam("couple_carry_transform_gates", True)
# Depth-wise attention (grid-transformer!) hparams:
# Adds depth embedding, if true.
hparams.add_hparam("depth_embedding", True)
# Learns attention weights for elements (instead of positions), if true.
hparams.add_hparam("dwa_elements", True)
# Type of ffn_layer used for gate in skip, highway, etc.
# "dense" or "dense_dropconnect".
# With dense_relu_dense, the bias/kernel initializations will not be applied.
hparams.add_hparam("gate_ffn_layer", "dense")
# LSTM forget bias for lstm style recurrence.
hparams.add_hparam("lstm_forget_bias", 1.0)
# Uses the memory at the last step as the final output, if true.
hparams.add_hparam("use_memory_as_final_state", False)
  # If true, also add an ffn unit to the transition function when using gru/lstm.
hparams.add_hparam("add_ffn_unit_to_the_transition_function", False)
# Type of act: basic/accumulated/global (instead of position-wise!)/random.
hparams.add_hparam("act_type", "basic")
# Max number of steps (forces halting at this step).
hparams.add_hparam("act_max_steps", 2 * hparams.num_hidden_layers)
hparams.add_hparam("act_halting_bias_init", 1.0)
hparams.add_hparam("act_epsilon", 0.01)
hparams.add_hparam("act_loss_weight", 0.01)
return hparams |
Base parameters for Universal Transformer. | def universal_transformer_base():
"""Base parameters for Universal Transformer."""
hparams = transformer.transformer_base()
# To have a similar capacity to the transformer_base with 6 layers,
# we need to increase the size of the UT's layer
# since, in fact, UT has a single layer repeating multiple times.
hparams.hidden_size = 1024
hparams.filter_size = 4096
hparams.num_heads = 16
hparams.layer_prepostprocess_dropout = 0.3
hparams = update_hparams_for_universal_transformer(hparams)
return hparams |
Multi-layer config for adaptive Transformer on TPU. | def adaptive_universal_transformer_multilayer_tpu():
"""Multi-layer config for adaptive Transformer on TPU."""
hparams = adaptive_universal_transformer_base_tpu()
hparams.num_inrecurrence_layers = 2
hparams.mix_with_transformer = "before_ut,after_ut"
hparams.num_mixedin_layers = 1
hparams.transformer_ffn_type = "sepconv"
# TODO(lukaszkaiser): the options below don't work on TPU yet, make them work.
# hparams.add_step_timing_signal = True
# hparams.add_sru = True
# hparams.self_attention_type = "dot_product_relative_v2"
# hparams.max_relative_position = 256
return hparams |
Multi-layer config for adaptive Transformer with hard attention. | def adaptive_universal_transformer_multilayer_hard():
"""Multi-layer config for adaptive Transformer with hard attention."""
hparams = adaptive_universal_transformer_multilayer_tpu()
hparams.batch_size = 256
hparams.hard_attention_k = 8
hparams.add_step_timing_signal = True
# hparams.add_sru = True # This is very slow on GPUs, does it help?
hparams.self_attention_type = "dot_product_relative_v2"
hparams.max_relative_position = 256
return hparams |
Range of hyperparameters. | def universal_transformer_base_range(rhp):
"""Range of hyperparameters."""
# After starting from base, set intervals for some parameters.
rhp.set_discrete("num_rec_steps", [6, 8, 10])
rhp.set_discrete("hidden_size", [1024, 2048, 4096])
rhp.set_discrete("filter_size", [2048, 4096, 8192])
rhp.set_discrete("num_heads", [8, 16, 32])
rhp.set_discrete("transformer_ffn_type", ["sepconv", "fc"])
rhp.set_float("learning_rate", 0.3, 3.0, scale=rhp.LOG_SCALE)
rhp.set_float("weight_decay", 0.0, 2.0) |
Range of hyperparameters. | def adaptive_universal_transformer_base_range(rhp):
"""Range of hyperparameters."""
# After starting from base, set intervals for some parameters.
rhp.set_discrete("act_max_steps", [8, 16, 32])
rhp.set_float("act_loss_weight", 0.0, 0.5)
rhp.set_discrete("hidden_size", [1024, 2048, 4096])
rhp.set_discrete("filter_size", [2048, 4096, 8192])
rhp.set_discrete("num_heads", [8, 16, 32])
rhp.set_discrete("transformer_ffn_type", ["sepconv", "fc"])
rhp.set_float("learning_rate", 0.3, 3.0, scale=rhp.LOG_SCALE)
rhp.set_float("weight_decay", 0.0, 2.0) |
Split channels in 3 parts. Shifts 1st and 3rd sections to left/right. | def DiagonalGate(x, params, **kwargs):
"""Split channels in 3 parts. Shifts 1st and 3rd sections to left/right."""
del params
del kwargs
# x : [batch, 1, length, depth]
x = np.pad(
x, [(0, 0), (0, 0), (1, 1), (0, 0)], mode='constant', constant_values=0.0)
depth = x.shape[-1] // 3
assert 3 * depth == x.shape[-1], ('Depth must be divisible by 3', depth,
x.shape)
xs = [
x[:, :, :-2, :depth], x[:, :, 1:-1, depth:2 * depth],
x[:, :, 2:, 2 * depth:3 * depth]
]
return np.concatenate(xs, axis=3) |
Build convolutional GRU with diagonal gating as in ImprovedNGPU. | def ConvDiagonalGRU(units, kernel_size=(3, 3)):
"""Build convolutional GRU with diagonal gating as in ImprovedNGPU."""
def BuildConv():
return layers.Conv(filters=units, kernel_size=kernel_size, padding='SAME')
return layers.GeneralGRUCell(
candidate_transform=BuildConv,
memory_transform=DiagonalGate,
gate_nonlinearity=layers.HardSigmoid,
candidate_nonlinearity=layers.HardTanh) |
Implementation of Neural GPU: https://arxiv.org/abs/1702.08727.
Args:
feature_depth: Number of memory channels
steps: Number of depthwise recurrence steps.
vocab_size: Vocabulary size.
Returns:
A NeuralGPU Stax model. | def NeuralGPU(feature_depth=96, steps=16, vocab_size=2):
"""Implementation of Neural GPU: https://arxiv.org/abs/1702.08727.
Args:
feature_depth: Number of memory channels
    steps: Number of depthwise recurrence steps.
vocab_size: Vocabulary size.
Returns:
A NeuralGPU Stax model.
"""
xs = []
xs.append(
layers.Embedding(feature_depth=feature_depth, vocab_size=vocab_size))
core = ConvDiagonalGRU(units=feature_depth)
xs.extend([core] * steps)
xs.append(layers.Dense(vocab_size))
xs.append(layers.LogSoftmax())
return layers.Serial(*xs) |
Strip ids_to_strip from the end ids. | def strip_ids(ids, ids_to_strip):
"""Strip ids_to_strip from the end ids."""
ids = list(ids)
while ids and ids[-1] in ids_to_strip:
ids.pop()
return ids |
Escape away underscores and OOV characters and append '_'.
This allows the token to be expressed as the concatenation of a list
of subtokens from the vocabulary. The underscore acts as a sentinel
which allows us to invertibly concatenate multiple such lists.
Args:
token: A unicode string to be escaped.
alphabet: A set of all characters in the vocabulary's alphabet.
Returns:
escaped_token: An escaped unicode string.
Raises:
ValueError: If the provided token is not unicode. | def _escape_token(token, alphabet):
"""Escape away underscores and OOV characters and append '_'.
This allows the token to be expressed as the concatenation of a list
of subtokens from the vocabulary. The underscore acts as a sentinel
which allows us to invertibly concatenate multiple such lists.
Args:
token: A unicode string to be escaped.
alphabet: A set of all characters in the vocabulary's alphabet.
Returns:
escaped_token: An escaped unicode string.
Raises:
ValueError: If the provided token is not unicode.
"""
if not isinstance(token, six.text_type):
raise ValueError("Expected string type for token, got %s" % type(token))
token = token.replace(u"\\", u"\\\\").replace(u"_", u"\\u")
ret = [c if c in alphabet and c != u"\n" else r"\%d;" % ord(c) for c in token]
return u"".join(ret) + "_" |
Transform a human-readable string into a sequence of int ids.
The ids should be in the range [num_reserved_ids, vocab_size). Ids [0,
num_reserved_ids) are reserved.
EOS is not appended.
Args:
s: human-readable string to be converted.
Returns:
ids: list of integers | def encode(self, s):
"""Transform a human-readable string into a sequence of int ids.
The ids should be in the range [num_reserved_ids, vocab_size). Ids [0,
num_reserved_ids) are reserved.
EOS is not appended.
Args:
s: human-readable string to be converted.
Returns:
ids: list of integers
"""
return [int(w) + self._num_reserved_ids for w in s.split()] |
Transform a sequence of int ids into a human-readable string.
EOS is not expected in ids.
Args:
ids: list of integers to be converted.
strip_extraneous: bool, whether to strip off extraneous tokens
(EOS and PAD).
Returns:
s: human-readable string. | def decode(self, ids, strip_extraneous=False):
"""Transform a sequence of int ids into a human-readable string.
EOS is not expected in ids.
Args:
ids: list of integers to be converted.
strip_extraneous: bool, whether to strip off extraneous tokens
(EOS and PAD).
Returns:
s: human-readable string.
"""
if strip_extraneous:
ids = strip_ids(ids, list(range(self._num_reserved_ids or 0)))
return " ".join(self.decode_list(ids)) |
Transform a sequence of int ids into their string versions.
This method supports transforming individual input/output ids to their
string versions so that sequence to/from text conversions can be visualized
in a human readable format.
Args:
ids: list of integers to be converted.
Returns:
strs: list of human-readable string. | def decode_list(self, ids):
"""Transform a sequence of int ids into a their string versions.
This method supports transforming individual input/output ids to their
string versions so that sequence to/from text conversions can be visualized
in a human readable format.
Args:
ids: list of integers to be converted.
Returns:
strs: list of human-readable string.
"""
decoded_ids = []
for id_ in ids:
if 0 <= id_ < self._num_reserved_ids:
decoded_ids.append(RESERVED_TOKENS[int(id_)])
else:
decoded_ids.append(id_ - self._num_reserved_ids)
return [str(d) for d in decoded_ids] |
Converts a space-separated string of tokens to a list of ids. | def encode(self, s):
"""Converts a space-separated string of tokens to a list of ids."""
sentence = s
tokens = sentence.strip().split()
if self._replace_oov is not None:
tokens = [t if t in self._token_to_id else self._replace_oov
for t in tokens]
ret = [self._token_to_id[tok] for tok in tokens]
return ret[::-1] if self._reverse else ret |
Load vocab from a file.
Args:
filename: The file to load vocabulary from. | def _init_vocab_from_file(self, filename):
"""Load vocab from a file.
Args:
filename: The file to load vocabulary from.
"""
with tf.gfile.Open(filename) as f:
tokens = [token.strip() for token in f.readlines()]
def token_gen():
for token in tokens:
yield token
self._init_vocab(token_gen(), add_reserved_tokens=False) |
Initialize tokens from a list of tokens.
It is ok if reserved tokens appear in the vocab list. They will be
removed. The set of tokens in vocab_list should be unique.
Args:
vocab_list: A list of tokens. | def _init_vocab_from_list(self, vocab_list):
"""Initialize tokens from a list of tokens.
It is ok if reserved tokens appear in the vocab list. They will be
removed. The set of tokens in vocab_list should be unique.
Args:
vocab_list: A list of tokens.
"""
def token_gen():
for token in vocab_list:
if token not in RESERVED_TOKENS:
yield token
self._init_vocab(token_gen()) |
Initialize vocabulary with tokens from token_generator. | def _init_vocab(self, token_generator, add_reserved_tokens=True):
"""Initialize vocabulary with tokens from token_generator."""
self._id_to_token = {}
non_reserved_start_index = 0
if add_reserved_tokens:
self._id_to_token.update(enumerate(RESERVED_TOKENS))
non_reserved_start_index = len(RESERVED_TOKENS)
self._id_to_token.update(
enumerate(token_generator, start=non_reserved_start_index))
# _token_to_id is the reverse of _id_to_token
self._token_to_id = dict((v, k)
for k, v in six.iteritems(self._id_to_token)) |
Write vocab file to disk.
Vocab files have one token per line. The file ends in a newline. Reserved
tokens are written to the vocab file as well.
Args:
filename: Full path of the file to store the vocab to. | def store_to_file(self, filename):
"""Write vocab file to disk.
Vocab files have one token per line. The file ends in a newline. Reserved
tokens are written to the vocab file as well.
Args:
filename: Full path of the file to store the vocab to.
"""
with tf.gfile.Open(filename, "w") as f:
for i in range(len(self._id_to_token)):
f.write(self._id_to_token[i] + "\n") |
Converts a sequence of subtoken ids to a native string.
Args:
ids: a list of integers in the range [0, vocab_size)
strip_extraneous: bool, whether to strip off extraneous tokens
(EOS and PAD).
Returns:
a native string | def decode(self, ids, strip_extraneous=False):
"""Converts a sequence of subtoken ids to a native string.
Args:
ids: a list of integers in the range [0, vocab_size)
strip_extraneous: bool, whether to strip off extraneous tokens
(EOS and PAD).
Returns:
a native string
"""
if strip_extraneous:
ids = strip_ids(ids, list(range(self._num_reserved_ids or 0)))
return unicode_to_native(
tokenizer.decode(self._subtoken_ids_to_tokens(ids))) |
Converts a list of tokens to a list of subtoken ids.
Args:
tokens: a list of strings.
Returns:
a list of integers in the range [0, vocab_size) | def _tokens_to_subtoken_ids(self, tokens):
"""Converts a list of tokens to a list of subtoken ids.
Args:
tokens: a list of strings.
Returns:
a list of integers in the range [0, vocab_size)
"""
ret = []
for token in tokens:
ret.extend(self._token_to_subtoken_ids(token))
return ret |
Converts token to a list of subtoken ids.
Args:
token: a string.
Returns:
a list of integers in the range [0, vocab_size) | def _token_to_subtoken_ids(self, token):
"""Converts token to a list of subtoken ids.
Args:
token: a string.
Returns:
a list of integers in the range [0, vocab_size)
"""
cache_location = hash(token) % self._cache_size
cache_key, cache_value = self._cache[cache_location]
if cache_key == token:
return cache_value
ret = self._escaped_token_to_subtoken_ids(
_escape_token(token, self._alphabet))
self._cache[cache_location] = (token, ret)
return ret |
Converts a list of subtoken ids to a list of tokens.
Args:
subtokens: a list of integers in the range [0, vocab_size)
Returns:
a list of strings. | def _subtoken_ids_to_tokens(self, subtokens):
"""Converts a list of subtoken ids to a list of tokens.
Args:
subtokens: a list of integers in the range [0, vocab_size)
Returns:
a list of strings.
"""
concatenated = "".join(
[self._subtoken_id_to_subtoken_string(s) for s in subtokens])
split = concatenated.split("_")
ret = []
for t in split:
if t:
unescaped = _unescape_token(t + "_")
if unescaped:
ret.append(unescaped)
return ret |
Converts a subtoken integer ID to a subtoken string. | def _subtoken_id_to_subtoken_string(self, subtoken):
"""Converts a subtoken integer ID to a subtoken string."""
if 0 <= subtoken < self.vocab_size:
return self._all_subtoken_strings[subtoken]
return u"" |
Converts an escaped token string to a list of subtoken IDs.
Args:
escaped_token: An escaped token as a unicode string.
Returns:
A list of subtoken IDs as integers. | def _escaped_token_to_subtoken_ids(self, escaped_token):
"""Converts an escaped token string to a list of subtoken IDs.
Args:
escaped_token: An escaped token as a unicode string.
Returns:
A list of subtoken IDs as integers.
"""
return [
self._subtoken_string_to_id[subtoken]
for subtoken in self._escaped_token_to_subtoken_strings(escaped_token)
] |
Converts an escaped token string to a list of subtoken strings.
Args:
escaped_token: An escaped token as a unicode string.
Returns:
A list of subtokens as unicode strings. | def _escaped_token_to_subtoken_strings(self, escaped_token):
"""Converts an escaped token string to a list of subtoken strings.
Args:
escaped_token: An escaped token as a unicode string.
Returns:
A list of subtokens as unicode strings.
"""
# NOTE: This algorithm is greedy; it won't necessarily produce the "best"
# list of subtokens.
ret = []
start = 0
token_len = len(escaped_token)
while start < token_len:
for end in range(
min(token_len, start + self._max_subtoken_len), start, -1):
subtoken = escaped_token[start:end]
if subtoken in self._subtoken_string_to_id:
ret.append(subtoken)
start = end
break
else: # Did not break
# If there is no possible encoding of the escaped token then one of the
# characters in the token is not in the alphabet. This should be
# impossible and would be indicative of a bug.
assert False, "Token substring not found in subtoken vocabulary."
return ret |
Builds a SubwordTextEncoder from the generated text.
Args:
generator: yields text.
target_size: int, approximate vocabulary size to create.
max_subtoken_length: Maximum length of a subtoken. If this is not set,
then the runtime and memory use of creating the vocab is quadratic in
the length of the longest token. If this is set, then it is instead
O(max_subtoken_length * length of longest token).
reserved_tokens: List of reserved tokens. The global variable
`RESERVED_TOKENS` must be a prefix of `reserved_tokens`. If this
argument is `None`, it will use `RESERVED_TOKENS`.
Returns:
SubwordTextEncoder with `vocab_size` approximately `target_size`. | def build_from_generator(cls,
generator,
target_size,
max_subtoken_length=None,
reserved_tokens=None):
"""Builds a SubwordTextEncoder from the generated text.
Args:
generator: yields text.
target_size: int, approximate vocabulary size to create.
max_subtoken_length: Maximum length of a subtoken. If this is not set,
then the runtime and memory use of creating the vocab is quadratic in
the length of the longest token. If this is set, then it is instead
O(max_subtoken_length * length of longest token).
reserved_tokens: List of reserved tokens. The global variable
`RESERVED_TOKENS` must be a prefix of `reserved_tokens`. If this
argument is `None`, it will use `RESERVED_TOKENS`.
Returns:
SubwordTextEncoder with `vocab_size` approximately `target_size`.
"""
token_counts = collections.defaultdict(int)
for item in generator:
for tok in tokenizer.encode(native_to_unicode(item)):
token_counts[tok] += 1
encoder = cls.build_to_target_size(
target_size, token_counts, 1, 1e3,
max_subtoken_length=max_subtoken_length,
reserved_tokens=reserved_tokens)
return encoder |
Builds a SubwordTextEncoder that has `vocab_size` near `target_size`.
Uses simple recursive binary search to find a minimum token count that most
closely matches the `target_size`.
Args:
target_size: Desired vocab_size to approximate.
token_counts: A dictionary of token counts, mapping string to int.
min_val: An integer; lower bound for the minimum token count.
max_val: An integer; upper bound for the minimum token count.
max_subtoken_length: Maximum length of a subtoken. If this is not set,
then the runtime and memory use of creating the vocab is quadratic in
the length of the longest token. If this is set, then it is instead
O(max_subtoken_length * length of longest token).
reserved_tokens: List of reserved tokens. The global variable
`RESERVED_TOKENS` must be a prefix of `reserved_tokens`. If this
argument is `None`, it will use `RESERVED_TOKENS`.
num_iterations: An integer; how many iterations of refinement.
Returns:
A SubwordTextEncoder instance.
Raises:
ValueError: If `min_val` is greater than `max_val`. | def build_to_target_size(cls,
target_size,
token_counts,
min_val,
max_val,
max_subtoken_length=None,
reserved_tokens=None,
num_iterations=4):
"""Builds a SubwordTextEncoder that has `vocab_size` near `target_size`.
Uses simple recursive binary search to find a minimum token count that most
closely matches the `target_size`.
Args:
target_size: Desired vocab_size to approximate.
token_counts: A dictionary of token counts, mapping string to int.
min_val: An integer; lower bound for the minimum token count.
max_val: An integer; upper bound for the minimum token count.
max_subtoken_length: Maximum length of a subtoken. If this is not set,
then the runtime and memory use of creating the vocab is quadratic in
the length of the longest token. If this is set, then it is instead
O(max_subtoken_length * length of longest token).
reserved_tokens: List of reserved tokens. The global variable
`RESERVED_TOKENS` must be a prefix of `reserved_tokens`. If this
argument is `None`, it will use `RESERVED_TOKENS`.
num_iterations: An integer; how many iterations of refinement.
Returns:
A SubwordTextEncoder instance.
Raises:
ValueError: If `min_val` is greater than `max_val`.
"""
if min_val > max_val:
raise ValueError("Lower bound for the minimum token count "
"is greater than the upper bound.")
if target_size < 1:
raise ValueError("Target size must be positive.")
if reserved_tokens is None:
reserved_tokens = RESERVED_TOKENS
def bisect(min_val, max_val):
"""Bisection to find the right size."""
present_count = (max_val + min_val) // 2
tf.logging.info("Trying min_count %d" % present_count)
subtokenizer = cls()
subtokenizer.build_from_token_counts(
token_counts, present_count, num_iterations,
max_subtoken_length=max_subtoken_length,
reserved_tokens=reserved_tokens)
# Being within 1% of the target size is ok.
is_ok = abs(subtokenizer.vocab_size - target_size) * 100 < target_size
# If min_val == max_val, we can't do any better than this.
if is_ok or min_val >= max_val or present_count < 2:
return subtokenizer
if subtokenizer.vocab_size > target_size:
other_subtokenizer = bisect(present_count + 1, max_val)
else:
other_subtokenizer = bisect(min_val, present_count - 1)
if other_subtokenizer is None:
return subtokenizer
if (abs(other_subtokenizer.vocab_size - target_size) <
abs(subtokenizer.vocab_size - target_size)):
return other_subtokenizer
return subtokenizer
return bisect(min_val, max_val) |
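A hedged usage sketch with toy token counts (real vocabularies are built from corpus-scale counts; SubwordTextEncoder is the class these methods belong to):

toy_counts = {u"low": 5, u"lower": 2, u"newest": 6, u"widest": 3}
encoder = SubwordTextEncoder.build_to_target_size(
    target_size=32, token_counts=toy_counts, min_val=1, max_val=1000)
print(encoder.vocab_size)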
Debugging dump of the current subtoken vocabulary. | def dump(self):
"""Debugging dump of the current subtoken vocabulary."""
subtoken_strings = [(i, s)
for s, i in six.iteritems(self._subtoken_string_to_id)]
print(u", ".join(u"{0} : '{1}'".format(i, s)
for i, s in sorted(subtoken_strings))) |
Initialize token information from a list of subtoken strings.
Args:
subtoken_strings: a list of subtokens
reserved_tokens: List of reserved tokens. We must have `reserved_tokens`
as None or the empty list, or else the global variable `RESERVED_TOKENS`
must be a prefix of `reserved_tokens`.
Raises:
ValueError: if reserved is not 0 or len(RESERVED_TOKENS). In this case, it
is not clear what the space is being reserved for, or when it will be
filled in. | def _init_subtokens_from_list(self, subtoken_strings, reserved_tokens=None):
"""Initialize token information from a list of subtoken strings.
Args:
subtoken_strings: a list of subtokens
reserved_tokens: List of reserved tokens. We must have `reserved_tokens`
as None or the empty list, or else the global variable `RESERVED_TOKENS`
must be a prefix of `reserved_tokens`.
Raises:
ValueError: if reserved is not 0 or len(RESERVED_TOKENS). In this case, it
is not clear what the space is being reserved for, or when it will be
filled in.
"""
if reserved_tokens is None:
reserved_tokens = []
if reserved_tokens:
self._all_subtoken_strings = reserved_tokens + subtoken_strings
else:
self._all_subtoken_strings = subtoken_strings
# we remember the maximum length of any subtoken to avoid having to
# check arbitrarily long strings.
self._max_subtoken_len = max([len(s) for s in subtoken_strings])
self._subtoken_string_to_id = {
s: i + len(reserved_tokens)
for i, s in enumerate(subtoken_strings) if s
}
# Initialize the cache to empty.
self._cache_size = 2 ** 20
self._cache = [(None, None)] * self._cache_size |
Load from a file object.
Args:
f: File object to load vocabulary from | def _load_from_file_object(self, f):
"""Load from a file object.
Args:
f: File object to load vocabulary from
"""
subtoken_strings = []
for line in f:
s = line.strip()
# Some vocab files wrap words in single quotes, but others don't
if ((s.startswith("'") and s.endswith("'")) or
(s.startswith("\"") and s.endswith("\""))):
s = s[1:-1]
subtoken_strings.append(native_to_unicode(s))
self._init_subtokens_from_list(subtoken_strings)
self._init_alphabet_from_tokens(subtoken_strings) |
Load from a vocab file. | def _load_from_file(self, filename):
"""Load from a vocab file."""
if not tf.gfile.Exists(filename):
raise ValueError("File %s not found" % filename)
with tf.gfile.Open(filename) as f:
self._load_from_file_object(f) |
Transform a string with a filename into a list of RGB integers.
Args:
s: path to the file with an image.
Returns:
ids: list of integers | def encode(self, s):
"""Transform a string with a filename into a list of RGB integers.
Args:
s: path to the file with an image.
Returns:
ids: list of integers
"""
try:
import matplotlib.image as im # pylint: disable=g-import-not-at-top
except ImportError as e:
tf.logging.warning(
"Reading an image requires matplotlib to be installed: %s", e)
raise NotImplementedError("Image reading not implemented.")
return im.imread(s) |
Transform a sequence of int ids into an image file.
Args:
ids: list of integers to be converted.
strip_extraneous: unused
Returns:
Path to the temporary file where the image was saved.
Raises:
ValueError: if the ids are not of the appropriate size. | def decode(self, ids, strip_extraneous=False):
"""Transform a sequence of int ids into an image file.
Args:
ids: list of integers to be converted.
strip_extraneous: unused
Returns:
Path to the temporary file where the image was saved.
Raises:
ValueError: if the ids are not of the appropriate size.
"""
del strip_extraneous
_, tmp_file_path = tempfile.mkstemp("_decode.png")
if self._height is None or self._width is None:
size = int(math.sqrt(len(ids) / self._channels))
length = size * size * self._channels
else:
size = None
length = self._height * self._width * self._channels
if len(ids) != length:
raise ValueError("Length of ids (%d) must be height (%d) x width (%d) x "
"channels (%d); %d != %d.\n Ids: %s"
% (len(ids), self._height, self._width, self._channels,
len(ids), length, " ".join([str(i) for i in ids])))
with tf.Graph().as_default():
raw = tf.constant(ids, dtype=tf.uint8)
if size is None:
img = tf.reshape(raw, [self._height, self._width, self._channels])
else:
img = tf.reshape(raw, [size, size, self._channels])
png = tf.image.encode_png(img)
op = tf.write_file(tmp_file_path, png)
with tf.Session() as sess:
sess.run(op)
return tmp_file_path |
Helper utility to make a tiled field of images from numpy arrays.
Args:
images: Image tensor in shape [N, W, H, C].
rows: Number of images per row in tiled image.
cols: Number of images per column in tiled image.
Returns:
A tiled image of shape [W * rows, H * cols, C].
Truncates incomplete rows. | def _pack_images(images, rows, cols):
"""Helper utility to make a tiled field of images from numpy arrays.
Args:
images: Image tensor in shape [N, W, H, C].
rows: Number of images per row in tiled image.
cols: Number of images per column in tiled image.
Returns:
A tiled image of shape [W * rows, H * cols, C].
Truncates incomplete rows.
"""
shape = onp.shape(images)
width, height, depth = shape[-3:]
images = onp.reshape(images, (-1, width, height, depth))
batch = onp.shape(images)[0]
rows = onp.minimum(rows, batch)
cols = onp.minimum(batch // rows, cols)
images = images[:rows * cols]
images = onp.reshape(images, (rows, cols, width, height, depth))
images = onp.transpose(images, [0, 2, 1, 3, 4])
images = onp.reshape(images, [rows * width, cols * height, depth])
return images |
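A quick shape check of the tiling, as a minimal numpy sketch (it assumes the _pack_images helper above is in scope):
import numpy as onp
batch = onp.zeros((7, 8, 8, 3))            # 7 images of 8x8x3
tile = _pack_images(batch, rows=2, cols=3)
print(tile.shape)                          # (16, 24, 3); the 7th image is truncated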
Transform sequence of float values into string (float values).
Args:
ids: array of floats to be converted.
strip_extraneous: unused
Returns:
String having space separated float values.
Raises:
ValueError: if the ids are not of the appropriate size. | def decode(self, ids, strip_extraneous=False):
"""Transform sequence of float values into string (float values).
Args:
ids: array of floats to be converted.
strip_extraneous: unused
Returns:
String having space separated float values.
Raises:
ValueError: if the ids are not of the appropriate size.
"""
del strip_extraneous
return " ".join([str(i) for i in ids]) |
Convert an operative config string to markdown format. | def markdownify_operative_config_str(string):
"""Convert an operative config string to markdown format."""
# TODO(b/37527917): Total hack below. Implement more principled formatting.
def process(line):
"""Convert a single line to markdown format."""
if not line.startswith('#'):
return ' ' + line
line = line[2:]
if line.startswith('===='):
return ''
if line.startswith('None'):
return ' # None.'
if line.endswith(':'):
return '#### ' + line
return line
output_lines = []
for line in string.splitlines():
procd_line = process(line)
if procd_line is not None:
output_lines.append(procd_line)
return '\n'.join(output_lines) |
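A small worked example; the gin-style config lines below are made up purely for illustration and assume the function above is in scope:
config_str = "\n".join([
    "# Parameters for model_fn:",
    "# ==============================================",
    "model_fn.hidden_size = 64",
    "model_fn.dropout = 0.1",
])
print(markdownify_operative_config_str(config_str))
# The "# Parameters ..." header becomes "#### Parameters for model_fn:",
# the "====" separator line becomes a blank line, and plain assignment
# lines come back indented.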
Close SummaryWriter. Final! | def close(self):
"""Close SummaryWriter. Final!"""
if not self._closed:
self._event_writer.close()
self._closed = True
del self._event_writer |
Saves scalar value.
Args:
tag: str: label for this data
value: int/float: number to log
step: int: training step | def scalar(self, tag, value, step=None):
"""Saves scalar value.
Args:
tag: str: label for this data
value: int/float: number to log
step: int: training step
"""
value = float(onp.array(value))
if step is None:
step = self._step
else:
self._step = step
summary = Summary(value=[Summary.Value(tag=tag, simple_value=value)])
self.add_summary(summary, step) |
Saves RGB image summary from onp.ndarray [H,W], [H,W,1], or [H,W,3].
Args:
tag: str: label for this data
image: ndarray: [H,W], [H,W,1], or [H,W,3] image to save, in greyscale or color
step: int: training step | def image(self, tag, image, step=None):
"""Saves RGB image summary from onp.ndarray [H,W], [H,W,1], or [H,W,3].
Args:
tag: str: label for this data
image: ndarray: [H,W], [H,W,1], or [H,W,3] image to save, in greyscale or color
step: int: training step
"""
image = onp.array(image)
if step is None:
step = self._step
else:
self._step = step
if len(onp.shape(image)) == 2:
image = image[:, :, onp.newaxis]
if onp.shape(image)[-1] == 1:
image = onp.repeat(image, 3, axis=-1)
image_strio = io.BytesIO()
plt.imsave(image_strio, image, format='png')
image_summary = Summary.Image(
encoded_image_string=image_strio.getvalue(),
colorspace=3,
height=image.shape[0],
width=image.shape[1])
summary = Summary(value=[Summary.Value(tag=tag, image=image_summary)])
self.add_summary(summary, step) |
Saves (rows, cols) tiled images from onp.ndarray.
If either rows or cols isn't given, it is determined automatically
from the size of the image batch; if neither is given, a long column
of images is produced. The image batch is truncated rather than padded
if it doesn't fill the final row.
Args:
tag: str: label for this data
images: ndarray: [N,H,W,1] or [N,H,W,3] to tile in 2d
step: int: training step
rows: int: number of rows in tile
cols: int: number of columns in tile | def images(self, tag, images, step=None, rows=None, cols=None):
"""Saves (rows, cols) tiled images from onp.ndarray.
If either rows or cols isn't given, it is determined automatically
from the size of the image batch; if neither is given, a long column
of images is produced. The image batch is truncated rather than padded
if it doesn't fill the final row.
Args:
tag: str: label for this data
images: ndarray: [N,H,W,1] or [N,H,W,3] to tile in 2d
step: int: training step
rows: int: number of rows in tile
cols: int: number of columns in tile
"""
images = onp.array(images)
if step is None:
step = self._step
else:
self._step = step
n_images = onp.shape(images)[0]
if rows is None and cols is None:
rows = 1
cols = n_images
elif rows is None:
rows = n_images // cols
elif cols is None:
cols = n_images // rows
tiled_images = _pack_images(images, rows, cols)
self.image(tag, tiled_images, step=step) |
Saves matplotlib plot output to summary image.
Args:
tag: str: label for this data
mpl_plt: matplotlib stateful pyplot object with prepared plotting state
step: int: training step
close_plot: bool: automatically closes plot | def plot(self, tag, mpl_plt, step=None, close_plot=True):
"""Saves matplotlib plot output to summary image.
Args:
tag: str: label for this data
mpl_plt: matplotlib stateful pyplot object with prepared plotting state
step: int: training step
close_plot: bool: automatically closes plot
"""
if step is None:
step = self._step
else:
self._step = step
fig = mpl_plt.get_current_fig_manager()
img_w, img_h = fig.canvas.get_width_height()
image_buf = io.BytesIO()
mpl_plt.savefig(image_buf, format='png')
image_summary = Summary.Image(
encoded_image_string=image_buf.getvalue(),
colorspace=4, # RGBA
height=img_h,
width=img_w)
summary = Summary(value=[Summary.Value(tag=tag, image=image_summary)])
self.add_summary(summary, step)
if close_plot:
mpl_plt.close() |
Saves audio.
NB: single channel only right now.
Args:
tag: str: label for this data
audiodata: ndarray [Nsamples,]: data between (-1.0,1.0) to save as wave
step: int: training step
sample_rate: sample rate of passed in audio buffer | def audio(self, tag, audiodata, step=None, sample_rate=44100):
"""Saves audio.
NB: single channel only right now.
Args:
tag: str: label for this data
audiodata: ndarray [Nsamples,]: data between (-1.0,1.0) to save as wave
step: int: training step
sample_rate: sample rate of passed in audio buffer
"""
audiodata = onp.array(audiodata)
if step is None:
step = self._step
else:
self._step = step
audiodata = onp.clip(onp.squeeze(audiodata), -1, 1)
if audiodata.ndim != 1:
raise ValueError('Audio data must be 1D.')
sample_list = (32767.0 * audiodata).astype(int).tolist()
wio = io.BytesIO()
wav_buf = wave.open(wio, 'wb')
wav_buf.setnchannels(1)
wav_buf.setsampwidth(2)
wav_buf.setframerate(sample_rate)
enc = b''.join([struct.pack('<h', v) for v in sample_list])
wav_buf.writeframes(enc)
wav_buf.close()
encoded_audio_bytes = wio.getvalue()
wio.close()
audio = Summary.Audio(
sample_rate=sample_rate,
num_channels=1,
length_frames=len(sample_list),
encoded_audio_string=encoded_audio_bytes,
content_type='audio/wav')
summary = Summary(value=[Summary.Value(tag=tag, audio=audio)])
self.add_summary(summary, step) |
Saves histogram of values.
Args:
tag: str: label for this data
values: ndarray: will be flattened by this routine
bins: number of bins in histogram, or array of bins for onp.histogram
step: int: training step | def histogram(self, tag, values, bins, step=None):
"""Saves histogram of values.
Args:
tag: str: label for this data
values: ndarray: will be flattened by this routine
bins: number of bins in histogram, or array of bins for onp.histogram
step: int: training step
"""
if step is None:
step = self._step
else:
self._step = step
values = onp.array(values)
bins = onp.array(bins)
values = onp.reshape(values, -1)
counts, limits = onp.histogram(values, bins=bins)
# boundary logic
cum_counts = onp.cumsum(onp.greater(counts, 0, dtype=onp.int32))
start, end = onp.searchsorted(
cum_counts, [0, cum_counts[-1] - 1], side='right')
start, end = int(start), int(end) + 1
counts = (
counts[start -
1:end] if start > 0 else onp.concatenate([[0], counts[:end]]))
limits = limits[start:end + 1]
sum_sq = values.dot(values)
histo = HistogramProto(
min=values.min(),
max=values.max(),
num=len(values),
sum=values.sum(),
sum_squares=sum_sq,
bucket_limit=limits.tolist(),
bucket=counts.tolist())
summary = Summary(value=[Summary.Value(tag=tag, histo=histo)])
self.add_summary(summary, step) |
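A usage sketch for the writer methods above. The constructor is not shown in this excerpt, so the SummaryWriter(log_dir) call below is an assumption about its signature:
import numpy as onp
writer = SummaryWriter("/tmp/logs")        # assumed constructor signature
writer.scalar("train/loss", 0.42, step=100)
writer.image("samples/frame", onp.random.rand(64, 64, 3), step=100)
writer.histogram("weights/layer0", onp.random.randn(1024), bins=20, step=100)
writer.close()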
Import module at usr_dir, if provided. | def import_usr_dir(usr_dir):
"""Import module at usr_dir, if provided."""
if not usr_dir:
return
if usr_dir == INTERNAL_USR_DIR_PACKAGE:
# The package has been installed with pip under this name for Cloud ML
# Engine so just import it.
importlib.import_module(INTERNAL_USR_DIR_PACKAGE)
return
dir_path = os.path.abspath(os.path.expanduser(usr_dir).rstrip("/"))
containing_dir, module_name = os.path.split(dir_path)
tf.logging.info("Importing user module %s from path %s", module_name,
containing_dir)
sys.path.insert(0, containing_dir)
importlib.import_module(module_name)
sys.path.pop(0) |
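For example (the directory name is hypothetical), the call below temporarily puts the directory's parent on sys.path and imports the module by its directory name:
import_usr_dir("~/t2t_usr_dir")   # imports the package "t2t_usr_dir"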
A set of basic hyperparameters. | def basic_params1():
"""A set of basic hyperparameters."""
return hparam.HParams(
# If the problem consists of variable-length sequences
# (see problem.batch_size_means_tokens()), then this is the number
# of tokens per batch per GPU or per TPU core. Otherwise, this is
# the number of examples per GPU or per TPU core.
batch_size=4096,
batch_shuffle_size=512,
# If True, then if the features are of variable length, the batch_size is
# used as the actual batch size (and not tokens per batch).
use_fixed_batch_size=False,
num_hidden_layers=4,
kernel_height=3,
kernel_width=1,
hidden_size=64,
compress_steps=0,
# All hyperparameters ending in "dropout" are automatically set to 0.0
# when not in training mode.
dropout=0.2,
clip_grad_norm=2.0,
grad_noise_scale=0.0,
summarize_grads=False,
# Flag for whether mlperf mode is on
mlperf_mode=False,
# Whether to log the name and size of every variable
summarize_vars=False,
initializer="orthogonal",
initializer_gain=1.5,
label_smoothing=0.1,
optimizer="adam",
optimizer_adam_epsilon=1e-6,
optimizer_adam_beta1=0.85,
optimizer_adam_beta2=0.997,
optimizer_momentum_momentum=0.9,
optimizer_momentum_nesterov=False,
optimizer_adafactor_beta1=0.0,
optimizer_adafactor_beta2=0.999,
optimizer_adafactor_factored=True,
optimizer_adafactor_decay_type="pow",
optimizer_adafactor_memory_exponent=0.8,
optimizer_adafactor_clipping_threshold=1.0,
optimizer_adafactor_multiply_by_parameter_scale=True,
# Number of accumulating steps for multi step optimizers.
optimizer_multistep_accumulate_steps=0,
# Loss scaling used.
# Generally only necessary with mixed precision training.
# Mixed precision training currently only supports exponential scaling.
# To disable the scaler, set it to 0/False.
mixed_precision_optimizer_loss_scaler="exponential",
# Determines the initial loss scaling value for mixed precision
mixed_precision_optimizer_init_loss_scale=2**15,
# Whether to zero gradients that were not computed, so that the
# appropriate slots are created. Useful for sharing checkpoints between
# models with different sets of heads.
optimizer_zero_grads=False,
weight_decay=1e-6,
weight_noise=0.0,
# Defines the learning rate as a product of named functions.
# Available functions are listed in learning_rate._LEARNING_RATE_FUNCTIONS
# e.g. "constant*linear_warmup*rsqrt_decay*rsqrt_hidden_size"
learning_rate_schedule="legacy",
learning_rate_constant=1.0,
# If learning_rate_schedule=="legacy",
# then we specify decay scheme here. Warmup is always exponential,
# except with "noam" learning rate decay scheme.
# see optimize.legacy_learning_rate_schedule()
# TODO(noam): migrate everyone away from this.
learning_rate_decay_scheme="none",
# decay_steps and decay_staircase for learning_rate_decay_scheme=="exp"
learning_rate_decay_steps=5000,
learning_rate_decay_staircase=False,
learning_rate_minimum=None,
learning_rate_decay_rate=1.0,
learning_rate_warmup_steps=100,
learning_rate_cosine_cycle_steps=250000,
learning_rate=0.1,
sampling_method="argmax", # "argmax" or "random"
sampling_temp=1.0, # temperature for sampling
sampling_keep_top_k=-1, # If >0, ignore all but the top k logits
# expand the logits a piece at a time - saves memory.
factored_logits=False,
multiply_embedding_mode="sqrt_depth",
# Parameters related to mixtures of experts.
moe_hidden_sizes="2048", # hidden layer sizes (comma-separated)
moe_num_experts=64, # number of experts per layer
moe_k=2, # how many experts to use for each batch element
moe_loss_coef=1e-2,
# Sequences of operations to perform on layer input and layer output.
# Used by common_layers.layer_preprocess, common_layers.layer_postprocess
# Each character represents an operation:
# none: no preprocessing
# d: apply dropout
# n: apply normalization (see norm_type and norm_epsilon)
# a: add layer input (residual connection - only during postprocess)
# The special string "none" is used instead of the empty string
# to indicate no pre/postprocessing, since the empty string causes
# trouble for hyperparameter tuning.
# TODO(noam): The current settings ("", "dan") are the published version
# of the transformer. ("n", "da") seems better for harder-to-learn
# models, so it should probably be the default.
layer_preprocess_sequence="none",
layer_postprocess_sequence="dan",
# dropout rate to use during layer_preprocess and layer_postprocess
layer_prepostprocess_dropout=0.1,
# broadcast dimensions for layer_prepostprocess_dropout
# a comma-separated list of integers.
# see common_layers.dropout_with_broadcast_dims()
# Change this to "1" to save memory.
layer_prepostprocess_dropout_broadcast_dims="",
# dropout some symbols (set them to 0) before embedding.
symbol_dropout=0.0,
# What type of normalization to use
norm_type="layer", # "batch", "layer", "noam", "none".
# epsilon parameter to normalization function
norm_epsilon=1e-6,
# pad vocabularies so that this value divides the vocabulary size.
vocab_divisor=1,
# During training, we drop sequences whose inputs and targets are shorter
# than min_length
min_length=0,
# During training, we drop sequences whose inputs or targets are longer
# than max_length.
# If max_length==0, we use hparams.batch_size instead.
max_length=0,
# Pack examples on the fly.
pack_dataset=False,
# Use custom ops not included in standard tensorflow.
use_custom_ops=True,
# Split targets on the first axis into chunks of this length.
split_targets_chunk_length=0,
split_targets_max_chunks=100,
split_targets_strided_training=False,
# Maximum length in the smallest length bucket. Setting this
# flag too high will result in wasteful padding of short
# sequences. Due to some (hopefully) temporary hacks in the
# data reading and batching code, setting this flag too low
# results in a very long batch-shuffling queue.
# TODO(noam): change this once the Datasets API changes.
min_length_bucket=8,
# This flag controls the number of length buckets in the data
# reader. The buckets have maximum lengths from
# min_length_bucket to (max_length or batch_size), increasing
# (approximately) by factors of length_bucket_step.
length_bucket_step=1.1,
# If set to True, drop sequences longer than max_length during eval.
# This affects the validity of the evaluation metrics.
eval_drop_long_sequences=False,
# If True, run the model autoregressively instead of teacher-forcing
# during eval
eval_run_autoregressive=False,
# (For features with symbol modality) If True, share all of the
# input embeddings, target embeddings, and softmax weights.
shared_embedding_and_softmax_weights=False,
# (For features with symbol modality) If True, share the input embeddings
# and target embeddings.
shared_embedding=False,
# (For features with symbol modality) Number to shard embeddings by.
symbol_modality_num_shards=1,
# Feature transformations are optional dictionaries comprising key-value
# pairs of a feature name (str) and its transformation (function). If not
# specified, T2TModel applies a default transformation according to the
# feature's modality. Bottom is applicable to all features; loss, top, and
# weights_fn are only applicable to target features.
# TODO(trandustin): `name` is an optional hparam for legacy reasons,
# defining variable scope names. Remove this hparam in the future.
bottom={},
loss={},
name={},
top={},
weights_fn={},
# The maximum length of "input" sequence.
# Sequences longer than this value will be truncated. 0 or negative values
# mean there is no maximum or truncation.
# You can change this behavior by overriding preprocess_example() method
# in your problem class.
max_input_seq_length=0,
# The maximum length of "target" sequence.
# Sequences longer than this value will be truncated. 0 or negative values
# mean there is no maximum or truncation.
# You can change this behavior by overriding preprocess_example() method
# in your problem class.
max_target_seq_length=0,
# if nonzero, we split the target sequences on example read.
# This is for use with language modeling problems with fixed length
# examples. e.g. The examples may be written with length 65536, but we
# want to split each example into 64 examples of length 1024.
split_to_length=0,
# Video settings: how many frames to batch on input and targets.
video_num_input_frames=1,
video_num_target_frames=1,
# This flag allows us to optionally treat a seq-to-seq problem
# as a language model. Legal values are:
#
# "none" - Do not prepend the inputs to the targets.
# "prepend_inputs_masked_attention"
# replace "targets" in preprocessing with
# tf.concat([inputs, [0], targets], axis=1)
# i.e. we prepend the inputs to the targets with a single
# padding token in between. Use masked self-attention on the
# entire resulting sequence. During training, we compute losses on
# the combined sequence. During eval, we compute the metrics
# on only the targets portion.
# "prepend_inputs_full_attention"
# similar to the previous option except that each
# position in the inputs portion can see the
# entire inputs portion. This removes the challenge of
# autoregressively predicting the inputs portion.
prepend_mode="none",
# Scheduled sampling is interesting for auto-regressive models.
# It runs an additional step using the generated output as autoregressive
# targets, which can improve the model's inference results later. The
# parameter scheduled_sampling_prob determines with what probability
# will such additional step be run. It's turned off (0.0) by default.
# This probability will exponentially warm up for the number of
# steps determined by scheduled_sampling_warmup_steps.
# The tensor used for the n-th pass will consist of outputs from
# the (n-1)-th pass mixed with gold truth, with the proportion of gold
# determined by scheduled_sampling_gold_mixin_prob. Control the number
# of passes with scheduled_sampling_num_passes.
scheduled_sampling_prob=0.0,
scheduled_sampling_warmup_steps=50000,
scheduled_sampling_gold_mixin_prob=0.5,
# TODO(duckworthd): Uncomment when we can ascertain why adding an
# extra field to HParam causes test failures.
# scheduled_sampling_num_passes=1,
# This setting controls whether to copy variables around in a daisy chain
# (if true) or leave their placement to TensorFlow. It only affects multi
# device training and mostly should be turned on for performance. One
# exception are recurrent models: with dynamic loops it must be off.
daisy_chain_variables=True,
# If True in PREDICT mode, then last-position-only optimizations are not
# used.
force_full_predict=False,
# Set this for pure model parallelism. There is only one data shard.
no_data_parallelism=False,
# dtype used for activations. - "float32" or "bfloat16"
# activation_dtype="bfloat16" currently only works on TPU.
# It lowers activation-memory usage
# and does not appear to affect quality.
# You can train on TPU with activation_dtype="bfloat16" and evaluate
# on CPU/GPU with activation_dtype="float32"
activation_dtype="float32",
# dtype used for parameters: "float32" or "bfloat16"
# bfloat16 currently only works with optimizer="adafactor".
# The savings in memory allow for training larger models.
# Weights are encoded as (w*128)^8, using pseudostochastic
# roundoff. Initial experiments show that model quality is similar
# to baseline for about 3M training steps, but worse thereafter.
weight_dtype="float32",
# Directory containing a checkpoint for a pretrained model. This will only
# be used if a new run is being started. Parameters not found in the
# pretrained model will be randomly initialized. Superfluous parameters in
# the pretrained model will be ignored.
pretrained_model_dir="",
# Threshold used for two cases: the primary task probability for the
# constant mixing schedule, and the exponential schedule limit for when
# mixing should stop (e.g. 0.5 means stop at 50-50 mixing, 0.8 means stop
# at 20-80 mixing for the primary-others mixing case.)
multiproblem_schedule_threshold=0.5,
# For more than 2 tasks, we may want to specify per-task thresholds here.
# In that case, this needs to be a string with as many floating point
# numbers as the number of tasks in the multi-problem. These numbers
# are later normalized to add up to 1 and taken as probabilities for
# each task. This enforces a constant mixing schedule and if this is
# empty then the threshold from above is used for the first task and
# the other tasks get the remaining probability split uniformly.
multiproblem_per_task_threshold="",
# The number of examples at which the proportion of the mixed in datasets
# is multiproblem_schedule_threshold
multiproblem_schedule_max_examples=1e7,
# When training multiproblems, we can mix the data according to different
# schedules. Example: a constant schedule mixing 20-80 between the primary
# and other tasks.
# A list of supported schedules can be found in
# `data_generators.multi_problem.py`.
multiproblem_mixing_schedule="constant",
# A boolean that decides whether input sequence losses and target label
# losses in classification problems should be reweighted.
multiproblem_reweight_label_loss=False,
# How much weight the targets in classification problems receive. Inputs
# receive 1 minus this weight.
multiproblem_label_weight=0.5,
# Hyperparameters for relative attention.
# The maximum relative positional distance to learn an embedding for.
max_relative_position=0,
# If heads share the same relative embedding.
heads_share_relative_embedding=False,
# If relative embedding terms are added to values too.
add_relative_to_values=False,
# If enable the host_call which is executed every training step.
# There could be a performance drop if host_call function is slow and
# cannot keep up with the TPU-side computation.
tpu_enable_host_call=False,
# Pad batch dim of inputs to nearest multiple of batch multiple.
pad_batch=False,
# When true, do not evaluate on the language model data when running the
# multiproblem since it can take a while. If False, set eval_steps to
# something large like 6000 or 10000.
multiproblem_target_eval_only=False,
# Max out the vocab size to a power of 2 for efficiency and to reserve
# extra space in the vocabulary for new task ids and label classes.
multiproblem_vocab_size=-1,
# When using multiproblem with generation tasks, need to truncate the
# inputs and targets manually before concatenating them.
multiproblem_max_input_length=-1,
multiproblem_max_target_length=-1,
# If positive, makes training targets fixed-length in MultiProblem.
multiproblem_fixed_train_length=-1,
# Load weights from a second model. For instance, when using
# pre-trained weights, you might want to initialize the encoder
# and decoder by loading different models.
warm_start_from_second="",
# Area attention hyperparameters
area_value_mode="none",
area_key_mode="none",
# Using area attention for the number of layers from the bottom
num_area_layers=0,
max_area_width=1,
max_area_height=1,
memory_height=1
) |
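A short usage sketch: the returned object is a standard HParams container, so values can be read or overridden after construction (the overrides below are arbitrary):
hp = basic_params1()
hp.hidden_size = 128                  # override a default
hp.learning_rate_schedule = "constant*linear_warmup*rsqrt_decay"
print(hp.batch_size, hp.optimizer)    # 4096 adam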
A basic range of hyperparameters. | def basic_range1(ranged_hparams):
"""A basic range of hyperparameters."""
rhp = ranged_hparams
rhp.set_discrete("batch_size", [1024, 2048, 4096])
rhp.set_discrete("num_hidden_layers", [1, 2, 3, 4, 5, 6])
rhp.set_discrete("hidden_size", [32, 64, 128, 256, 512], scale=rhp.LOG_SCALE)
rhp.set_discrete("kernel_height", [1, 3, 5, 7])
rhp.set_discrete("kernel_width", [1, 3, 5, 7])
rhp.set_discrete("compress_steps", [0, 1, 2])
rhp.set_float("dropout", 0.0, 0.5)
rhp.set_float("weight_decay", 1e-4, 10.0, scale=rhp.LOG_SCALE)
rhp.set_float("label_smoothing", 0.0, 0.2)
rhp.set_float("clip_grad_norm", 0.01, 50.0, scale=rhp.LOG_SCALE)
rhp.set_float("learning_rate", 0.005, 2.0, scale=rhp.LOG_SCALE)
rhp.set_categorical("initializer",
["uniform", "orthogonal", "uniform_unit_scaling"])
rhp.set_float("initializer_gain", 0.5, 3.5)
rhp.set_categorical("learning_rate_decay_scheme",
["none", "sqrt", "noam", "exp"])
rhp.set_float("optimizer_adam_epsilon", 1e-7, 1e-2, scale=rhp.LOG_SCALE)
rhp.set_float("optimizer_adam_beta1", 0.8, 0.9)
rhp.set_float("optimizer_adam_beta2", 0.995, 0.999)
rhp.set_categorical(
"optimizer",
["adam", "adagrad", "momentum", "rms_prop", "sgd", "yellow_fin"]) |
Check if name is in orig_ctr or in one of the other type containers. | def _check_reset_and_type_change(self, name, orig_ctr):
"""Check if name is in orig_ctr or in one of the other type containers."""
# Resetting a hyperparameter
if name in orig_ctr:
tf.logging.warning("Overwriting hparam %s", name)
ctr_names = [
(self._categorical_params, "categorical"),
(self._discrete_params, "discrete"),
(self._float_params, "float"),
(self._int_params, "int"),
]
ctrs, names = list(zip(*ctr_names))
orig_name = names[ctrs.index(orig_ctr)]
for ctr, ctr_name in ctr_names:
if ctr is orig_ctr:
continue
# Using a different type for the same hyperparameter name
if name in ctr:
raise ValueError("Setting hyperparameter %s as type %s, but a "
"hyperparameter of the same name was originally "
"registered as type %s" % (name, ctr_name, orig_name))
To list of dicts suitable for Cloud ML Engine hyperparameter tuning. | def to_parameter_specs(self, name_prefix=""):
"""To list of dicts suitable for Cloud ML Engine hyperparameter tuning."""
specs = []
for name, categories, _ in self._categorical_params.values():
spec = {
"parameterName": name_prefix + name,
"type": "CATEGORICAL",
"categoricalValues": categories,
}
specs.append(spec)
for name, feasible_points, scale, _ in self._discrete_params.values():
spec = {
"parameterName": name_prefix + name,
"type": "DISCRETE",
"discreteValues": feasible_points,
}
if scale:
spec["scaleType"] = self.SCALES_STR[scale]
specs.append(spec)
for name, min_val, max_val, scale, _ in self._float_params.values():
spec = {
"parameterName": name_prefix + name,
"type": "DOUBLE",
"minValue": min_val,
"maxValue": max_val,
}
if scale:
spec["scaleType"] = self.SCALES_STR[scale]
specs.append(spec)
for name, min_val, max_val, scale, _ in self._int_params.values():
spec = {
"parameterName": name_prefix + name,
"type": "INTEGER",
"minValue": min_val,
"maxValue": max_val,
}
if scale:
spec["scaleType"] = self.SCALES_STR[scale]
specs.append(spec)
return specs |
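For instance, a float hyperparameter registered with set_float(..., scale=LOG_SCALE) is emitted as a dict of roughly the following shape (values are illustrative; the scaleType string is whatever SCALES_STR maps LOG_SCALE to):
{
    "parameterName": "hp_learning_rate",   # assuming name_prefix="hp_"
    "type": "DOUBLE",
    "minValue": 0.005,
    "maxValue": 2.0,
    "scaleType": "UNIT_LOG_SCALE",
}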
Create and register problems for the game.
Args:
game_name: str, one of the games in ATARI_GAMES, e.g. "bank_heist".
game_mode: the frame skip and sticky keys config.
Raises:
ValueError: if game_name or game_mode are wrong. | def register_game(game_name, game_mode="NoFrameskip-v4"):
"""Create and register problems for the game.
Args:
game_name: str, one of the games in ATARI_GAMES, e.g. "bank_heist".
game_mode: the frame skip and sticky keys config.
Raises:
ValueError: if game_name or game_mode are wrong.
"""
if game_name not in ATARI_GAMES:
raise ValueError("Game %s not in ATARI_GAMES" % game_name)
if game_mode not in ATARI_GAME_MODES:
raise ValueError("Unknown ATARI game mode: %s." % game_mode)
camel_game_name = misc_utils.snakecase_to_camelcase(game_name) + game_mode
# Create and register the Problem
cls = type("Gym%sRandom" % camel_game_name,
(T2TGymEnv,), {"base_env_name": camel_game_name})
registry.register_problem(cls) |
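Usage sketch, assuming the registry and T2TGymEnv imports of the surrounding module:
register_game("pong")          # registers a problem for PongNoFrameskip-v4
register_game("bank_heist")    # likewise for BankHeistNoFrameskip-v4
# register_game("not_a_game") raises ValueError.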
Decodes a single observation from PNG. | def _decode_png(self, encoded_observation):
"""Decodes a single observation from PNG."""
return self._session.obj.run(
self._decoded_image_t.obj,
feed_dict={self._encoded_image_p.obj: encoded_observation}
) |
Encodes observations as PNG. | def _encode_observations(self, observations):
"""Encodes observations as PNG."""
return [
Observation(
self._session.obj.run(
self._encoded_image_t.obj,
feed_dict={self._decoded_image_p.obj: observation}
),
self._decode_png
)
for observation in observations
] |
Makes a step in all environments.
Does any preprocessing and records frames.
Args:
actions: Batch of actions.
Returns:
(obs, rewards, dones) - batches of observations, rewards and done flags
respectively.
Raises:
ValueError: when the data for current epoch has already been loaded. | def step(self, actions):
"""Makes a step in all environments.
Does any preprocessing and records frames.
Args:
actions: Batch of actions.
Returns:
(obs, rewards, dones) - batches of observations, rewards and done flags
respectively.
Raises:
ValueError: when the data for current epoch has already been loaded.
"""
if self._store_rollouts and \
self._rollouts_by_epoch_and_split[self.current_epoch]:
raise ValueError(
"Data for current epoch has already been loaded from disk."
)
(obs, unclipped_rewards, dones) = self._step(actions)
obs = self._preprocess_observations(obs)
(min_reward, max_reward) = self.reward_range
rewards = np.around(np.clip(unclipped_rewards, min_reward, max_reward))
if self._store_rollouts:
unclipped_rewards = unclipped_rewards.astype(np.float64)
encoded_obs = self._encode_observations(obs)
for (rollout, frame, action) in zip(
self._current_batch_rollouts, self._current_batch_frames, actions
):
rollout.append(frame._replace(action=action))
# orud = (observation, reward, unclipped_reward, done)
self._current_batch_frames = [
Frame(*orud, action=None)
for orud in zip(encoded_obs, rewards, unclipped_rewards, dones)
]
return (obs, rewards, dones) |
Resets environments at given indices.
Does any preprocessing and adds rollouts to history.
Args:
indices: Indices of environments to reset.
Returns:
Batch of initial observations of reset environments.
Raises:
ValueError: when there's no current epoch. | def reset(self, indices=None):
"""Resets environments at given indices.
Does any preprocessing and adds rollouts to history.
Args:
indices: Indices of environments to reset.
Returns:
Batch of initial observations of reset environments.
Raises:
ValueError: when there's no current epoch.
"""
if self._store_rollouts and self.current_epoch is None:
raise ValueError(
"No current epoch. start_new_epoch() should first be called."
)
if indices is None:
indices = np.arange(self.batch_size)
new_obs = self._reset(indices)
if self._should_preprocess_on_reset:
new_obs = self._preprocess_observations(new_obs)
if self._store_rollouts:
encoded_obs = self._encode_observations(new_obs)
for (index, ob) in zip(indices, encoded_obs):
frame = self._current_batch_frames[index]
if frame is not None:
rollout = self._current_batch_rollouts[index]
rollout.append(frame._replace(action=0))
self._current_epoch_rollouts.append(rollout)
self._current_batch_rollouts[index] = []
self._current_batch_frames[index] = Frame(
observation=ob, reward=0, unclipped_reward=0, done=False,
action=None
)
return new_obs |
Additional data fields to store on disk and their decoders. | def extra_reading_spec(self):
"""Additional data fields to store on disk and their decoders."""
field_names = ("frame_number", "action", "reward", "done")
data_fields = {
name: tf.FixedLenFeature([1], tf.int64) for name in field_names
}
decoders = {
name: tf.contrib.slim.tfexample_decoder.Tensor(tensor_key=name)
for name in field_names
}
return (data_fields, decoders) |
Splits frames in the current epoch according to self.dataset_splits.
Rollouts can be broken on shard boundary. This is desirable when we have
few long rollouts and we want to make sure we have data in the dev set. | def _split_current_epoch(self):
"""Splits frames in the current epoch according to self.dataset_splits.
Rollouts can be broken on shard boundary. This is desirable when we have
few long rollouts and we want to make sure we have data in the dev set.
"""
num_frames = self._calc_num_frames(self._current_epoch_rollouts)
num_shards = sum(split["shards"] for split in self.dataset_splits)
shard_size = num_frames // num_shards
splits = self.dataset_splits
num_saved_frames = 0
split_index = 0
split_begin_index = 0
rollouts_by_split = collections.defaultdict(list)
def split_size(split_index):
return splits[split_index]["shards"] * shard_size
for rollout in self._current_epoch_rollouts:
num_saved_frames_current_rollout = 0
# Split the rollout into chunks corresponding to dataset splits. In most
# cases there should be only one chunk. On dataset split boundary there
# will be two. If a rollout is longer than the size of a dataset split,
# there might be more.
while num_saved_frames_current_rollout < len(rollout):
max_chunk_length = (
split_begin_index + split_size(split_index) - num_saved_frames
)
if split_index == len(splits) - 1:
# Put the remainder in the last split to preserve the ordering.
max_chunk_length = len(rollout)
rollout_chunk = rollout[
num_saved_frames_current_rollout:
(num_saved_frames_current_rollout + max_chunk_length)
]
rollouts_by_split[splits[split_index]["split"]].append(rollout_chunk)
num_saved_frames_current_rollout += len(rollout_chunk)
num_saved_frames += len(rollout_chunk)
if num_saved_frames == split_begin_index + split_size(split_index):
split_begin_index += split_size(split_index)
split_index = min(split_index + 1, len(splits) - 1)
self._rollouts_by_epoch_and_split[self.current_epoch] = rollouts_by_split
self._current_epoch_rollouts = [] |
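A toy numeric walk-through of the apportioning above (plain Python, not library code), showing where a rollout gets broken:
rollouts = [list(range(7)), list(range(5)), list(range(8))]   # 20 frames total
splits = [{"split": "train", "shards": 3}, {"split": "dev", "shards": 1}]
num_frames = sum(len(r) for r in rollouts)                    # 20
shard_size = num_frames // sum(s["shards"] for s in splits)   # 5
# train owns 3 shards = 15 frames and dev owns the remaining 5, so the third
# rollout (8 frames) is broken after its first 3 frames: 7 + 5 + 3 frames go
# to train and the last 5 frames go to dev, exactly as the loop above does.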
List of pairs (split, paths) for the current epoch. | def splits_and_paths(self, data_dir):
"""List of pairs (split, paths) for the current epoch."""
filepath_fns = {
problem.DatasetSplit.TRAIN: self.training_filepaths,
problem.DatasetSplit.EVAL: self.dev_filepaths,
problem.DatasetSplit.TEST: self.test_filepaths,
}
def append_epoch(paths):
return [
"{}.{}".format(path, self.current_epoch)
for path in paths
]
# We set shuffled=True as we don't want to shuffle on disk later.
return [
(split["split"], append_epoch(filepath_fns[split["split"]](
data_dir, split["shards"], shuffled=True
)))
for split in self.dataset_splits
] |
Saves the current epoch rollouts to disk, split into train/dev sets. | def generate_data(self, data_dir, tmp_dir=None, task_id=-1):
"""Saves the current epoch rollouts to disk, split into train/dev sets."""
if not self._rollouts_by_epoch_and_split[self.current_epoch]:
# Data not loaded from disk.
self._split_current_epoch()
rollouts_by_split = self._rollouts_by_epoch_and_split[self.current_epoch]
splits_and_paths = self.splits_and_paths(data_dir)
for (split, paths) in splits_and_paths:
rollouts = rollouts_by_split[split]
num_frames = self._calc_num_frames(rollouts)
shard_size = num_frames // len(paths)
frame_gen = self._generate_frames(rollouts)
for (path_index, path) in enumerate(paths):
limit = shard_size
# Put the remainder in the last shard to preserve the ordering.
if path_index == len(paths) - 1:
limit = None
generator_utils.generate_files(
itertools.islice(frame_gen, limit), [path],
cycle_every_n=float("inf")
) |
Sets the state that will be used on next reset. | def set_initial_state(self, initial_state, initial_frames):
"""Sets the state that will be used on next reset."""
self._initial_state = initial_state
self._initial_frames = initial_frames[:, -1, ...]
self._should_preprocess_on_reset = False |
Converts a NumPy image to a tf.Summary.Value object.
Args:
image: 3-D NumPy array.
tag: name for tf.Summary.Value for display in tensorboard.
Returns:
image_summary: A tf.Summary.Value object. | def image_to_tf_summary_value(image, tag):
"""Converts a NumPy image to a tf.Summary.Value object.
Args:
image: 3-D NumPy array.
tag: name for tf.Summary.Value for display in tensorboard.
Returns:
image_summary: A tf.Summary.Value object.
"""
curr_image = np.asarray(image, dtype=np.uint8)
height, width, n_channels = curr_image.shape
# If monochrome image, then reshape to [height, width]
if n_channels == 1:
curr_image = np.reshape(curr_image, [height, width])
s = io.BytesIO()
matplotlib_pyplot().imsave(s, curr_image, format="png")
img_sum = tf.Summary.Image(encoded_image_string=s.getvalue(),
height=height, width=width,
colorspace=n_channels)
return tf.Summary.Value(tag=tag, image=img_sum) |
Optionally converts images from hooks_args to image summaries.
Args:
hook_args: DecodeHookArgs namedtuple
Returns:
summaries: list of tf.Summary values if hook_args.decode_hparams.display_decoded_images is True; otherwise an empty list. | def convert_predictions_to_image_summaries(hook_args):
"""Optionally converts images from hooks_args to image summaries.
Args:
hook_args: DecodeHookArgs namedtuple
Returns:
summaries: list of tf.Summary values if hook_args.decode_hparams.display_decoded_images is True; otherwise an empty list.
"""
decode_hparams = hook_args.decode_hparams
if not decode_hparams.display_decoded_images:
return []
predictions = hook_args.predictions[0]
# Display ten random inputs and outputs so that tensorboard does not hang.
all_summaries = []
rand_predictions = np.random.choice(predictions, size=10)
for ind, prediction in enumerate(rand_predictions):
output_summary = image_to_tf_summary_value(
prediction["outputs"], tag="%d_output" % ind)
input_summary = image_to_tf_summary_value(
prediction["inputs"], tag="%d_input" % ind)
all_summaries.append(input_summary)
all_summaries.append(output_summary)
return all_summaries |
image resize function used by quite a few image problems. | def resize_by_area(img, size):
"""image resize function used by quite a few image problems."""
return tf.to_int64(
tf.image.resize_images(img, [size, size], tf.image.ResizeMethod.AREA)) |
Returns list of scaled images, one for each resolution.
Args:
image: Tensor of shape [height, height, num_channels].
resolutions: List of heights that image's height is resized to.
resize_method: tf.image.ResizeMethod.
num_channels: Number of channels in image.
Returns:
List of Tensors, one for each resolution with shape given by
[resolutions[i], resolutions[i], num_channels]. | def make_multiscale(image, resolutions,
resize_method=tf.image.ResizeMethod.BICUBIC,
num_channels=3):
"""Returns list of scaled images, one for each resolution.
Args:
image: Tensor of shape [height, height, num_channels].
resolutions: List of heights that image's height is resized to.
resize_method: tf.image.ResizeMethod.
num_channels: Number of channels in image.
Returns:
List of Tensors, one for each resolution with shape given by
[resolutions[i], resolutions[i], num_channels].
"""
scaled_images = []
for height in resolutions:
scaled_image = tf.image.resize_images(
image,
size=[height, height], # assuming that height = width
method=resize_method)
scaled_image = tf.to_int64(scaled_image)
scaled_image.set_shape([height, height, num_channels])
scaled_images.append(scaled_image)
return scaled_images |
Returns list of scaled images, one for each resolution.
Resizes by skipping every nth pixel.
Args:
image: Tensor of shape [height, height, num_channels].
resolutions: List of heights that image's height is resized to. The function
assumes VALID padding, so the original image's height must be divisible
by each resolution's height to return the exact resolution size.
num_channels: Number of channels in image.
Returns:
List of Tensors, one for each resolution with shape given by
[resolutions[i], resolutions[i], num_channels] if resolutions properly
divide the original image's height; otherwise shape height and width is up
to valid skips. | def make_multiscale_dilated(image, resolutions, num_channels=3):
"""Returns list of scaled images, one for each resolution.
Resizes by skipping every nth pixel.
Args:
image: Tensor of shape [height, height, num_channels].
resolutions: List of heights that image's height is resized to. The function
assumes VALID padding, so the original image's height must be divisible
by each resolution's height to return the exact resolution size.
num_channels: Number of channels in image.
Returns:
List of Tensors, one for each resolution with shape given by
[resolutions[i], resolutions[i], num_channels] if resolutions properly
divide the original image's height; otherwise shape height and width is up
to valid skips.
"""
image_height = common_layers.shape_list(image)[0]
scaled_images = []
for height in resolutions:
dilation_rate = image_height // height # assuming height = width
scaled_image = image[::dilation_rate, ::dilation_rate]
scaled_image = tf.to_int64(scaled_image)
scaled_image.set_shape([None, None, num_channels])
scaled_images.append(scaled_image)
return scaled_images |
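The dilation trick is just strided slicing; a minimal numpy sketch of the same idea (no TF required):
import numpy as np
image = np.arange(8 * 8 * 3).reshape(8, 8, 3)
for target in (8, 4, 2):
  rate = image.shape[0] // target          # skip every rate-th pixel
  print(image[::rate, ::rate].shape)       # (8, 8, 3), (4, 4, 3), (2, 2, 3)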
Yield images encoded as pngs. | def encode_images_as_png(images):
"""Yield images encoded as pngs."""
if tf.executing_eagerly():
for image in images:
yield tf.image.encode_png(image).numpy()
else:
(height, width, channels) = images[0].shape
with tf.Graph().as_default():
image_t = tf.placeholder(dtype=tf.uint8, shape=(height, width, channels))
encoded_image_t = tf.image.encode_png(image_t)
with tf.Session() as sess:
for image in images:
enc_string = sess.run(encoded_image_t, feed_dict={image_t: image})
yield enc_string |
Generator for images that takes image and labels lists and creates pngs.
Args:
images: list of images given as [width x height x channels] numpy arrays.
labels: list of ints, same length as images.
Yields:
A dictionary representing the images with the following fields:
* image/encoded: the string encoding the image as PNG,
* image/format: the string "png" representing image format,
* image/class/label: an integer representing the label,
* image/height: an integer representing the height,
* image/width: an integer representing the width.
Every field is actually a singleton list of the corresponding type.
Raises:
ValueError: if images is an empty list. | def image_generator(images, labels):
"""Generator for images that takes image and labels lists and creates pngs.
Args:
images: list of images given as [width x height x channels] numpy arrays.
labels: list of ints, same length as images.
Yields:
A dictionary representing the images with the following fields:
* image/encoded: the string encoding the image as PNG,
* image/format: the string "png" representing image format,
* image/class/label: an integer representing the label,
* image/height: an integer representing the height,
* image/width: an integer representing the width.
Every field is actually a singleton list of the corresponding type.
Raises:
ValueError: if images is an empty list.
"""
if not images:
raise ValueError("Must provide some images for the generator.")
width, height, _ = images[0].shape
for (enc_image, label) in zip(encode_images_as_png(images), labels):
yield {
"image/encoded": [enc_image],
"image/format": ["png"],
"image/class/label": [int(label)],
"image/height": [height],
"image/width": [width]
} |
Image augmentation: cropping, flipping, and color transforms. | def image_augmentation(images, do_colors=False, crop_size=None):
"""Image augmentation: cropping, flipping, and color transforms."""
if crop_size is None:
crop_size = [299, 299]
images = tf.random_crop(images, crop_size + [3])
images = tf.image.random_flip_left_right(images)
if do_colors: # More augmentation, but might be slow.
images = tf.image.random_brightness(images, max_delta=32. / 255.)
images = tf.image.random_saturation(images, lower=0.5, upper=1.5)
images = tf.image.random_hue(images, max_delta=0.2)
images = tf.image.random_contrast(images, lower=0.5, upper=1.5)
return images |
Image augmentation suitable for CIFAR-10/100.
As described in https://arxiv.org/pdf/1608.06993v3.pdf (page 5).
Args:
images: a Tensor.
Returns:
Tensor of the same shape as images. | def cifar_image_augmentation(images):
"""Image augmentation suitable for CIFAR-10/100.
As described in https://arxiv.org/pdf/1608.06993v3.pdf (page 5).
Args:
images: a Tensor.
Returns:
Tensor of the same shape as images.
"""
images = tf.image.resize_image_with_crop_or_pad(images, 40, 40)
images = tf.random_crop(images, [32, 32, 3])
images = tf.image.random_flip_left_right(images)
return images |
Apply random horizontal and vertical shift to images.
This is the default data-augmentation strategy used on CIFAR in Glow.
Args:
image: a 3-D Tensor
wsr: Width shift range, as a float fraction of the width.
hsr: Height shift range, as a float fraction of the height.
Returns:
images: images translated by the provided wsr and hsr. | def random_shift(image, wsr=0.1, hsr=0.1):
"""Apply random horizontal and vertical shift to images.
This is the default data-augmentation strategy used on CIFAR in Glow.
Args:
image: a 3-D Tensor
wsr: Width shift range, as a float fraction of the width.
hsr: Height shift range, as a float fraction of the height.
Returns:
images: images translated by the provided wsr and hsr.
"""
height, width, _ = common_layers.shape_list(image)
width_range, height_range = wsr*width, hsr*height
height_translations = tf.random_uniform((1,), -height_range, height_range)
width_translations = tf.random_uniform((1,), -width_range, width_range)
translations = tf.concat((height_translations, width_translations), axis=0)
return tf.contrib.image.translate(image, translations=translations) |
Get the common attention and feed-forward layers.
The returned layer functions will have the following signature:
y, extra_loss = fct(x)
extra_loss is set to 0.0 if the layer doesn't have extra loss.
If dp is provided, the layers will be distributed within the devices.
If MoE is to be used, both dp and model need to be set.
Args:
hparams (tf.HParams): the model hparameters
dp (expert_utils.Parallelism): A data parallelism object. If not given,
the dp calls are simply ignored.
Returns:
dict[str:fct]: A dictionary containing the standardized functions | def get_standardized_layers(hparams, dp=None):
"""Get the common attention and feed-forward layers.
The returned layer functions will have the following signature:
y, extra_loss = fct(x)
extra_loss is set to 0.0 if the layer doesn't have extra loss.
If dp is provided, the layers will be distributed within the devices.
If MoE is to be used, both dp and model need to be set.
Args:
hparams (tf.HParams): the model hparameters
dp (expert_utils.Parallelism): A data parallelism object. If not given,
the dp calls are simply ignored.
Returns:
dict[str:fct]: A dictionary containing the standardized functions
"""
def partial(fct, *args, **kwargs):
"""Same as functools.partial but with functools.wraps."""
return functools.wraps(fct)(functools.partial(fct, *args, **kwargs))
def register_layer(
fct_in,
default_args=None,
default_kwargs=None,
use_dp=True,
recompute_grad=False,
):
"""Turn a function into its standardized version.
Args:
fct_in (fct): The function to register
default_args (list): The default parameters to add to the function.
default_kwargs (dict): The default parameters to add to the function.
Those arguments can be overwritten when calling the function.
use_dp (bool): Wrap the function call within a dataparallelism object if
dp is available. Some layers (like MOE) must be called without dp.
recompute_grad (bool): If True, recompute the function during the
backward pass to save memory
Returns:
fct: the standardized layer function.
"""
# The kwargs given when calling the function overwrite the default ones
fct_in = partial(fct_in, *(default_args or []), **(default_kwargs or {}))
@functools.wraps(fct_in)
def decorator(x, *args, **kwargs):
"""Call the layer function."""
fct = fct_in # For closure. Could use nonlocal with Python 3
# Optionally create the memory-optimized version of the function
if recompute_grad:
fct = partial(fct, **kwargs) # recompute_grad only accept args
fct = common_layers.recompute_grad(fct)
kwargs = {}
# Use dp if it was given (and the layer is not MoE)
if use_dp and dp is not None:
y = dp(fct, x, *args, **kwargs)
else:
y = fct(x, *args, **kwargs)
# Capture the extra loss if the layer returns one
extra_loss = 0.0
if isinstance(y, tuple):
y, extra_loss = y
return y, extra_loss
return decorator
total_key_depth = hparams.attention_key_channels or hparams.hidden_size
total_value_depth = hparams.attention_value_channels or hparams.hidden_size
# Attention layers:
# === Multi-head full attention layer ===
multihead_attention_fn = register_layer(
multihead_attention,
default_kwargs=dict(
memory_antecedent=None, # Self-attention by default
bias=None,
total_key_depth=total_key_depth,
total_value_depth=total_value_depth,
output_depth=hparams.hidden_size,
num_heads=hparams.num_heads,
dropout_rate=hparams.attention_dropout,
))
# === Memory efficient full-attention layer ===
# Save memory by not storing the activations and
# recomputing them during the backward pass
memeff_attention_base_fn = register_layer(
multihead_attention,
default_kwargs=dict(
total_key_depth=total_key_depth,
total_value_depth=total_value_depth,
output_depth=hparams.hidden_size,
num_heads=hparams.num_heads,
dropout_rate=hparams.attention_dropout,
),
recompute_grad=True,
)
def memeff_attention_fn(*args, **kwargs):
"""Modify args/kwargs for compatibility with recompute_grad."""
kwargs = kwargs.copy()
assert len(args) == 1
x = args[0]
memory_antecedent = kwargs.pop("memory_antecedent", x) # Same as x if None
if kwargs.get("bias", None) is not None: # Case where bias has been set
args = (x, memory_antecedent, kwargs.pop("bias"))
else:
# Otherwise, only 2 args. This is necessary as recompute_grad does not
# support None values.
args = (x, memory_antecedent)
return memeff_attention_base_fn(*args, **kwargs)
# === Local attention (unmasked) layer ===
# Reuse same parameters as multihead_attention
# Don't mask the future
local_attention_fn = partial(
multihead_attention_fn,
block_length=hparams.attention_loc_block_length,
block_width=hparams.attention_loc_block_width,
attention_type="local_unmasked",
)
# === Local attention (masked) layer ===
# Reuse same parameters as multihead_attention
# Only works for self attention. Always mask the future.
local_attention_masked_fn = partial(
multihead_attention_fn,
block_length=hparams.attention_loc_block_length,
attention_type="local_mask_right",
)
# === Masked memory-compressed multihead self attention layer ===
# Only works for self attention. Always mask the future.
compressed_attention_masked_fn = register_layer(
multihead_self_attention_reduced,
default_kwargs=dict(
factor=hparams.attention_red_factor,
nonlinearity=hparams.attention_red_nonlinearity,
reduction_type=hparams.attention_red_type,
multihead_params=dict(
total_key_depth=total_key_depth,
total_value_depth=total_value_depth,
num_heads=hparams.num_heads,
dropout_rate=hparams.attention_dropout,
),
),
)
# === Unmasked memory-compressed multihead self attention layer ===
# Only works for self attention. Never mask the future. Bias never added
compressed_attention_fn = partial(
compressed_attention_masked_fn,
add_mask=False,
)
# Feed-forwards layers:
# === FC layer ===
conv_hidden_relu = register_layer(
common_layers.conv_hidden_relu,
default_kwargs=dict(
hidden_size=hparams.filter_size,
output_size=hparams.hidden_size,
dropout=hparams.relu_dropout,
),
)
# === Separable convolution layer ===
# No mask applied
sep_conv_relu = partial(
conv_hidden_relu,
padding="SAME",
# Parameters copied from the transformer model, could add hparams
kernel_size=(3, 1),
second_kernel_size=(31, 1),
)
# === Separable convolution layer (masked version) ===
# Mask the future
sep_conv_relu_masked = partial(
sep_conv_relu,
padding="LEFT", # Mask future for decoder
)
# Define all available layers
cur_layers = dict(
# Attention layers:
a=multihead_attention_fn, # Multihead full attention
loc=local_attention_fn, # Local attention
locm=local_attention_masked_fn, # Local attention (masked)
red=compressed_attention_fn, # Memory-compressed attention
redm=compressed_attention_masked_fn, # Memory-compressed att (masked)
mem=memeff_attention_fn, # Memory efficient
# Feed-forward layers:
fc=conv_hidden_relu, # Fully connected
sep=sep_conv_relu, # Separable convolution (unmasked)
sepm=sep_conv_relu_masked, # Separable convolution (masked)
)
return cur_layers |
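A usage sketch of the returned dictionary; hparams would come from add_standard_attention_hparams below and x is a [batch, length, hidden_size] tensor (both assumed here):
layers = get_standardized_layers(hparams)
y, loss_a = layers["a"](x)         # multihead self-attention (bias defaults to None)
y, loss_fc = layers["fc"](y)       # feed-forward block
extra_loss = loss_a + loss_fc      # 0.0 unless a layer reports an extra loss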
Adds the hparams used by get_standardized_layers. | def add_standard_attention_hparams(hparams):
"""Adds the hparams used by get_standardized_layers."""
# All hyperparameters ending in "dropout" are automatically set to 0.0
# when not in training mode.
# hparams used and which should have been defined outside (in
# common_hparams):
# Global flags
# hparams.mode
# hparams.hidden_size
# Pre-post processing flags
# hparams.layer_preprocess_sequence
# hparams.layer_postprocess_sequence
# hparams.layer_prepostprocess_dropout
# hparams.norm_type
# hparams.norm_epsilon
# Mixture-of-Expert flags
# hparams.moe_hidden_sizes
# hparams.moe_num_experts
# hparams.moe_k
# hparams.moe_loss_coef
# Attention layers flags
hparams.add_hparam("num_heads", 8)
hparams.add_hparam("attention_key_channels", 0)
hparams.add_hparam("attention_value_channels", 0)
hparams.add_hparam("attention_dropout", 0.0)
# Attention: Local
hparams.add_hparam("attention_loc_block_length", 256)
# Attention: Local (unmasked only): How much to look left.
hparams.add_hparam("attention_loc_block_width", 128)
# Attention: Memory-compressed
hparams.add_hparam("attention_red_factor", 3)
hparams.add_hparam("attention_red_type", "conv")
hparams.add_hparam("attention_red_nonlinearity", "none")
# Fully connected layers flags
# To be more consistent, should use filter_size to also control the MOE
# size if moe_hidden_sizes not set.
hparams.add_hparam("filter_size", 2048)
hparams.add_hparam("relu_dropout", 0.0)
return hparams |
Computes encdec attention loss between expected and actual attentions.
Args:
expected_attention_logits: Tensor storing the expected encoder-decoder
attention logits with shape [batch_size, target_length, input_length].
actual_attentions: Dictionary with actual attention logits for different
attention types and hidden layers.
loss_type: type of the loss function.
loss_multiplier: multiplier for the attention loss.
Returns:
KL_divergence loss between the actual and expected attention logits. | def encoder_decoder_attention_loss(expected_attention_logits,
actual_attentions,
loss_type="kl_divergence",
loss_multiplier=1.0):
"""Computes encdec attention loss between expected and actual attentions.
Args:
expected_attention_logits: Tensor storing the expected encoder-decoder
attention logits with shape [batch_size, target_length, input_length].
actual_attentions: Dictionary with actual attention logits for different
attention types and hidden layers.
loss_type: type of the loss function.
loss_multiplier: multiplier for the attention loss.
Returns:
KL_divergence loss between the actual and expected attention logits.
"""
def combine_attentions(attention_list):
"""Combine different layer attentions and then average over layers/heads."""
# Stack all hidden layer attention tensors to get a tensor with shape
# [num_hidden_layers, batch_size, num_heads, target_length, input_length].
attentions = tf.stack(attention_list)
# Reduce mean across all layers (axis=0) and all heads (axis=2) to get a
# tensor with shape [batch_size, target_length, input_length].
return tf.reduce_mean(attentions, [0, 2])
def kl_divergence_loss(expected_logits, actual_logits):
p = tfp.distributions.Categorical(logits=expected_logits)
q = tfp.distributions.Categorical(logits=actual_logits)
return tfp.distributions.kl_divergence(p, q)
def mse_loss(expected_logits, actual_weights):
expected_weights = tf.nn.softmax(expected_logits)
return tf.losses.mean_squared_error(expected_weights, actual_weights)
# For each hidden layer, we have attention-logit and attention-weight tensors
# with shape [batch_size, num_heads, target_length, input_length].
loss = 0.0
if loss_type == "mse":
actual_encdec_attention_weights = [
t for layer_key, t in actual_attentions.items()
if "encdec_attention" in layer_key and not layer_key.endswith("/logits")
]
actual_attention_weights = combine_attentions(
actual_encdec_attention_weights)
loss = mse_loss(expected_attention_logits, actual_attention_weights)
else:
actual_encdec_attention_logits = [
t for layer_key, t in actual_attentions.items()
if "encdec_attention" in layer_key and layer_key.endswith("/logits")
]
actual_attention_logits = combine_attentions(actual_encdec_attention_logits)
loss = kl_divergence_loss(expected_attention_logits,
actual_attention_logits)
return loss * loss_multiplier |
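For reference, the per-position KL term above is the usual categorical KL between the softmaxed logits; a tiny numpy sketch for a single distribution (illustration only, not the TF path):
import numpy as np
def categorical_kl(expected_logits, actual_logits):
  p = np.exp(expected_logits - expected_logits.max()); p /= p.sum()
  q = np.exp(actual_logits - actual_logits.max()); q /= q.sum()
  return float(np.sum(p * (np.log(p) - np.log(q))))
print(categorical_kl(np.array([2.0, 0.0, 0.0]), np.array([2.0, 0.0, 0.0])))  # 0.0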
Gets a bunch of sinusoids of different frequencies.
Each channel of the input Tensor is incremented by a sinusoid of a different
frequency and phase.
This allows attention to learn to use absolute and relative positions.
Timing signals should be added to some precursors of both the query and the
memory inputs to attention.
The use of relative position is possible because sin(x+y) and cos(x+y) can be
expressed in terms of y, sin(x) and cos(x).
In particular, we use a geometric sequence of timescales starting with
min_timescale and ending with max_timescale. The number of different
timescales is equal to channels / 2. For each timescale, we
generate the two sinusoidal signals sin(timestep/timescale) and
cos(timestep/timescale). All of these sinusoids are concatenated in
the channels dimension.
Args:
length: scalar, length of timing signal sequence.
channels: scalar, size of timing embeddings to create. The number of
different timescales is equal to channels / 2.
min_timescale: a float
max_timescale: a float
start_index: index of first position
Returns:
a Tensor of timing signals [1, length, channels] | def get_timing_signal_1d(length,
channels,
min_timescale=1.0,
max_timescale=1.0e4,
start_index=0):
"""Gets a bunch of sinusoids of different frequencies.
Each channel of the input Tensor is incremented by a sinusoid of a different
frequency and phase.
This allows attention to learn to use absolute and relative positions.
Timing signals should be added to some precursors of both the query and the
memory inputs to attention.
The use of relative position is possible because sin(x+y) and cos(x+y) can be
expressed in terms of y, sin(x) and cos(x).
In particular, we use a geometric sequence of timescales starting with
min_timescale and ending with max_timescale. The number of different
timescales is equal to channels / 2. For each timescale, we
generate the two sinusoidal signals sin(timestep/timescale) and
cos(timestep/timescale). All of these sinusoids are concatenated in
the channels dimension.
Args:
length: scalar, length of timing signal sequence.
channels: scalar, size of timing embeddings to create. The number of
different timescales is equal to channels / 2.
min_timescale: a float
max_timescale: a float
start_index: index of first position
Returns:
a Tensor of timing signals [1, length, channels]
"""
position = tf.to_float(tf.range(length) + start_index)
num_timescales = channels // 2
log_timescale_increment = (
math.log(float(max_timescale) / float(min_timescale)) /
tf.maximum(tf.to_float(num_timescales) - 1, 1))
inv_timescales = min_timescale * tf.exp(
tf.to_float(tf.range(num_timescales)) * -log_timescale_increment)
scaled_time = tf.expand_dims(position, 1) * tf.expand_dims(inv_timescales, 0)
signal = tf.concat([tf.sin(scaled_time), tf.cos(scaled_time)], axis=1)
signal = tf.pad(signal, [[0, 0], [0, tf.mod(channels, 2)]])
signal = tf.reshape(signal, [1, length, channels])
return signal |
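The following NumPy sketch mirrors the shapes and formulas of the function above and can be handy for inspecting the signal outside a TensorFlow graph; it is an illustration under the same parameter names, not the library implementation.

import numpy as np

def timing_signal_1d_np(length, channels, min_timescale=1.0,
                        max_timescale=1.0e4, start_index=0):
  position = np.arange(length, dtype=np.float32) + start_index
  num_timescales = channels // 2
  log_timescale_increment = (
      np.log(float(max_timescale) / float(min_timescale)) /
      max(num_timescales - 1, 1))
  inv_timescales = min_timescale * np.exp(
      np.arange(num_timescales, dtype=np.float32) * -log_timescale_increment)
  scaled_time = position[:, None] * inv_timescales[None, :]
  signal = np.concatenate([np.sin(scaled_time), np.cos(scaled_time)], axis=1)
  if channels % 2:  # pad one zero channel when channels is odd
    signal = np.pad(signal, [(0, 0), (0, 1)], mode="constant")
  return signal.reshape(1, length, channels)

print(timing_signal_1d_np(10, 8).shape)  # (1, 10, 8)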
Adds a bunch of sinusoids of different frequencies to a Tensor.
Each channel of the input Tensor is incremented by a sinusoid of a different
frequency and phase.
This allows attention to learn to use absolute and relative positions.
Timing signals should be added to some precursors of both the query and the
memory inputs to attention.
The use of relative position is possible because sin(x+y) and cos(x+y) can be
expressed in terms of y, sin(x) and cos(x).
In particular, we use a geometric sequence of timescales starting with
min_timescale and ending with max_timescale. The number of different
timescales is equal to channels / 2. For each timescale, we
generate the two sinusoidal signals sin(timestep/timescale) and
cos(timestep/timescale). All of these sinusoids are concatenated in
the channels dimension.
Args:
x: a Tensor with shape [batch, length, channels]
min_timescale: a float
max_timescale: a float
start_index: index of first position
Returns:
a Tensor the same shape as x. | def add_timing_signal_1d(x,
min_timescale=1.0,
max_timescale=1.0e4,
start_index=0):
"""Adds a bunch of sinusoids of different frequencies to a Tensor.
Each channel of the input Tensor is incremented by a sinusoid of a different
frequency and phase.
This allows attention to learn to use absolute and relative positions.
Timing signals should be added to some precursors of both the query and the
memory inputs to attention.
The use of relative position is possible because sin(x+y) and cos(x+y) can be
expressed in terms of y, sin(x) and cos(x).
In particular, we use a geometric sequence of timescales starting with
min_timescale and ending with max_timescale. The number of different
timescales is equal to channels / 2. For each timescale, we
generate the two sinusoidal signals sin(timestep/timescale) and
cos(timestep/timescale). All of these sinusoids are concatenated in
the channels dimension.
Args:
x: a Tensor with shape [batch, length, channels]
min_timescale: a float
max_timescale: a float
start_index: index of first position
Returns:
a Tensor the same shape as x.
"""
length = common_layers.shape_list(x)[1]
channels = common_layers.shape_list(x)[2]
signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale,
start_index)
return x + common_layers.cast_like(signal, x) |
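A quick numerical check of the relative-position claim repeated in the docstring above: sin(x + y) and cos(x + y) are linear combinations of sin(x) and cos(x) whose coefficients depend only on the offset y (a rotation by y). The values of x and y below are arbitrary; this is an illustration of the identity, not part of the library.

import numpy as np

x, y = 1.3, 0.7  # arbitrary position and offset
lhs = np.array([np.sin(x + y), np.cos(x + y)])
rotation = np.array([[np.cos(y), np.sin(y)],
                     [-np.sin(y), np.cos(y)]])  # depends only on y
rhs = rotation.dot(np.array([np.sin(x), np.cos(x)]))
print(np.allclose(lhs, rhs))  # True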
Get an n-dimensional embedding as the layer (vertical) timing signal.
Adds embeddings to represent the position of the layer in the tower.
Args:
channels: dimension of the timing signal
layer: layer num
num_layers: total number of layers
Returns:
a Tensor of timing signals [1, 1, channels]. | def get_layer_timing_signal_learned_1d(channels, layer, num_layers):
"""get n-dimensional embedding as the layer (vertical) timing signal.
Adds embeddings to represent the position of the layer in the tower.
Args:
channels: dimension of the timing signal
layer: layer num
num_layers: total number of layers
Returns:
a Tensor of timing signals [1, 1, channels].
"""
shape = [num_layers, 1, 1, channels]
layer_embedding = (
tf.get_variable(
"layer_embedding",
shape,
initializer=tf.random_normal_initializer(0, channels**-0.5)) *
(channels**0.5))
return layer_embedding[layer, :, :, :] |
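For intuition, here is a NumPy sketch of the lookup this function performs; the random table stands in for the trainable tf.get_variable call, and the sizes are hypothetical.

import numpy as np

num_layers, channels = 6, 8  # hypothetical sizes
# Stand-in for the trainable [num_layers, 1, 1, channels] embedding table,
# initialized with stddev channels**-0.5 and rescaled by channels**0.5.
layer_embedding = np.random.normal(
    0.0, channels**-0.5, size=(num_layers, 1, 1, channels)) * channels**0.5
layer = 2
signal = layer_embedding[layer]    # shape (1, 1, channels)
x = np.zeros((4, 10, channels))    # [batch, length, channels] activations
print((x + signal).shape)          # (4, 10, 8): broadcasts over batch and length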