def decode(self, ids):
  """Transform a sequence of int ids into a human-readable string.

  EOS is not expected in ids.

  Args:
    ids: list of integers to be converted.

  Returns:
    s: human-readable string.
  """
  decoded_ids = []
  for id_ in ids:
    if 0 <= id_ < self._num_reserved_ids:
      decoded_ids.append(RESERVED_TOKENS[int(id_)])
    else:
      decoded_ids.append(id_ - self._num_reserved_ids)
  return ' '.join([str(d) for d in decoded_ids])
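# A self-contained sketch of the id-shift convention used by decode() above.
# The RESERVED_TOKENS_EXAMPLE values are hypothetical, for illustration only:
# ids below the reserved count map to reserved tokens, all other ids are
# shifted down before being printed.
RESERVED_TOKENS_EXAMPLE = ['<pad>', '<EOS>']
num_reserved = len(RESERVED_TOKENS_EXAMPLE)

def decode_sketch(ids):
  out = []
  for id_ in ids:
    if 0 <= id_ < num_reserved:
      out.append(RESERVED_TOKENS_EXAMPLE[id_])
    else:
      out.append(str(id_ - num_reserved))
  return ' '.join(out)

assert decode_sketch([0, 2, 3]) == '<pad> 0 1'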
def __init__(self, vocab_filename, reverse=False, vocab_list=None,
             num_reserved_ids=NUM_RESERVED_TOKENS):
  """Initialize from a file or list, one token per line."""
  super(TokenTextEncoder, self).__init__(num_reserved_ids=num_reserved_ids)
  self._reverse = reverse
  if vocab_filename:
    self._init_vocab_from_file(vocab_filename)
  else:
    assert vocab_list is not None
    self._init_vocab_from_list(vocab_list)
def encode(self, sentence):
  """Converts a space-separated string of tokens to a list of ids."""
  ret = [self._token_to_id[tok] for tok in sentence.strip().split()]
  return ret[::-1] if self._reverse else ret
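# A self-contained sketch of the encode/decode pair above, assuming two
# reserved tokens at the front of the vocabulary (the exact reserved tokens
# and therefore the exact ids depend on NUM_RESERVED_TOKENS).
vocab = ['<pad>', '<EOS>', 'hello', 'world']  # reserved tokens come first
token_to_id = {tok: i for i, tok in enumerate(vocab)}

def encode_sketch(sentence, reverse=False):
  ids = [token_to_id[tok] for tok in sentence.strip().split()]
  return ids[::-1] if reverse else ids

assert encode_sketch('hello world') == [2, 3]
assert encode_sketch('hello world', reverse=True) == [3, 2]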
def _init_vocab_from_file(self, filename):
  """Load vocab from a file."""

  def token_gen():
    with tf.gfile.Open(filename) as f:
      for line in f:
        token = line.strip()
        yield token

  self._init_vocab(token_gen())
def _init_vocab(self, token_generator):
  """Initialize vocabulary with tokens from token_generator."""
  self._id_to_token = {}
  self._id_to_token.update(dict(enumerate(RESERVED_TOKENS)))
  token_id = len(RESERVED_TOKENS)
  for token in token_generator:
    self._id_to_token[token_id] = token
    token_id += 1
  self._token_to_id = dict(
      (v, k) for (k, v) in six.iteritems(self._id_to_token))
def __init__(self, filename=None):
  """Initialize and read from a file, if provided."""
  self._alphabet = set()
  if filename is not None:
    self._load_from_file(filename)
  super(SubwordTextEncoder, self).__init__(num_reserved_ids=None)
def encode(self, raw_text):
  """Converts a native string to a list of subtoken ids.

  Args:
    raw_text: a native string.

  Returns:
    a list of integers in the range [0, vocab_size)
  """
  return self._tokens_to_subtoken_ids(
      tokenizer.encode(native_to_unicode(raw_text)))
def decode(self, subtokens):
  """Converts a sequence of subtoken ids to a native string.

  Args:
    subtokens: a list of integers in the range [0, vocab_size)

  Returns:
    a native string
  """
  return unicode_to_native(
      tokenizer.decode(self._subtoken_ids_to_tokens(subtokens)))
@property
def vocab_size(self):
  """The subtoken vocabulary size."""
  return len(self._all_subtoken_strings)
def _tokens_to_subtoken_ids(self, tokens):
  """Converts a list of tokens to a list of subtoken ids.

  Args:
    tokens: a list of strings.

  Returns:
    a list of integers in the range [0, vocab_size)
  """
  ret = []
  for token in tokens:
    ret.extend(self._escaped_token_to_subtoken_ids(
        _escape_token(token, self._alphabet)))
  return ret
def _subtoken_ids_to_tokens(self, subtokens):
  """Converts a list of subtoken ids to a list of tokens.

  Args:
    subtokens: a list of integers in the range [0, vocab_size)

  Returns:
    a list of strings.
  """
  concatenated = ''.join(
      [self._subtoken_id_to_subtoken_string(s) for s in subtokens])
  split = concatenated.split('_')
  return [_unescape_token(t + '_') for t in split if t]
def _subtoken_id_to_subtoken_string(self, subtoken):
  """Converts a subtoken integer ID to a subtoken string."""
  if 0 <= subtoken < self.vocab_size:
    return self._all_subtoken_strings[subtoken]
  return u''
def _escaped_token_to_subtoken_strings(self, escaped_token):
  """Converts an escaped token string to a list of subtoken strings.

  Args:
    escaped_token: An escaped token as a unicode string.

  Returns:
    A list of subtokens as unicode strings.
  """
  # Greedy longest-match: at each position, take the longest vocabulary
  # entry that matches, then continue from where it ends.
  ret = []
  start = 0
  token_len = len(escaped_token)
  while start < token_len:
    for end in xrange(
        min(token_len, start + self._max_subtoken_len), start, -1):
      subtoken = escaped_token[start:end]
      if subtoken in self._subtoken_string_to_id:
        ret.append(subtoken)
        start = end
        break
    else:  # No break: nothing matched, which should never happen since the
      # alphabet (all single characters) is always in the vocabulary.
      assert False, 'Token substring not found in subtoken vocabulary.'
  return ret
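# A self-contained sketch of the greedy longest-match segmentation performed
# above: at each position, try the longest candidate first, falling back to
# shorter ones. The toy vocabulary below is made up for illustration.
def greedy_segment(escaped_token, subtoken_vocab, max_subtoken_len):
  ret = []
  start = 0
  while start < len(escaped_token):
    for end in range(min(len(escaped_token), start + max_subtoken_len),
                     start, -1):
      piece = escaped_token[start:end]
      if piece in subtoken_vocab:
        ret.append(piece)
        start = end
        break
    else:
      raise AssertionError('substring not in subtoken vocabulary')
  return ret

toy_vocab = {'un', 'related', 'rel', '_',
             'u', 'n', 'r', 'e', 'l', 'a', 't', 'd'}
assert greedy_segment('unrelated_', toy_vocab, 8) == ['un', 'related', '_']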
def _escaped_token_to_subtoken_ids(self, escaped_token):
  """Converts an escaped token string to a list of subtoken IDs.

  Args:
    escaped_token: An escaped token as a unicode string.

  Returns:
    A list of subtoken IDs as integers.
  """
  return [self._subtoken_string_to_id[subtoken]
          for subtoken in self._escaped_token_to_subtoken_strings(
              escaped_token)]
@classmethod
def build_to_target_size(cls, target_size, token_counts, min_val, max_val,
                         num_iterations=4):
  """Builds a SubwordTextEncoder that has `vocab_size` near `target_size`.

  Uses simple recursive binary search to find a minimum token count that
  most closely matches the `target_size`.

  Args:
    target_size: Desired vocab_size to approximate.
    token_counts: A dictionary of token counts, mapping string to int.
    min_val: An integer; lower bound for the minimum token count.
    max_val: An integer; upper bound for the minimum token count.
    num_iterations: An integer; how many iterations of refinement.

  Returns:
    A SubwordTextEncoder instance.

  Raises:
    ValueError: If `min_val` is greater than `max_val`.
  """
  if min_val > max_val:
    raise ValueError('Lower bound for the minimum token count '
                     'is greater than the upper bound.')

  def bisect(min_val, max_val):
    """Bisection to find the right size."""
    present_count = (max_val + min_val) // 2
    tf.logging.info('Trying min_count %d' % present_count)
    subtokenizer = cls()
    subtokenizer.build_from_token_counts(token_counts, present_count,
                                         num_iterations)
    if subtokenizer.vocab_size == target_size or min_val >= max_val:
      return subtokenizer
    if subtokenizer.vocab_size > target_size:
      other_subtokenizer = bisect(present_count + 1, max_val)
    else:
      other_subtokenizer = bisect(min_val, present_count - 1)
    if other_subtokenizer is None:
      return subtokenizer
    if (abs(other_subtokenizer.vocab_size - target_size) <
        abs(subtokenizer.vocab_size - target_size)):
      return other_subtokenizer
    return subtokenizer

  return bisect(min_val, max_val)
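# A self-contained sketch of the bisection idea above: vocab size shrinks as
# min_count grows, so we binary-search min_count and keep whichever candidate
# lands closest to the target. vocab_size_for() stands in for the expensive
# build_from_token_counts() call; the toy model below is made up.
def closest_threshold(target, lo, hi, vocab_size_for):
  best = None
  while lo <= hi:
    mid = (lo + hi) // 2
    size = vocab_size_for(mid)
    if best is None or abs(size - target) < abs(best[1] - target):
      best = (mid, size)
    if size > target:
      lo = mid + 1  # raise the count threshold to shrink the vocab
    else:
      hi = mid - 1  # lower the threshold to grow the vocab
  return best

# Toy monotone model: vocab_size ~ 1000 // min_count.
assert closest_threshold(100, 1, 1000, lambda c: 1000 // c)[0] == 10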
def build_from_token_counts(self, token_counts, min_count, num_iterations=4,
                            num_reserved_ids=NUM_RESERVED_TOKENS):
  """Train a SubwordTextEncoder based on a dictionary of word counts.

  Args:
    token_counts: a dictionary of Unicode strings to int.
    min_count: an integer - discard subtokens with lower counts.
    num_iterations: an integer.  How many iterations of refinement.
    num_reserved_ids: an integer.  How many ids to reserve for special
      tokens.
  """
  self._init_alphabet_from_tokens(six.iterkeys(token_counts))
  # Bootstrap the initial vocabulary with the individual alphabet characters.
  self._init_subtokens_from_list(list(self._alphabet),
                                 reserved=num_reserved_ids)
  if min_count < 1:
    min_count = 1
  for i in xrange(num_iterations):
    tf.logging.info('Iteration {0}'.format(i))
    # Count all substrings that start on current subtoken boundaries.
    subtoken_counts = collections.defaultdict(int)
    for token, count in six.iteritems(token_counts):
      escaped_token = _escape_token(token, self._alphabet)
      subtokens = self._escaped_token_to_subtoken_strings(escaped_token)
      start = 0
      for subtoken in subtokens:
        for end in xrange(start + 1, len(escaped_token) + 1):
          new_subtoken = escaped_token[start:end]
          subtoken_counts[new_subtoken] += count
        start += len(subtoken)
    # Bucket the candidate subtoken strings by length.
    len_to_subtoken_strings = []
    for subtoken_string, count in six.iteritems(subtoken_counts):
      lsub = len(subtoken_string)
      if count >= min_count:
        while len(len_to_subtoken_strings) <= lsub:
          len_to_subtoken_strings.append(set())
        len_to_subtoken_strings[lsub].add(subtoken_string)
    # Consider candidates longest to shortest, so that accepting a longer
    # subtoken decrements the counts of its prefixes.
    new_subtoken_strings = []
    for lsub in xrange(len(len_to_subtoken_strings) - 1, 0, -1):
      subtoken_strings = len_to_subtoken_strings[lsub]
      for subtoken_string in subtoken_strings:
        count = subtoken_counts[subtoken_string]
        if count >= min_count:
          # Alphabet characters are added later, regardless of count.
          if subtoken_string not in self._alphabet:
            new_subtoken_strings.append((count, subtoken_string))
          for l in xrange(1, lsub):
            subtoken_counts[subtoken_string[:l]] -= count
    # Include the alphabet explicitly to guarantee all strings are encodable.
    new_subtoken_strings.extend(
        (subtoken_counts.get(a, 0), a) for a in self._alphabet)
    new_subtoken_strings.sort(reverse=True)
    # Reinitialize to the candidate vocabulary.
    self._init_subtokens_from_list(
        [subtoken for _, subtoken in new_subtoken_strings],
        reserved=num_reserved_ids)
    tf.logging.info('vocab_size = %d' % self.vocab_size)
def dump(self):
  """Debugging dump of the current subtoken vocabulary."""
  subtoken_strings = [
      (i, s) for (s, i) in six.iteritems(self._subtoken_string_to_id)
  ]
  print(u', '.join(u"{0} : '{1}'".format(i, s)
                   for (i, s) in sorted(subtoken_strings)))
def _init_subtokens_from_list(self, subtoken_strings, reserved=0):
  """Initialize token information from a list of subtoken strings."""
  self._all_subtoken_strings = [u''] * reserved + subtoken_strings
  # We need to match the longest subtoken first; record the maximum length.
  self._max_subtoken_len = max([len(s) for s in subtoken_strings])
  self._subtoken_string_to_id = {
      s: i + reserved for i, s in enumerate(subtoken_strings) if s
  }
def _init_alphabet_from_tokens(self, tokens):
  """Initialize alphabet from an iterable of token or subtoken strings."""
  self._alphabet = {c for token in tokens for c in token}
  self._alphabet |= _ESCAPE_CHARS
def _load_from_file(self, filename):
  """Load from a file."""
  subtoken_strings = []
  with tf.gfile.Open(filename) as f:
    for line in f:
      # Each line holds one quoted subtoken; strip the surrounding quotes.
      subtoken_strings.append(native_to_unicode(line.strip()[1:-1]))
  self._init_subtokens_from_list(subtoken_strings)
  self._init_alphabet_from_tokens(subtoken_strings)
@property
def num_output_predictions(self):
  """Number of float predictions per timestep."""
  return 10
@property
def train_generator(self):
  """Generator; takes 3 args: nbr_symbols, max_length, nbr_cases."""

  def _gen(nbr_symbols, max_length, nbr_cases):
    plain_vocab = range(nbr_symbols)
    indices = generate_plaintext_random(plain_vocab, self.distribution,
                                        nbr_cases, max_length)
    codes = encipher_shift(indices, plain_vocab, self.shift)
    for plain, code in zip(indices, codes):
      yield {'inputs': plain, 'targets': code}

  return _gen
@property
def train_generator(self):
  """Generator; takes 3 args: nbr_symbols, max_length, nbr_cases."""

  def _gen(nbr_symbols, max_length, nbr_cases):
    plain_vocab = range(nbr_symbols)
    indices = generate_plaintext_random(plain_vocab, self.distribution,
                                        nbr_cases, max_length)
    codes = encipher_vigenere(indices, plain_vocab, self.key)
    for plain, code in zip(indices, codes):
      yield {'inputs': plain, 'targets': code}

  return _gen
def __init__(self, vocab, shift):
  """Initialize shift layer.

  Args:
    vocab: (list of String) the vocabulary
    shift: (Integer) the amount of shift applied to the alphabet.
      A positive number implies a shift to the right, a negative number
      a shift to the left.
  """
  self.shift = shift
  alphabet = vocab
  shifted_alphabet = deque(alphabet)
  shifted_alphabet.rotate(shift)
  self.encrypt = dict(zip(alphabet, list(shifted_alphabet)))
  self.decrypt = dict(zip(list(shifted_alphabet), alphabet))
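# A self-contained sketch of the rotate-based substitution table built above:
# deque.rotate(shift) moves elements to the right, so plaintext symbol i maps
# to whatever lands at position i after rotation. Toy alphabet for
# illustration only.
from collections import deque

alphabet = [0, 1, 2, 3, 4]
shift = 1
rotated = deque(alphabet)
rotated.rotate(shift)  # [4, 0, 1, 2, 3]
encrypt = dict(zip(alphabet, rotated))
decrypt = dict(zip(rotated, alphabet))

assert encrypt[0] == 4 and decrypt[4] == 0
assert all(decrypt[encrypt[s]] == s for s in alphabet)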
def _check_reset_and_type_change(self, name, orig_ctr):
  """Check if name is in orig_ctr or in one of the other type containers."""
  if name in orig_ctr:
    tf.logging.warning('Overwriting hparam %s', name)
  ctr_names = [(self._categorical_params, 'categorical'),
               (self._discrete_params, 'discrete'),
               (self._float_params, 'float'),
               (self._int_params, 'int')]
  ctrs, names = list(zip(*ctr_names))
  orig_name = names[ctrs.index(orig_ctr)]
  for ctr, ctr_name in ctr_names:
    if ctr is orig_ctr:
      continue
    if name in ctr:
      raise ValueError(
          'Setting hyperparameter %s as type %s, but a hyperparameter of '
          'the same name was originally registered as type %s' %
          (name, ctr_name, orig_name))
def _get_weights(self):
  """Create or get concatenated embedding or softmax variable.

  Returns:
    a list of self._num_shards Tensors.
  """
  num_shards = self._model_hparams.symbol_modality_num_shards
  shards = []
  for i in xrange(num_shards):
    # Spread any remainder rows over the first (vocab_size % num_shards)
    # shards so the shard sizes sum to vocab_size.
    shard_size = (self._vocab_size // num_shards +
                  (1 if i < self._vocab_size % num_shards else 0))
    var_name = 'weights_%d' % i
    shards.append(
        tf.get_variable(
            var_name, [shard_size, self._body_input_depth],
            initializer=tf.random_normal_initializer(
                0.0, self._body_input_depth**-0.5)))
  if num_shards == 1:
    ret = shards[0]
  else:
    ret = tf.concat(shards, 0)
  ret = eu.ConvertGradientToTensor(ret)
  return ret
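# A self-contained check of the shard-size arithmetic above: the first
# (vocab_size % num_shards) shards get one extra row, so the shard sizes
# always sum to vocab_size even when it is not divisible by num_shards.
def shard_sizes(vocab_size, num_shards):
  return [vocab_size // num_shards + (1 if i < vocab_size % num_shards else 0)
          for i in range(num_shards)]

assert shard_sizes(10, 4) == [3, 3, 2, 2]
assert sum(shard_sizes(32000, 16)) == 32000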
def top(self, body_output, _):
  """Generate logits.

  Args:
    body_output: A Tensor with shape [batch, p0, p1, body_input_depth]

  Returns:
    logits: A Tensor with shape [batch, p0, p1, ?, vocab_size].
  """
  if self._model_hparams.shared_embedding_and_softmax_weights:
    scope_name = 'shared'
    reuse = True
  else:
    scope_name = 'softmax'
    reuse = False
  with tf.variable_scope(scope_name, reuse=reuse):
    var = self._get_weights()
    shape = tf.shape(body_output)[:-1]
    body_output = tf.reshape(body_output, [-1, self._body_input_depth])
    logits = tf.matmul(body_output, var, transpose_b=True)
    logits = tf.reshape(logits, tf.concat([shape, [self._vocab_size]], 0))
    return tf.expand_dims(logits, 3)
def bottom(self, inputs):
  """Transform input from data space to model space.

  Perform the Xception "Entry flow", which consists of two convolutional
  filter upscalings followed by three residually connected separable
  convolution blocks.

  Args:
    inputs: A Tensor with shape [batch, ...]

  Returns:
    body_input: A Tensor with shape [batch, ?, ?, body_input_depth].
  """
  with tf.variable_scope(self.name):

    def xnet_resblock(x, filters, res_relu, name):
      with tf.variable_scope(name):
        y = common_layers.separable_conv_block(
            x, filters, [((1, 1), (3, 3)), ((1, 1), (3, 3))],
            first_relu=True, padding='SAME', force2d=True,
            name='sep_conv_block')
        y = common_layers.pool(y, (3, 3), 'MAX', 'SAME', strides=(2, 2))
        return y + common_layers.conv_block(
            x, filters, [((1, 1), (1, 1))], padding='SAME', strides=(2, 2),
            first_relu=res_relu, force2d=True, name='res_conv0')

    inputs = common_layers.standardize_images(inputs)
    x = common_layers.conv_block(
        inputs, 32, [((1, 1), (3, 3))], first_relu=False, padding='SAME',
        strides=(2, 2), force2d=True, name='conv0')
    x = common_layers.conv_block(
        x, 64, [((1, 1), (3, 3))], padding='SAME', force2d=True,
        name='conv1')
    x = xnet_resblock(x, min(128, self._body_input_depth), True, 'block0')
    x = xnet_resblock(x, min(256, self._body_input_depth), False, 'block1')
    return xnet_resblock(x, self._body_input_depth, False, 'block2')
def bottom(self, inputs):
  """Transform input from data space to model space.

  Args:
    inputs: A Tensor with shape [batch, ...]

  Returns:
    body_input: A Tensor with shape [batch, ?, ?, body_input_depth].
  """
  with tf.variable_scope(self.name):

    def xnet_resblock(x, filters, res_relu, name):
      with tf.variable_scope(name):
        y = common_layers.separable_conv_block(
            x, filters, [((1, 1), (3, 3)), ((1, 1), (3, 3))],
            first_relu=True, padding='SAME', force2d=True,
            name='sep_conv_block')
        y = common_layers.pool(y, (3, 3), 'MAX', 'SAME', strides=(2, 2))
        return y + common_layers.conv_block(
            x, filters, [((1, 1), (1, 1))], padding='SAME', strides=(2, 2),
            first_relu=res_relu, force2d=True, name='res_conv0')

    x = tf.to_float(inputs) / 255.0
    x.set_shape([None, None, None, 1])
    for i in xrange(self._model_hparams.audio_compression):
      x = xnet_resblock(x, 2**(i + 1), True, 'compress_block_%d' % i)
    return xnet_resblock(x, self._body_input_depth, False,
                         'compress_block_final')
def bottom(self, inputs):
  """Transform input from data space to model space.

  Args:
    inputs: A Tensor with shape [batch, ...]

  Returns:
    body_input: A Tensor with shape [batch, ?, ?, body_input_depth].
  """
  with tf.variable_scope(self.name):

    def xnet_resblock(x, filters, res_relu, name):
      with tf.variable_scope(name):
        y = common_layers.separable_conv_block(
            x, filters, [((1, 1), (3, 3)), ((1, 1), (3, 3))],
            first_relu=True, padding='SAME', force2d=True,
            name='sep_conv_block')
        y = common_layers.pool(y, (3, 3), 'MAX', 'SAME', strides=(2, 1))
        return y + common_layers.conv_block(
            x, filters, [((1, 1), (1, 1))], padding='SAME', strides=(2, 1),
            first_relu=res_relu, force2d=True, name='res_conv0')

    x = tf.bitcast(inputs, tf.float32)
    x.set_shape([None, None, None, 1])
    for i in xrange(self._model_hparams.audio_compression):
      x = xnet_resblock(x, 2**(i + 1), True, 'compress_block_%d' % i)
    return xnet_resblock(x, self._body_input_depth, False,
                         'compress_block_final')
def top(self, body_output, _):
  """Transform inputs from model space to target space.

  Perform the Xception "Exit flow", consisting of a single residual block
  and two separable convolutional upscalings followed by global spatial
  average pooling.

  Args:
    body_output: A Tensor with shape [batch, ?, ?, body_output_size].

  Returns:
    a Tensor with shape [batch_size, ?, ?, vocab_size]
  """
  with tf.variable_scope(self.name):
    x = body_output
    if self._is_2d:
      length_float = tf.to_float(tf.shape(x)[1])
      spatial_dim_float = tf.sqrt(length_float)
      spatial_dim = tf.to_int32(spatial_dim_float)
      x = tf.reshape(x,
                     [-1, spatial_dim, spatial_dim, self._body_input_depth])
    x = common_layers.conv_block_downsample(x, self._kernel, self._strides,
                                            self._padding)
    x = tf.nn.relu(x)
    x = tf.reduce_mean(x, axis=[1, 2], keep_dims=True)
    res = common_layers.conv(x, self._vocab_size, (1, 1))
    return tf.expand_dims(res, 3)
def __init__(self, hparams, mode, problem_hparams, problem_idx=0,
             data_parallelism=None, ps_devices=None):
  """Create a T2TModel.

  Args:
    hparams: a hyperparameters object.
    mode: The execution mode, as defined in tf.contrib.learn.ModeKeys.
    problem_hparams: a hyperparameters object.
    problem_idx: an integer.
    data_parallelism: an expert_utils.Parallelism object (specifies devices
      for data parallelism).
    ps_devices: a list of devices to be used for experts.

  Returns:
    a T2TModel
  """
  if data_parallelism is None:
    data_parallelism = eu.Parallelism([''])
  if ps_devices is None:
    ps_devices = ['']
  hparams = copy.copy(hparams)
  hparams.add_hparam('mode', mode)
  # When not in training mode, set all forms of dropout to zero.
  if mode != tf.contrib.learn.ModeKeys.TRAIN:
    for key in hparams.values():
      if key[-len('dropout'):] == 'dropout':
        setattr(hparams, key, 0.0)
  if hparams.shared_embedding_and_softmax_weights:
    # Weight sharing is only possible if all problems agree on vocab sizes.
    same_vocab_sizes = True
    for problem in hparams.problems:
      if 'inputs' in problem.input_modality:
        if problem.input_modality['inputs'] != problem.target_modality:
          same_vocab_sizes = False
    if not same_vocab_sizes:
      tf.logging.info('Unsetting shared_embedding_and_softmax_weights.')
      hparams.shared_embedding_and_softmax_weights = 0
  self._hparams = hparams
  self._data_parallelism = data_parallelism
  self._num_datashards = data_parallelism.n
  self._ps_devices = ps_devices
  self._problem_hparams = problem_hparams
  self._problem_idx = problem_idx
  self._create_modalities(problem_hparams, hparams)
def _create_modalities(self, problem_hparams, hparams):
  """Construct modalities in problem_hparams."""
  input_modality_overrides = {}
  for override_str in hparams.input_modalities.split(';'):
    if override_str != 'default':
      parts = override_str.split(':')
      feature_name = parts[0]
      modality_name = ':'.join(parts[1:])
      input_modality_overrides[feature_name] = modality_name
  target_modality_name = None
  if hparams.target_modality and hparams.target_modality != 'default':
    target_modality_name = hparams.target_modality
  input_modality = {}
  for f, modality_spec in six.iteritems(problem_hparams.input_modality):
    if f in input_modality_overrides:
      _warn_changed_modality_type(input_modality_overrides[f],
                                  modality_spec[0], f)
      modality_spec = (input_modality_overrides[f], modality_spec[1])
    input_modality[f] = registry.create_modality(modality_spec, hparams)
  problem_hparams.input_modality = input_modality
  target_modality_spec = problem_hparams.target_modality
  if target_modality_name:
    _warn_changed_modality_type(target_modality_name,
                                target_modality_spec[0], 'target')
    target_modality_spec = (target_modality_name, target_modality_spec[1])
  target_modality = registry.create_modality(target_modality_spec, hparams)
  problem_hparams.target_modality = target_modality
def eval_autoregressive(self, features=None, decode_length=50,
                        last_position_only=False):
  """Autoregressive eval.  Quadratic time in decode_length.

  Args:
    features: a map of string to `Tensor`
    decode_length: an integer.  How many additional timesteps to decode.
    last_position_only: a boolean, speed-up by computing last position only.

  Returns:
    sharded_logits: a list of `Tensor`s.  Assumes one datashard.
    losses: a dictionary: {loss-name (string): floating point `Scalar`}.
      Contains a single key "training".
  """
  _, logits, losses = self._greedy_infer(
      features, decode_length=decode_length,
      last_position_only=last_position_only)
  return [logits], losses
def infer(self, features=None, decode_length=50, beam_size=1, top_beams=1,
          last_position_only=False, alpha=0.0):
  """An inference method.  Quadratic time in decode_length.

  Args:
    features: a map of string to `Tensor`
    decode_length: an integer.  How many additional timesteps to decode.
    beam_size: number of beams.
    top_beams: an integer.  How many of the beams to return.
    last_position_only: a boolean, speed-up by computing last position only.
    alpha: Float that controls the length penalty.  The larger the alpha,
      the stronger the preference for longer translations.

  Returns:
    samples: an integer `Tensor`.
  """
  if not self.has_input:
    beam_size = 1
    self._hparams.sampling_method = 'random'
  if _is_class_modality(
      self._hparams.problems[self._problem_idx].target_modality):
    beam_size = 1
  if beam_size == 1:
    tf.logging.info('Greedy Decoding')
    samples, _, _ = self._greedy_infer(features, decode_length,
                                       last_position_only)
  else:
    tf.logging.info('Beam Decoding with beam size %d' % beam_size)
    samples = self._beam_decode(features, decode_length, beam_size,
                                top_beams, last_position_only, alpha)
  return samples
def _beam_decode(self, features, decode_length, beam_size, top_beams,
                 last_position_only, alpha):
  """Beam search decoding.

  Args:
    features: a map of string to `Tensor`
    decode_length: an integer.  How many additional timesteps to decode.
    beam_size: number of beams.
    top_beams: an integer.  How many of the beams to return.
    last_position_only: a boolean, speed-up by computing last position only.
    alpha: Float that controls the length penalty.  The larger the alpha,
      the stronger the preference for longer translations.

  Returns:
    samples: an integer `Tensor`.  Top samples from the beam search.
  """

  def symbols_to_logits_fn(ids):
    """Go from ids to logits."""
    ids = tf.expand_dims(tf.expand_dims(ids, axis=2), axis=3)
    ids = tf.pad(ids[:, 1:], [[0, 0], [0, 1], [0, 0], [0, 0]])
    features['targets'] = ids
    self._coverage = None
    sharded_logits, _ = self.model_fn(
        features, False, last_position_only=last_position_only)
    logits = sharded_logits[0]
    if last_position_only:
      return tf.squeeze(logits, axis=[1, 2, 3])
    current_output_position = tf.shape(ids)[1] - 1
    logits = logits[:, current_output_position, :, :]
    return tf.squeeze(logits, axis=[1, 2])

  batch_size = tf.shape(features['inputs'])[0]
  initial_ids = tf.zeros([batch_size], dtype=tf.int32)
  inputs_old = features['inputs']
  # Expand the inputs to beam_size copies per example.
  features['inputs'] = tf.expand_dims(features['inputs'], 1)
  if len(features['inputs'].shape) < 5:
    features['inputs'] = tf.expand_dims(features['inputs'], 4)
  features['inputs'] = tf.tile(features['inputs'], [1, beam_size, 1, 1, 1])
  s = tf.shape(features['inputs'])
  features['inputs'] = tf.reshape(features['inputs'],
                                  [s[0] * s[1], s[2], s[3], s[4]])
  target_modality = self._hparams.problems[self._problem_idx].target_modality
  vocab_size = target_modality.top_dimensionality
  decode_length = (
      tf.shape(features['inputs'])[1] + tf.constant(decode_length))
  ids, scores = beam_search.beam_search(symbols_to_logits_fn, initial_ids,
                                        beam_size, decode_length, vocab_size,
                                        alpha)
  # Restore the original inputs.
  features['inputs'] = inputs_old
  return_scores = False
  if top_beams == 1:
    if return_scores:
      return {'outputs': ids[:, 0, 1:], 'scores': scores}
    return ids[:, 0, 1:]
  else:
    if return_scores:
      return {'outputs': ids[:, :top_beams, 1:], 'scores': scores}
    return ids[:, :top_beams, 1:]
def _greedy_infer(self, features, decode_length, last_position_only):
  """A slow greedy inference method.  Quadratic time in decode_length.

  Args:
    features: a map of string to `Tensor`
    decode_length: an integer.  How many additional timesteps to decode.
    last_position_only: a boolean, speed-up by computing last position only.

  Returns:
    samples: an integer `Tensor`.
    logits: `Tensor` of shape [batch_size, time, 1, 1, vocab_size].
    losses: a dictionary: {loss-name (string): floating point `Scalar`}
  """
  if not features:
    features = {}
  inputs_old = None
  if 'inputs' in features and len(features['inputs'].shape) < 4:
    inputs_old = features['inputs']
    features['inputs'] = tf.expand_dims(features['inputs'], 2)
  if not self.has_input:
    features['partial_targets'] = tf.to_int64(features['inputs'])

  def infer_step(recent_output, recent_logits, unused_loss):
    """Inference step."""
    recent_output.set_shape([None, None, None, 1])
    padded = tf.pad(recent_output, [[0, 0], [0, 1], [0, 0], [0, 0]])
    features['targets'] = padded
    samples, logits, losses = self.sample(
        features, last_position_only=last_position_only)
    if last_position_only:
      cur_sample = samples[:, -1, :, :]
    else:
      cur_sample = samples[:, tf.shape(recent_output)[1], :, :]
    cur_sample = tf.to_int64(tf.expand_dims(cur_sample, axis=1))
    samples = tf.concat([recent_output, cur_sample], axis=1)
    samples.set_shape([None, None, None, 1])
    logits = tf.concat([recent_logits, logits[0][:, -1:]], 1)
    loss = sum(losses.values())
    return samples, logits, loss

  if 'partial_targets' in features:
    initial_output = tf.convert_to_tensor(features['partial_targets'])
  else:
    batch_size = tf.shape(features['inputs'])[0]
    initial_output = tf.zeros((batch_size, 0, 1, 1), dtype=tf.int64)
  initial_output = tf.slice(initial_output, [0, 0, 0, 0],
                            tf.shape(initial_output))
  target_modality = self._hparams.problems[self._problem_idx].target_modality
  if _is_class_modality(target_modality):
    decode_length = 1
  else:
    decode_length = tf.shape(features['inputs'])[1] + decode_length
  result = initial_output
  logits = tf.zeros((batch_size, 0, 1, 1,
                     target_modality.top_dimensionality))
  logits.set_shape([None, None, None, None, None])
  loss = 0.0
  result, logits, loss = tf.while_loop(
      lambda result, logits, loss: tf.shape(result)[1] < decode_length,
      infer_step, [result, logits, loss],
      shape_invariants=[
          tf.TensorShape([None, None, None, None]),
          tf.TensorShape([None, None, None, None, None]),
          tf.TensorShape([])
      ],
      back_prop=False,
      parallel_iterations=1)
  if inputs_old is not None:  # Restore the original shape.
    features['inputs'] = inputs_old
  losses = {'training': loss}
  return result, logits, losses
def sample(self, features, last_position_only=False):
  """Run the model and extract samples.

  Args:
    features: a map of string to `Tensor`.
    last_position_only: a boolean, speed-up by computing last position only.

  Returns:
    samples: an integer `Tensor`.
    logits: a list of `Tensor`s, one per datashard.
    losses: a dictionary: {loss-name (string): floating point `Scalar`}.
  """
  sharded_logits, losses = self.model_fn(
      features, False, last_position_only=last_position_only)
  if self._hparams.sampling_method == 'argmax':
    sharded_samples = self._data_parallelism(tf.argmax, sharded_logits, 4)
  else:
    assert self._hparams.sampling_method == 'random'

    def _multinomial_squeeze(logits):
      reshaped_logits = tf.reshape(logits, [-1, tf.shape(logits)[-1]])
      choices = tf.multinomial(reshaped_logits, 1)
      choices = tf.reshape(
          choices, tf.shape(logits)[:logits.get_shape().ndims - 1])
      return choices

    sharded_samples = self._data_parallelism(_multinomial_squeeze,
                                             sharded_logits)
  return tf.concat(sharded_samples, 0), sharded_logits, losses
def model_fn(self, features, skip=False, last_position_only=False):
  """Computes the entire model and produces sharded logits and losses.

  Args:
    features: A dictionary of feature name to tensor.
    skip: a boolean, if we're just dummy-calling and actually skip this
      model (but we need to create variables to not confuse distributed
      training).
    last_position_only: a boolean, compute logits for only the last
      position.

  Returns:
    sharded_logits: a list of `Tensor`s, one per datashard.
    losses: a dictionary: {loss-name (string): floating point `Scalar`}.
  """
  start_time = time.time()
  dp = self._data_parallelism
  sharded_features = self._shard_features(features)
  # Transform features via their corresponding input modalities, reusing
  # modality variables that earlier problems have already created.
  transformed_features = {}
  all_previous_modalities = []
  for key, input_modality in six.iteritems(
      self._problem_hparams.input_modality):
    previous_modalities = [
        self._hparams.problems[i].input_modality[key].name
        for i in xrange(self._problem_idx)
    ]
    all_previous_modalities.extend(previous_modalities)
    do_reuse = input_modality.name in all_previous_modalities
    with tf.variable_scope(input_modality.name, reuse=do_reuse):
      transformed_features[key] = input_modality.bottom_sharded(
          sharded_features[key], dp)
    all_previous_modalities.append(input_modality.name)
  if 'target_space_id' in features:
    transformed_features['target_space_id'] = (
        [features['target_space_id']] * self._num_datashards)
  # Target modality, possibly reused from an earlier problem.
  previous_tgt_modalities = [
      self._hparams.problems[i].target_modality.name
      for i in xrange(self._problem_idx)
  ]
  all_previous_modalities.extend(previous_tgt_modalities)
  target_modality = self._problem_hparams.target_modality
  target_reuse = target_modality.name in previous_tgt_modalities
  with tf.variable_scope(target_modality.name, reuse=target_reuse):
    transformed_features['targets'] = target_modality.targets_bottom_sharded(
        sharded_features['targets'], dp)
  # Construct the model body.
  with tf.variable_scope('body', reuse=self._problem_idx > 0):
    if skip:
      body_outputs = transformed_features['targets']
      losses = {'extra': 0.0}
    else:
      body_outputs, losses = self.model_fn_body_sharded(
          transformed_features)
      if not isinstance(losses, dict):  # If it's a single extra loss.
        losses = {'extra': losses}
  with tf.variable_scope(target_modality.name, reuse=target_reuse):
    if not last_position_only:
      sharded_logits = target_modality.top_sharded(
          body_outputs, sharded_features['targets'], self._data_parallelism)
      training_loss = target_modality.loss_sharded(
          sharded_logits, sharded_features['targets'],
          self._data_parallelism)
      training_loss *= self._problem_hparams.loss_multiplier
    else:
      # Take body outputs (and targets) for the last position only.
      last_position_body_outputs = [
          tf.expand_dims(body_shard[:, -1, :, :], axis=[1])
          for body_shard in body_outputs
      ]
      last_position_targets = [
          tf.expand_dims(target_shard[:, -1:, :, :], axis=[1])
          for target_shard in sharded_features['targets']
      ]
      sharded_logits = target_modality.top_sharded(
          last_position_body_outputs, last_position_targets,
          self._data_parallelism)
      training_loss = None
  tf.logging.info('This model_fn took %.3f sec.' %
                  (time.time() - start_time))
  losses['training'] = training_loss
  return sharded_logits, losses
def model_fn_body_sharded(self, sharded_features):
  """Mixture-of-experts models will override this function.

  Compute model body on all datashards.

  Args:
    sharded_features: map from string to list of Tensors each with shape
      [batch, ?, ?, body_input_size]

  Returns:
    sharded_body_output: a list of Tensors, each with shape
      [batch, O, P, body_output_size]
    extra_loss: a Scalar.
  """
  with tf.name_scope('model'):
    datashard_to_features = [{
        k: v[d] for k, v in six.iteritems(sharded_features)
    } for d in xrange(self._num_datashards)]
    output = self._data_parallelism(
        _with_timing(self.model_fn_body, 'model_fn_body'),
        datashard_to_features)
    if isinstance(output, tuple):
      losses_sharded = output[1]
      if isinstance(losses_sharded[0], dict):
        loss = {}
        for k in losses_sharded[0].keys():
          k_loss_sharded = [losses[k] for losses in losses_sharded]
          loss[k] = tf.reduce_mean(k_loss_sharded)
      else:
        loss = {'extra': tf.reduce_mean(losses_sharded)}
      output = output[0]
    else:
      loss = {'extra': 0.0}
    return output, loss
def model_fn_body(self, features):
  """Most models will override this function.

  Compute label logits for one shard as a function of the transformed
  features.

  Args:
    features: A dictionary of key to Tensor.  Each Tensor has shape
      [batch_size, ?, ?, hidden_size].

  Returns:
    output: tensor of logits with shape
      [batch_size, O, P, body_output_size].
    losses: either single loss as a scalar, a list, a tensor (to be
      averaged) or a dictionary of losses.
  """
  raise NotImplementedError('Abstract Method')
def __init__(self, learning_rate=1.0, momentum=0.0, clip_thresh=None,
             beta=0.999, curvature_window_width=20, zero_debias=True,
             delta_mu=0.0):
  """Construct a new YellowFin optimizer.

  Args:
    learning_rate: A Tensor or a floating point value.  The learning rate.
    momentum: A Tensor or a floating point value.  The momentum.
    clip_thresh: A Tensor or a floating point value.  The clipping
      threshold for tf.clip_by_global_norm.  If None, no clipping will be
      carried out.
    beta: A float value or a constant float tensor.  The smoothing
      parameter for estimations.
    curvature_window_width: An int value or a constant int tensor.
      The curvature window width.
    zero_debias: A boolean, zero debias moving-averages.
    delta_mu: For extensions.  Not necessary in the basic use.

  Note:
    clip_thresh is the threshold value on ||lr * gradient||;
    delta_mu can be a placeholder/variable/tensor scalar.  They are used
    for additional momentum in situations such as asynchronous-parallel
    training.  The default is 0.0 (or None) for basic usage of the
    optimizer.

  Other features:
    If you want to manually control the learning rates, self.lr_factor is
    an interface to the outside; it is a multiplier for the internal
    learning rate in YellowFin.  It is helpful when you want to do
    additional hand tuning or apply some decaying scheme to the tuned
    learning rate in YellowFin.  An example of using lr_factor can be
    found here:
    https://github.com/JianGoForIt/YellowFin/blob/master/char-rnn-tensorflow/train_YF.py#L140
  """
  self._lr = learning_rate
  self._mu = momentum
  self._lr_var = tf.Variable(
      learning_rate, dtype=tf.float32, name='YF_lr', trainable=False)
  self._mu_var = tf.Variable(
      momentum, dtype=tf.float32, name='YF_mu', trainable=False)
  self.lr_factor = tf.Variable(
      1.0, dtype=tf.float32, name='YF_lr_factor', trainable=False)
  if clip_thresh is not None:
    self._clip_thresh_var = tf.Variable(
        clip_thresh, dtype=tf.float32, name='YF_clip_thresh',
        trainable=False)
  else:
    self._clip_thresh_var = None
  self._lr_m = self._lr_var * self.lr_factor
  self._mu_m = self._mu_var + delta_mu
  self._momentum_optimizer = tf.train.MomentumOptimizer(
      self._lr_m, self._mu_m)
  self._beta = beta
  self._moving_averager = None
  self._step = tf.Variable(0, dtype=tf.int32, name='YF_step',
                           trainable=False)
  self._increment_step_op = None
  self._do_tune = tf.greater(self._step, tf.constant(0))
  self._zero_debias = zero_debias
  self.curvature_window_width = curvature_window_width
  self._curv_win = None
  self._grad = None
  self._vars = None
  self._grad_squared = None
  self._grad_norm_squared = None
  self._grad_norm_squared_avg = None
  self._grad_avg = None
  self._grad_avg_squared = None
  self._h_max_t = None
  self._h_min_t = None
  self._h_min = None
  self._h_max = None
  self._grad_var = None
  self._grad_norm = None
  self._grad_norm_avg = None
  self._d_t = None
  self._dist_to_opt_avg = None
def _curvature_range(self):
  """Curvature range.

  Returns:
    h_max_t, h_min_t ops
  """
  self._curv_win = tf.Variable(
      np.zeros([self.curvature_window_width]), dtype=tf.float32,
      name='curv_win', trainable=False)
  self._curv_win = tf.scatter_update(
      self._curv_win, self._step % self.curvature_window_width,
      self._grad_norm_squared)
  # Only the first min(window_width, step + 1) slots are valid before the
  # circular buffer fills up.
  valid_window = tf.slice(
      self._curv_win, tf.constant([0]),
      tf.expand_dims(
          tf.minimum(
              tf.constant(self.curvature_window_width), self._step + 1),
          axis=0))
  self._h_min_t = tf.reduce_min(valid_window)
  self._h_max_t = tf.reduce_max(valid_window)
  curv_range_ops = []
  with tf.control_dependencies([self._h_min_t, self._h_max_t]):
    avg_op = self._moving_averager.apply([self._h_min_t, self._h_max_t])
    with tf.control_dependencies([avg_op]):
      self._h_min = tf.identity(
          self._moving_averager.average(self._h_min_t))
      self._h_max = tf.identity(
          self._moving_averager.average(self._h_max_t))
  curv_range_ops.append(avg_op)
  return curv_range_ops
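# A self-contained numpy sketch of the circular curvature window maintained
# above: step t writes ||grad||^2 into slot t % W, and only the first
# min(W, t + 1) slots are valid before the window fills up. The history
# values below are made up for illustration.
import numpy as np

W = 4
curv_win = np.zeros(W)
history = [9.0, 4.0, 16.0, 1.0, 25.0]
for step, g2 in enumerate(history):
  curv_win[step % W] = g2
  valid = curv_win[:min(W, step + 1)]
  h_min, h_max = valid.min(), valid.max()

# After 5 steps, slot 0 was overwritten by 25.0, so 9.0 has left the window.
assert (h_min, h_max) == (1.0, 25.0)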
def _grad_variance(self):
  """Estimate of gradient variance.

  Returns:
    C_t ops.
  """
  grad_var_ops = []
  tensor_to_avg = []
  for t, g in zip(self._vars, self._grad):
    if isinstance(g, tf.IndexedSlices):
      tensor_to_avg.append(
          tf.reshape(
              tf.unsorted_segment_sum(g.values, g.indices,
                                      g.dense_shape[0]),
              shape=t.get_shape()))
    else:
      tensor_to_avg.append(g)
  avg_op = self._moving_averager.apply(tensor_to_avg)
  grad_var_ops.append(avg_op)
  with tf.control_dependencies([avg_op]):
    self._grad_avg = [
        self._moving_averager.average(val) for val in tensor_to_avg
    ]
    self._grad_avg_squared = [tf.square(val) for val in self._grad_avg]
    self._grad_avg_squared = tf.add_n(
        [tf.reduce_sum(val) for val in self._grad_avg_squared])
    # C_t = E[||g||^2] - ||E[g]||^2.
    self._grad_var = self._grad_norm_squared_avg - self._grad_avg_squared
  return grad_var_ops
def _dist_to_opt(self):
  """Distance to optimum.

  Returns:
    D_t ops
  """
  dist_to_opt_ops = []
  self._grad_norm = tf.sqrt(self._grad_norm_squared)
  avg_op = self._moving_averager.apply([self._grad_norm])
  dist_to_opt_ops.append(avg_op)
  with tf.control_dependencies([avg_op]):
    self._grad_norm_avg = self._moving_averager.average(self._grad_norm)
    self._d_t = self._grad_norm_avg / self._grad_norm_squared_avg
  avg_op = self._moving_averager.apply([self._d_t])
  dist_to_opt_ops.append(avg_op)
  with tf.control_dependencies([avg_op]):
    self._dist_to_opt_avg = tf.identity(
        self._moving_averager.average(self._d_t))
  return dist_to_opt_ops
def _prepare_variables(self):
  """Prepare variables for YellowFin.

  Returns:
    Grad**2, Norm, Norm**2, Mean(Norm**2) ops
  """
  self._moving_averager = tf.train.ExponentialMovingAverage(
      decay=self._beta, zero_debias=self._zero_debias)
  assert self._grad
  prepare_variables_op = []
  self._grad_squared = []
  self._grad_norm_squared = []
  for v, g in zip(self._vars, self._grad):
    if g is None:
      continue
    with ops.colocate_with(v):
      self._grad_squared.append(tf.square(g))
  self._grad_norm_squared = [
      tf.reduce_sum(g_sq) for g_sq in self._grad_squared
  ]
  avg_op = self._moving_averager.apply(self._grad_norm_squared)
  with tf.control_dependencies([avg_op]):
    self._grad_norm_squared_avg = [
        self._moving_averager.average(val)
        for val in self._grad_norm_squared
    ]
    self._grad_norm_squared = tf.add_n(self._grad_norm_squared)
    self._grad_norm_squared_avg = tf.add_n(self._grad_norm_squared_avg)
  prepare_variables_op.append(avg_op)
  return tf.group(*prepare_variables_op)
def _get_lr_tensor(self):
  """Get lr minimizing the surrogate.

  Returns:
    The lr_t.
  """
  lr = (1.0 - tf.sqrt(self._mu))**2 / self._h_min
  return lr
def _get_mu_tensor(self):
  """Get the min mu which minimizes the surrogate.

  Returns:
    The mu_t.
  """
  const_fact = (
      self._dist_to_opt_avg**2 * self._h_min**2 / 2 / self._grad_var)
  coef = tf.Variable([-1.0, 3.0, 0.0, 1.0], dtype=tf.float32,
                     name='cubic_solver_coef')
  coef = tf.scatter_update(coef, tf.constant(2), -(3 + const_fact))
  roots = tf.py_func(np.roots, [coef], Tout=tf.complex64, stateful=False)
  # Filter for the real root inside (0, 1).
  root_idx = tf.logical_and(
      tf.logical_and(
          tf.greater(tf.real(roots), tf.constant(0.0)),
          tf.less(tf.real(roots), tf.constant(1.0))),
      tf.less(tf.abs(tf.imag(roots)), 1e-05))
  root = tf.reshape(
      tf.gather(tf.gather(roots, tf.where(root_idx)), tf.constant(0)),
      shape=[])
  dr = self._h_max / self._h_min
  # Floor mu at the classical momentum bound derived from the dynamic range.
  mu = tf.maximum(
      tf.real(root)**2, ((tf.sqrt(dr) - 1) / (tf.sqrt(dr) + 1))**2)
  return mu
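# A self-contained numpy sketch of the mu/lr tuning rule above, assuming the
# same cubic -x^3 + 3x^2 - (3 + c)x + 1 with c = D^2 * h_min^2 / (2 * C):
# take the real root in (0, 1), floor mu at the classical momentum bound
# ((sqrt(dr) - 1) / (sqrt(dr) + 1))^2 with dr = h_max / h_min, then set
# lr = (1 - sqrt(mu))^2 / h_min. The input values below are made up.
import numpy as np

def tune_mu_lr(dist_to_opt, h_min, h_max, grad_var):
  c = (dist_to_opt**2) * (h_min**2) / 2.0 / grad_var
  roots = np.roots([-1.0, 3.0, -(3.0 + c), 1.0])
  root = [r.real for r in roots
          if 0.0 < r.real < 1.0 and abs(r.imag) < 1e-5][0]
  dr = h_max / h_min
  mu = max(root**2, ((np.sqrt(dr) - 1) / (np.sqrt(dr) + 1))**2)
  lr = (1.0 - np.sqrt(mu))**2 / h_min
  return mu, lr

mu, lr = tune_mu_lr(dist_to_opt=1.0, h_min=1.0, h_max=100.0, grad_var=1.0)
assert 0.0 <= mu < 1.0 and lr > 0.0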
def _yellowfin(self):
  """YellowFin auto-tuning optimizer based on momentum SGD.

  Returns:
    YF ops (Curvature range, Grad_variance, Dist_to_opt, Single-Step,
    Auto-Tuning)
  """
  yellowfin_ops = []
  # Curvature range ops.
  curv_range_ops = self._curvature_range()
  yellowfin_ops += curv_range_ops
  # Estimate of gradient variance ops.
  grad_var_ops = self._grad_variance()
  yellowfin_ops += grad_var_ops
  # Distance to optimum ops.
  dist_to_opt_ops = self._dist_to_opt()
  yellowfin_ops += dist_to_opt_ops
  # Single-step: tune mu and lr from the surrogate; on step 0, keep the
  # initial values.
  self._mu = tf.identity(
      tf.cond(self._do_tune, self._get_mu_tensor, lambda: self._mu_var))
  with tf.control_dependencies([self._mu]):
    self._lr = tf.identity(
        tf.cond(self._do_tune, self._get_lr_tensor, lambda: self._lr_var))
  # Smooth the tuned values with a moving average before assigning.
  with tf.control_dependencies([self._mu, self._lr]):
    self._mu = self._beta * self._mu_var + (1 - self._beta) * self._mu
    self._lr = self._beta * self._lr_var + (1 - self._beta) * self._lr
    yellowfin_ops.append(tf.assign(self._mu_var, self._mu))
    yellowfin_ops.append(tf.assign(self._lr_var, self._lr))
  yellowfin_ops = tf.group(*yellowfin_ops)
  return yellowfin_ops
def apply_gradients(self, grads_and_vars, global_step=None, name=None):
  """Apply gradients and tune hyperparameters with YellowFin.

  Args:
    grads_and_vars: List of (gradient, variable) pairs as returned by
      compute_gradients().
    global_step: Optional Variable to increment by one after the variables
      have been updated.
    name: Optional name for the returned operation.  Defaults to the name
      passed to the Optimizer constructor.

  Returns:
    (A group of operations) Variable update with momentum ops,
    YellowFin ops (curvature, variance, distance), single-step and lr_mu
    tuning ops, step increment ops.
  """
  self._grad, self._vars = zip(
      *[(g, t) for g, t in grads_and_vars if g is not None])
  with tf.variable_scope('apply_updates'):
    if self._clip_thresh_var is not None:
      self._grads_clip, self._grads_norm = tf.clip_by_global_norm(
          self._grad, self._clip_thresh_var)
      apply_grad_op = self._momentum_optimizer.apply_gradients(
          zip(self._grads_clip, self._vars), global_step=global_step)
    else:
      apply_grad_op = self._momentum_optimizer.apply_gradients(
          zip(self._grad, self._vars), global_step=global_step)
  with tf.variable_scope('prepare_yellowFin_variables'):
    prepare_variables_op = self._prepare_variables()
  with tf.variable_scope('yellowfin'):
    with tf.control_dependencies([prepare_variables_op]):
      yellowfin_op = self._yellowfin()
  with tf.control_dependencies([yellowfin_op]):
    self._increment_step_op = tf.assign_add(self._step, 1).op
  return tf.group(apply_grad_op, prepare_variables_op, yellowfin_op,
                  self._increment_step_op)
def compute_gradients(self, loss, var_list, global_step=None,
                      gate_gradients=GATE_OP, aggregation_method=None,
                      colocate_gradients_with_ops=False, name=None,
                      grad_loss=None):
  """Compute gradients through the momentum optimizer.

  Args:
    loss: A Tensor containing the value to minimize.
    var_list: Optional list or tuple of tf.Variable to update to minimize
      loss.  Defaults to the list of variables collected in the graph
      under the key GraphKeys.TRAINABLE_VARIABLES.
    global_step: Optional Variable to increment by one after the variables
      have been updated.
    gate_gradients: How to gate the computation of gradients.
      Can be GATE_NONE, GATE_OP, or GATE_GRAPH.
    aggregation_method: Specifies the method used to combine gradient
      terms.  Valid values are defined in the class AggregationMethod.
    colocate_gradients_with_ops: If True, try colocating gradients with
      the corresponding op.
    name: Optional name for the returned operation.  Defaults to the name
      passed to the Optimizer constructor.
    grad_loss: Optional.  A Tensor holding the gradient computed for loss.

  Returns:
    A list of (gradient, variable) pairs.  Variable is always present,
    but gradient can be None.
  """
  return self._momentum_optimizer.compute_gradients(
      loss,
      var_list=var_list,
      gate_gradients=gate_gradients,
      aggregation_method=aggregation_method,
      colocate_gradients_with_ops=colocate_gradients_with_ops,
      grad_loss=grad_loss)
def minimize(self, loss, global_step=None, var_list=None,
             gate_gradients=GATE_OP, aggregation_method=None,
             colocate_gradients_with_ops=False, name=None, grad_loss=None):
  """Adapted from the TensorFlow Optimizer base class member function.

  Add operations to minimize `loss` by updating `var_list`.  This method
  simply combines calls `compute_gradients()` and `apply_gradients()`.
  If you want to process the gradient before applying them, call
  `tf.gradients()` and `self.apply_gradients()` explicitly instead of
  using this function.

  Args:
    loss: A Tensor containing the value to minimize.
    global_step: Optional Variable to increment by one after the variables
      have been updated.
    var_list: Optional list or tuple of Variable objects to update to
      minimize loss.  Defaults to the list of variables collected in the
      graph under the key GraphKeys.TRAINABLE_VARIABLES.
    gate_gradients: How to gate the computation of gradients.
      Can be GATE_NONE, GATE_OP, or GATE_GRAPH.
    aggregation_method: Specifies the method used to combine gradient
      terms.  Valid values are defined in the class AggregationMethod.
    colocate_gradients_with_ops: If True, try colocating gradients with
      the corresponding op.
    name: Optional name for the returned operation.
    grad_loss: Optional.  A Tensor holding the gradient computed for loss.

  Returns:
    An Operation that updates the variables in var_list.  If global_step
    was not None, that operation also increments global_step.

  Raises:
    ValueError: if no gradients are provided for any variable.
  """
  # The wrapped momentum optimizer computes the raw gradients.
  grads_and_vars = self._momentum_optimizer.compute_gradients(
      loss,
      var_list=var_list,
      gate_gradients=gate_gradients,
      aggregation_method=aggregation_method,
      colocate_gradients_with_ops=colocate_gradients_with_ops,
      grad_loss=grad_loss)
  vars_with_grad = [v for g, v in grads_and_vars if g is not None]
  if not vars_with_grad:
    raise ValueError(
        'No gradients provided for any variable, check your graph for ops '
        'that do not support gradients, between variables %s and loss %s.'
        % ([str(v) for _, v in grads_and_vars], loss))
  return self.apply_gradients(grads_and_vars, global_step=global_step)
@property
def top_dimensionality(self):
  """Integer, the last dimension of the predictions (vocab size)."""
  raise NotImplementedError('Abstract Method')
def bottom(self, x):
  """Transform one shard of input.

  Args:
    x: An int32 Tensor with shape [batch, p0, p1, input_channels]

  Returns:
    A float32 Tensor with shape [batch, p0, p1, body_input_depth]
  """
  raise NotImplementedError('Abstract Method')
def bottom_sharded(self, xs, data_parallelism):
  """Transform the inputs.

  Args:
    xs: A list of num_datashards Tensors (one per shard), each with shape
      [batch, p0, p1, depth]
    data_parallelism: an expert_utils.Parallelism object

  Returns:
    sharded_body_input: A list of num_datashards Tensors, each with shape
      [batch, p0, p1, body_input_depth].
  """
  return data_parallelism(self.bottom, xs)
def targets_bottom(self, x):
  """Transform one shard of targets.

  Args:
    x: An int32 Tensor with shape [batch, p0, p1, target_channels]

  Returns:
    A float32 Tensor with shape [batch, p0, p1, body_input_depth]
  """
  with tf.variable_scope('targets_bottom'):
    return self.bottom(x)
def targets_bottom_sharded(self, xs, data_parallelism):
  """Transform the targets.

  Args:
    xs: A list of num_datashards Tensors (one per shard), each with shape
      [batch, p0, p1, target_channels]
    data_parallelism: an expert_utils.Parallelism object

  Returns:
    sharded_body_input: A list of num_datashards Tensors, each with shape
      [batch, p0, p1, body_input_depth].
  """
  return data_parallelism(self.targets_bottom, xs)
def top(self, body_output, targets):
  """Generate predictions/logits for one shard of output.

  Most classes will override this function.

  Args:
    body_output: A Tensor with shape [batch, p0, p1, body_output_depth]
    targets: A Tensor with shape
      [batch, p0, p1, targets_channels, top_dimensionality]

  Returns:
    A Tensor of class logits.
  """
  raise NotImplementedError('Abstract Method')
def top_sharded(self, sharded_body_output, sharded_targets,
                data_parallelism):
  """Generate predictions/logits for all shards.

  Classes with cross-shard interaction will override this function.

  Args:
    sharded_body_output: A list of Tensors.
    sharded_targets: A list of Tensors.
    data_parallelism: an expert_utils.Parallelism object.

  Returns:
    sharded_logits: A list of Tensors.
  """
  return data_parallelism(self.top, sharded_body_output, sharded_targets)
def loss(self, top_out, targets, weights_fn=common_layers.weights_nonzero):
  """Compute loss numerator and denominator for one shard of output."""
  logits = top_out
  return common_layers.padded_cross_entropy(
      logits, targets, self._model_hparams.label_smoothing,
      weights_fn=weights_fn)
def loss_sharded(self, sharded_top_out, sharded_targets, data_parallelism):
  """Compute loss for all shards."""
  sharded_loss_num, sharded_loss_den = data_parallelism(
      self.loss, sharded_top_out, sharded_targets)
  # Sum the per-shard numerators and denominators separately, so the result
  # is the correctly weighted mean over all shards.
  loss = tf.add_n(sharded_loss_num) / tf.maximum(
      1.0, tf.add_n(sharded_loss_den))
  return loss
def __init__(self, hp, name):
  """Creates a FeedForwardExpert.

  Args:
    hp: hyperparameters.  Call FeedForwardExpertParams() to create these.
    name: a string.
  """
  self._hp = hp
  hidden_layer_sizes = hp.hidden_layer_sizes or []
  num_layers = 1 + len(hidden_layer_sizes)
  layer_sizes = [hp.input_size] + hidden_layer_sizes + [hp.output_size]
  self._layer_sizes = layer_sizes
  self._w = []
  for layer in range(num_layers):
    shape = layer_sizes[layer:layer + 2]
    self._w.append(
        tf.get_variable('%s_layer_%d' % (name, layer), shape, hp.dtype,
                        hp.initializer))
def Eval(self, x):
  """Evaluate the FeedForwardExpert on the given input.

  Args:
    x: a `Tensor` of shape `[batch_size, hp.input_size]`

  Returns:
    a `Tensor` of shape `[batch_size, hp.output_size]`
  """
  hp = self._hp
  num_layers = len(self._w)
  for i in xrange(num_layers):
    x = tf.matmul(x, self._w[i])
    if hp.autoscale and self._layer_sizes[i] != hp.input_size:
      # Rescale so activation variance matches a layer of width input_size.
      x *= (self._layer_sizes[i] / hp.input_size)**-0.5
    if i + 1 < num_layers and hp.hidden_activation:
      x = hp.hidden_activation(x)
  return x
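# A self-contained numpy sketch of the autoscale rule above: whenever a
# layer's fan-in differs from hp.input_size, its output is multiplied by
# (fan_in / input_size) ** -0.5, keeping activation variance comparable to a
# network whose layers all had input_size inputs. Sizes below are made up.
import numpy as np

rng = np.random.RandomState(0)
input_size, hidden = 64, 256
x = rng.randn(1024, input_size)
w1 = rng.randn(input_size, hidden) * input_size**-0.5
w2 = rng.randn(hidden, input_size) * input_size**-0.5

h = x.dot(w1)                                   # fan_in == input_size
y = h.dot(w2) * (hidden / input_size)**-0.5     # fan_in == hidden: rescale
assert 0.5 < y.var() / x.var() < 2.0  # variance stays in the same ballpark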
def __init__(self, device_names_or_functions, reuse=None,
             caching_devices=None, daisy_chain_variables=False):
  """Create a Parallelism.

  Args:
    device_names_or_functions: A list of length n, containing device names
      or device functions (see `tf.device`)
    reuse: True or None.  Whether to reuse variables created in the first
      replica in the subsequent replicas.
    caching_devices: Either `None`, or a list of length n containing
      device names.
    daisy_chain_variables: a boolean - if true, then copies variables in a
      daisy chain between devices.

  Returns:
    a Parallelism.
  """
  assert device_names_or_functions
  self._devices = device_names_or_functions
  self._n = len(device_names_or_functions)
  self._reuse = reuse
  self._caching_devices = self._MaybeRepeat(caching_devices)
  self._daisy_chain_variables = daisy_chain_variables
def __call__(self, fn, *args, **kwargs):
  """A parallel set of function calls (using the specified devices).

  Args:
    fn: a function or a list of n functions.
    *args: additional args.  Each arg should either be not a list, or a
      list of length n.
    **kwargs: additional keyword args.  Each arg should either be not a
      list, or a list of length n.

  Returns:
    either a single list of length n (if fn does not return a tuple), or a
    tuple of lists of length n (if fn returns a tuple).
  """
  # Construct lists of args and kwargs for each function call.
  if args:
    my_args = TransposeListOfLists(
        [self._MaybeRepeat(arg) for arg in args])
  else:
    my_args = [[] for _ in xrange(self.n)]
  my_kwargs = [{} for _ in xrange(self.n)]
  for k, v in six.iteritems(kwargs):
    vals = self._MaybeRepeat(v)
    for i in xrange(self.n):
      my_kwargs[i][k] = vals[i]
  # Construct lists of functions and make the parallel calls.
  fns = self._MaybeRepeat(fn)
  outputs = []
  cache = {}
  for i in xrange(self.n):

    def DaisyChainGetter(getter, name, *args, **kwargs):
      """Get a variable and cache in a daisy chain."""
      device_var_key = (self._devices[i], name)
      if device_var_key in cache:
        # If we have the variable on the correct device, return it.
        return cache[device_var_key]
      if name in cache:
        # If we have it on a different device, copy it from the last device.
        v = tf.identity(cache[name])
      else:
        var = getter(name, *args, **kwargs)
        v = tf.identity(var._ref())  # pylint: disable=protected-access
      # Update the cache.
      cache[name] = v
      cache[device_var_key] = v
      return v

    def CachingGetter(getter, name, *args, **kwargs):
      """Get a variable and cache a copy on the caching device."""
      v = getter(name, *args, **kwargs)
      key = (self._caching_devices[i], name)
      if key in cache:
        return cache[key]
      with tf.device(self._caching_devices[i]):
        ret = tf.identity(v._ref())  # pylint: disable=protected-access
      cache[key] = ret
      return ret

    if self._daisy_chain_variables:
      custom_getter = DaisyChainGetter
    elif self._caching_devices:
      custom_getter = CachingGetter
    else:
      custom_getter = None
    with tf.name_scope('parallel_%d' % i):
      with tf.variable_scope(
          tf.get_variable_scope(),
          reuse=True if i > 0 and self._reuse else None,
          caching_device=self._caching_devices[i],
          custom_getter=custom_getter):
        with tf.device(self._devices[i]):
          outputs.append(fns[i](*my_args[i], **my_kwargs[i]))
  if isinstance(outputs[0], tuple):
    outputs = list(zip(*outputs))
    outputs = tuple([list(o) for o in outputs])
  return outputs
def _MaybeRepeat(self, x):
  """Utility function for processing arguments that are singletons or lists.

  Args:
    x: either a list of self.n elements, or not a list.

  Returns:
    a list of self.n elements.
  """
  if isinstance(x, list):
    assert len(x) == self.n
    return x
  else:
    return [x] * self.n
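# A self-contained sketch of the singleton-or-list convention used by
# Parallelism above: scalars are broadcast to all n shards, while lists must
# already have exactly n entries.
def maybe_repeat(x, n):
  if isinstance(x, list):
    assert len(x) == n
    return x
  return [x] * n

assert maybe_repeat('gpu:0', 3) == ['gpu:0', 'gpu:0', 'gpu:0']
assert maybe_repeat([1, 2, 3], 3) == [1, 2, 3]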
def __init__(self, hp, name):
  """Create a NoisyTopKGating network.

  Args:
    hp: a hyperparameters created by NoisyTopKGatingParams()
    name: a string
  """
  self._vars = []
  self._hp = hp
  self._w_gate = tf.get_variable('%s_gate' % name,
                                 [hp.input_size, hp.num_experts], hp.dtype,
                                 hp.initializer)
  self._vars.append(self._w_gate)
  if hp.noisy_gating:
    self._w_noise = tf.get_variable('%s_noise' % name,
                                    [hp.input_size, hp.num_experts],
                                    hp.dtype, hp.initializer)
    self._vars.append(self._w_noise)
def Eval(self, x, train=True, summaries=False):
  """Compute noisy top-k gating.

  Args:
    x: a `Tensor` of shape `[batch_size, input_size]`.
    train: a boolean `Scalar`.  Setting this to false turns off noise.
    summaries: a boolean.  Whether to add summaries.

  Returns:
    gates: a `Tensor` of shape `[batch_size, n]`
    load: a `Tensor` of shape `[n]`.  If we are using noise, this is a
      smooth approximation of the load, and you can define a loss in terms
      of it to help with load-balancing.
  """
  with tf.variable_scope('NoisyTopKGating'):
    hp = self._hp
    clean_logits = tf.matmul(x, self._w_gate)
    if hp.noisy_gating:
      raw_noise_stddev = tf.matmul(x, self._w_noise)
      noise_stddev = (
          (tf.nn.softplus(raw_noise_stddev) + hp.noise_epsilon) *
          tf.to_float(train))
      noisy_logits = clean_logits + (
          tf.random_normal(tf.shape(clean_logits)) * noise_stddev)
      logits = noisy_logits
      if summaries:
        tf.summary.histogram('noisy_logits', noisy_logits)
        tf.summary.histogram('noise_stddev', noise_stddev)
    else:
      logits = clean_logits
    # Compute the top (k + 1) logits so the smooth load estimator can see
    # the threshold just below the k-th chosen expert.
    top_logits, top_indices = _MyTopK(logits, min(hp.k + 1, hp.num_experts))
    top_k_logits = tf.slice(top_logits, [0, 0], [-1, hp.k])
    top_k_indices = tf.slice(top_indices, [0, 0], [-1, hp.k])
    top_k_gates = tf.nn.softmax(top_k_logits)
    # Scatter the normalized top-k gates back to [batch_size, num_experts].
    gates = _RowwiseUnsortedSegmentSum(top_k_gates, top_k_indices,
                                       hp.num_experts)
    if hp.noisy_gating and hp.k < hp.num_experts:
      load = tf.reduce_sum(
          _ProbInTopK(clean_logits, noisy_logits, noise_stddev, top_logits,
                      hp.k), 0)
    else:
      load = _GatesToLoad(gates)
    if summaries:
      tf.summary.histogram('importance', tf.reduce_sum(gates, 0))
      tf.summary.histogram('load', load)
    return gates, load
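# A self-contained numpy sketch of the top-k gating math above (noise
# omitted): keep the k largest logits per row, softmax over just those, and
# scatter the results back into an otherwise-zero [batch, num_experts] gate
# matrix.
import numpy as np

def top_k_gates(logits, k):
  batch, num_experts = logits.shape
  top_idx = np.argsort(-logits, axis=1)[:, :k]
  top_logits = np.take_along_axis(logits, top_idx, axis=1)
  exp = np.exp(top_logits - top_logits.max(axis=1, keepdims=True))
  top_gates = exp / exp.sum(axis=1, keepdims=True)
  gates = np.zeros((batch, num_experts))
  np.put_along_axis(gates, top_idx, top_gates, axis=1)
  return gates

gates = top_k_gates(np.array([[1.0, 3.0, 2.0, 0.0]]), k=2)
assert gates[0].nonzero()[0].tolist() == [1, 2]  # experts 1 and 2 selected
assert abs(gates.sum() - 1.0) < 1e-6             # each row sums to 1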
def __init__(self, gating_hp, expert_hp, input_size, output_size, name):
  """Create a LocalMixtureOfExperts.

  Args:
    gating_hp: hyperparameters for the gating network.
      e.g. NoisyTopKGatingParams()
    expert_hp: hyperparameters for the expert networks.
      e.g. FeedForwardExpertParams()
    input_size: an integer.
    output_size: an integer.
    name: a string.
  """
  self._name = name
  _SetInputOutputSizes(gating_hp, input_size, None)
  _SetInputOutputSizes(expert_hp, input_size, output_size)
  self._gating_hp = gating_hp
  self._gating = gating_hp.gating_class(gating_hp, name + '_gating')
  self._expert_hp = expert_hp
  self._experts = [
      expert_hp.expert_class(expert_hp, name + '_%d' % i)
      for i in xrange(gating_hp.num_experts)
  ]
def Eval(self, x, train=True, per_example_multiplier=None, summaries=False,
         identifiers=None):
  """Evaluate mixture of experts.

  We provide a convenient debugging tool for determining the set of
  examples that we passed to each expert.  The caller may provide a
  `Tensor` of "identifiers", of any type whose first dimension matches the
  number of input examples.  The function will then return a list
  "expert_to_identifiers", with one `Tensor` for each expert containing
  the identifiers for all examples assigned to that expert.  A parallel
  list of `Tensor`s, "expert_to_gates", is also returned, containing the
  corresponding gate values.

  Args:
    x: a `Tensor` of shape `[batch_size, input_size]`
    train: a boolean Scalar.  Are we in training mode?
    per_example_multiplier: an optional `Tensor` of shape `[batch_size]`
      which gets multiplied into the gate values.  If this
      LocalMixtureOfExperts represents one secondary MoE in a hierarchical
      MoE, then we pass in the gate values from the primary gating
      function here.  This causes the computed values (`y`, `importance`
      and `expert_to_gates`) to also reflect the primary gate values.
    summaries: a boolean.  Enable summaries.
    identifiers: an optional `Tensor` whose first dimension is equal to
      batch_size.

  Returns:
    y: a `Tensor` of shape `[batch_size, output_size]`.  Output of the MoE.
    importance: a `Tensor` of shape `[n]`.  Batchwise sum of gates.
    load: a `Tensor` of shape `[n]`.  Smooth estimator of the number of
      examples passed to each expert.  This is useful for load-balancing,
      as any gradient on this `Tensor` will back-propagate to the gating
      network.
    expert_to_identifiers: if `identifiers` was passed in, a list of
      length `num_experts`.  Each element is a `Tensor` whose shape
      matches that of `identifiers` in all but the first dimension.
      Contains the slices of `identifiers` corresponding to the batch
      elements that were dispatched to that expert.
    expert_to_gates: A list of length `num_experts`.  Each element
      contains a 1-dimensional tensor.
  """
  gating_hp = self._gating_hp
  gates, load = self._gating.Eval(x, train, summaries)
  if per_example_multiplier is not None:
    gates *= tf.expand_dims(per_example_multiplier, 1)
  dispatcher = SparseDispatcher(gating_hp.num_experts, gates)
  expert_input = dispatcher.Dispatch(x)
  expert_output = [
      self._experts[i].Eval(expert_input[i])
      for i in xrange(gating_hp.num_experts)
  ]
  y = dispatcher.Combine(expert_output)
  if identifiers is not None:
    expert_to_identifiers = dispatcher.Dispatch(identifiers)
  else:
    expert_to_identifiers = None
  return (y, tf.reduce_sum(gates, 0), load, expert_to_identifiers,
          dispatcher.ExpertToGates())
'Create a DistributedMixtureOfExperts. If `secondary_gating_hp` is `None`, then this is a flat MoE with `primary_gating_hp.num_experts` experts. Otherwise, this is a hierarchical MoE with `primary_gating_hp.num_experts` groups of `secondary_gating_hp.num_experts` experts. The assignment of experts (or groups of experts) to devices is round-robin. So to make equal use of all the devices, one should set `primary_gating_hp.num_experts` to the number of devices or a multiple thereof. Args: primary_gating_hp: hyperparameters for the primary gating network. e.g. NoisyTopKGatingParams(). secondary_gating_hp: hyperparameters for the secondary gating network. e.g. NoisyTopKGatingParams(). None indicates a flat MoE. expert_hp: hyperparameters for the expert networks. e.g. FeedForwardExpertParams() input_size: an integer. output_size: an integer. expert_devices: a list of device strings. The devices to be used for the experts. name: a string.'
def __init__(self, primary_gating_hp, secondary_gating_hp, expert_hp, input_size, output_size, expert_devices, name):
self._name = name _SetInputOutputSizes(primary_gating_hp, input_size, None) _SetInputOutputSizes(expert_hp, input_size, output_size) self._is_hierarchical = (secondary_gating_hp is not None) self._primary_gating_hp = primary_gating_hp self._primary_gating = primary_gating_hp.gating_class(primary_gating_hp, (name + '_primary_gating')) n1 = self._primary_gating_hp.num_experts expert_devices = [expert_devices[(i % len(expert_devices))] for i in xrange(n1)] self._expert_devices = expert_devices self._all_vars = [] self._all_vars.extend(self._primary_gating.vars) if self._is_hierarchical: self._secondary_moe = [] for i in xrange(n1): with tf.device(expert_devices[i]): secondary_moe = LocalMixtureOfExperts(secondary_gating_hp, expert_hp, input_size, output_size, ('%s_secondary_%d' % (name, i))) self._secondary_moe.append(secondary_moe) self._all_vars.extend(secondary_moe.vars) else: self._experts = [] for i in xrange(n1): with tf.device(expert_devices[i]): expert = expert_hp.expert_class(expert_hp, (name + ('_%d' % i))) self._experts.append(expert) self._all_vars.extend(expert.vars)
'Evaluate MoE on given inputs. This class is designed for the case where the rest of the model is using data parallelism. We receive an array of input `Tensor`s, one per datashard, and we produce a list of output `Tensor`s, one per datashard. We provide a convenient debugging tool for determining the set of examples that we passed to each expert. The caller may provide a `Tensor` of "identifiers", of any type whose first dimension matches the number of input examples. The function will then return a list "expert_to_identifiers", with one `Tensor` for each expert containing the identifiers for all examples assigned to that expert. A parallel list of `Tensor`s, "expert_to_gates", is also returned, containing the corresponding gate values. Args: datashard_devices: a `list` of device strings of length `num_datashards`. Which devices to use for the output tensors. xs: A `list` of `Tensor`s of length `num_datashards`. Each has shape `[batch_size[d], input_size]`. train: a boolean `Scalar`. When train=`True`, noise is added to the gating function. summaries: a boolean. Whether to write summaries. identifiers: an optional list of tensors. Each tensor has shape [<batch_size[datashard]>, extra_dims] shadow_xs: Optional `list` of `Tensor`s of length `num_datashards`. Each has shape `[batch_size[d], input_size]`. `shadow_xs` is useful if you want to dispatch a transformed version of xs to the experts, but you want untransformed xs for the gating network. Returns: ys: the output (a list of one tensor per datashard). Each has shape `[batch_size[d], output_size]`. importance: a `Tensor` of shape `[n]` for a flat MoE or `[n1, n2]` for a hierarchical MoE. Batchwise sum of gates. load: a `Tensor` of shape `[n]` for a flat MoE or `[n1, n2]` for a hierarchical MoE. Smooth estimator of the number of examples passed to each expert. This is useful for load-balancing, as any gradient on this `Tensor` will back-propagate to the gating network. expert_to_identifiers: if `identifiers` was passed in, a list of length `num_experts`. Each element is a `Tensor` whose shape matches that of `identifiers` in all but the first dimension. Contains the slices of `identifiers` corresponding to the batch elements that were dispatched to that expert. expert_to_gates: a list of one tensor per expert. Each tensor has shape `[<num_examples[expert]>]`.'
def Eval(self, datashard_devices, xs, train=True, summaries=False, identifiers=None, shadow_xs=None):
n1 = self._primary_gating_hp.num_experts epsilon = 1e-10 assert (len(datashard_devices) == len(xs)) num_datashards = len(xs) expert_devices = self._expert_devices has_identifiers = (identifiers is not None) (primary_gates, primary_smooth_load) = Parallel(datashard_devices, self._primary_gating.Eval, xs, train, ([summaries] + ([False] * (num_datashards - 1)))) primary_importance = tf.add_n(Parallel(datashard_devices, tf.reduce_sum, primary_gates, 0)) primary_smooth_load = tf.add_n(primary_smooth_load) primary_true_load = tf.add_n(Parallel(datashard_devices, _GatesToLoad, primary_gates)) primary_dispatcher = DistributedSparseDispatcher(datashard_devices, expert_devices, primary_gates) if (shadow_xs is None): secondary_input = primary_dispatcher.Dispatch(xs) else: secondary_input = primary_dispatcher.Dispatch(shadow_xs) primary_expert_to_identifiers = (primary_dispatcher.Dispatch(identifiers) if has_identifiers else None) primary_expert_to_gates = primary_dispatcher.ExpertToGates() if (not self._is_hierarchical): secondary_output = Parallel(expert_devices, (lambda a, b: a.Eval(b)), self._experts, secondary_input) ys = primary_dispatcher.Combine(secondary_output) return (ys, primary_importance, primary_smooth_load, primary_expert_to_identifiers, primary_expert_to_gates) (secondary_output, secondary_importance, secondary_load, secondary_expert_to_identifiers, secondary_expert_to_gates) = Parallel(expert_devices, [m.Eval for m in self._secondary_moe], secondary_input, train, primary_expert_to_gates, ([summaries] + ([False] * (n1 - 1))), primary_expert_to_identifiers) ys = primary_dispatcher.Combine(secondary_output, multiply_by_gates=False) importance = tf.stack(secondary_importance) load = (tf.stack(secondary_load) * tf.expand_dims((primary_smooth_load / (primary_true_load + epsilon)), 1)) expert_to_identifiers = [] if (identifiers is not None): for el in secondary_expert_to_identifiers: expert_to_identifiers.extend(el) expert_to_gates = [] for el in secondary_expert_to_gates: expert_to_gates.extend(el) return (ys, importance, load, expert_to_identifiers, expert_to_gates)
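A hedged sketch of calling the distributed MoE under data parallelism. The device strings, shapes, and hyperparameter keyword arguments are illustrative assumptions:

# Two datashards and two expert devices (4 experts assigned round-robin).
datashard_devices = ['/gpu:0', '/gpu:1']
expert_devices = ['/gpu:0', '/gpu:1']
dist_moe = DistributedMixtureOfExperts(
    primary_gating_hp=NoisyTopKGatingParams(num_experts=4, k=2),
    secondary_gating_hp=None,          # None => flat (non-hierarchical) MoE
    expert_hp=FeedForwardExpertParams(),
    input_size=512, output_size=512,
    expert_devices=expert_devices, name='dist_moe')
xs = [tf.random_normal([32, 512]) for _ in datashard_devices]
ys, importance, load, _, _ = dist_moe.Eval(datashard_devices, xs)
# ys is a list of two [32, 512] tensors, one per datashard.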
'Create a SparseDispatcher. Args: num_experts: an integer. gates: a `Tensor` of shape `[batch_size, num_experts]`. Returns: a SparseDispatcher'
def __init__(self, num_experts, gates):
self._gates = gates self._num_experts = num_experts where = tf.to_int32(tf.where((tf.transpose(gates) > 0))) (self._expert_index, self._batch_index) = tf.unstack(where, num=2, axis=1) self._part_sizes_tensor = tf.reduce_sum(tf.to_int32((gates > 0)), [0]) self._nonzero_gates = tf.gather(tf.reshape(self._gates, [(-1)]), ((self._batch_index * num_experts) + self._expert_index))
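A NumPy illustration of the index bookkeeping above on a toy gate matrix; the flat gather at index `batch_index * num_experts + expert_index` recovers exactly the nonzero gate values, grouped by expert:

import numpy as np
gates = np.array([[0.7, 0.0, 0.3],
                  [0.0, 0.5, 0.5]])            # batch_size=2, num_experts=3
expert_index, batch_index = np.nonzero(gates.T)
# expert_index = [0, 1, 2, 2]; batch_index = [0, 1, 0, 1]
part_sizes = (gates > 0).sum(axis=0)           # examples per expert: [1, 1, 2]
nonzero_gates = gates.reshape(-1)[batch_index * 3 + expert_index]
# nonzero_gates = [0.7, 0.5, 0.3, 0.5], in per-expert order.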
'Create one input Tensor for each expert. The `Tensor` for an expert `i` contains the slices of `inp` corresponding to the batch elements `b` where `gates[b, i] > 0`. Args: inp: a `Tensor` of shape `[batch_size, <extra_input_dims>]` Returns: a list of `num_experts` `Tensor`s with shapes `[expert_batch_size_i, <extra_input_dims>]`.'
def Dispatch(self, inp):
inp = tf.gather(inp, self._batch_index) return tf.split(inp, self._part_sizes_tensor, 0)
'Sum together the expert output, weighted by the gates. The slice corresponding to a particular batch element `b` is computed as the sum over all experts `i` of the expert output, weighted by the corresponding gate values. If `multiply_by_gates` is set to False, the gate values are ignored. Args: expert_out: a list of `num_experts` `Tensor`s, each with shape `[expert_batch_size_i, <extra_output_dims>]`. multiply_by_gates: a boolean Returns: a `Tensor` with shape `[batch_size, <extra_output_dims>]`.'
def Combine(self, expert_out, multiply_by_gates=True):
stitched = ConvertGradientToTensor(tf.concat(expert_out, 0)) if multiply_by_gates: stitched *= tf.expand_dims(self._nonzero_gates, 1) combined = tf.unsorted_segment_sum(stitched, self._batch_index, tf.shape(self._gates)[0]) return combined
'Gate values corresponding to the examples in the per-expert `Tensor`s. Returns: a list of `num_experts` one-dimensional `Tensor`s with type `tf.float32` and shapes `[expert_batch_size_i]`'
def ExpertToGates(self):
return tf.split(self._nonzero_gates, self._part_sizes_tensor, 0)
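Putting the three methods together, a dispatch/combine round trip on a toy batch. Since each row of `gates` sums to 1 and the "experts" here are identities, `Combine` reproduces the input:

gates = tf.constant([[0.7, 0.0, 0.3],
                     [0.0, 0.5, 0.5]])
dispatcher = SparseDispatcher(3, gates)
x = tf.constant([[1.0, 2.0], [3.0, 4.0]])
expert_inputs = dispatcher.Dispatch(x)   # 3 tensors holding rows [0], [1], [0, 1]
# Pretend each expert is the identity; recombining weights each slice by its
# gate and segment-sums by batch index, so row b gets sum_i gates[b, i] * x[b].
y = dispatcher.Combine(expert_inputs)    # == x, because each gate row sums to 1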
'Create a DistributedSparseDispatcher. Args: datashard_devices: a list of num_datashards device strings. expert_devices: a list of num_experts device strings. gates: a list of num_datashards `Tensor`s of shapes `[batch_size[d], num_experts]`. Returns: a DistributedSparseDispatcher'
def __init__(self, datashard_devices, expert_devices, gates):
self._gates = gates self._num_experts = len(expert_devices) assert (len(gates) == len(datashard_devices)) self._num_datashards = len(gates) self._datashard_devices = datashard_devices self._expert_devices = expert_devices self._dispatchers = Parallel(self._datashard_devices, SparseDispatcher, self._num_experts, gates)
'Create one input Tensor for each expert. Args: inp: a list of length num_datashards `Tensor`s with shapes `[batch_size[d], <extra_input_dims>]`. Returns: a list of `num_experts` `Tensor`s with shapes `[num_examples[i], <extra_input_dims>]`.'
def Dispatch(self, inp):
dispatched = Parallel(self._datashard_devices, (lambda a, b: a.Dispatch(b)), self._dispatchers, inp) ret = Parallel(self._expert_devices, tf.concat, TransposeListOfLists(dispatched), 0) if (ret[0].dtype == tf.float32): ret = Parallel(self._expert_devices, ConvertGradientToTensor, ret) return ret
'Sum together the expert output, multiplied by the corresponding gates. Args: expert_out: a list of `num_experts` `Tensor`s, each with shape `[expert_batch_size_i, <extra_output_dims>]`. multiply_by_gates: a boolean. Returns: a list of num_datashards `Tensor`s with shapes `[batch_size[d], <extra_output_dims>]`.'
def Combine(self, expert_out, multiply_by_gates=True):
expert_part_sizes = tf.unstack(tf.stack([self._dispatchers[d].part_sizes for d in xrange(self._num_datashards)]), num=self._num_experts, axis=1) expert_output_parts = Parallel(self._expert_devices, tf.split, expert_out, expert_part_sizes) expert_output_parts_t = TransposeListOfLists(expert_output_parts) ret = [] for d in xrange(self._num_datashards): with tf.device(self._datashard_devices[d]): ret.append(self._dispatchers[d].Combine(ConvertGradientToTensor(tf.concat(expert_output_parts_t[d], 0)), multiply_by_gates=multiply_by_gates)) return ret
'Gate values corresponding to the examples in the per-expert `Tensor`s. Returns: a list of `num_experts` one-dimensional `Tensor`s of type `tf.float32`.'
def ExpertToGates(self):
return Parallel(self._expert_devices, tf.concat, TransposeListOfLists(Parallel(self._datashard_devices, [self._dispatchers[d].ExpertToGates for d in xrange(self._num_datashards)])), 0)
'Constructs a Dispatcher. Args: data_parallelism: a Parallelism object. model_parallelism: a Parallelism object. gates: a list of 1d integer `Tensor`s, one per datashard. Says which expert to use for each batch element. Returns: a DistributedSingleDispatcher'
def __init__(self, data_parallelism, model_parallelism, gates):
gates = data_parallelism(tf.to_int32, gates) self._gates = gates self._data_parallelism = data_parallelism self._model_parallelism = model_parallelism def _PartSizes(gates): return tf.unsorted_segment_sum(tf.ones_like(gates), gates, model_parallelism.n) part_sizes_by_datashard = data_parallelism(_PartSizes, gates) self._part_sizes_by_expert = tf.unstack(tf.stack(part_sizes_by_datashard), num=model_parallelism.n, axis=1) def _StitchIndices(gates): return tf.dynamic_partition(tf.range(tf.size(gates)), gates, model_parallelism.n) self._stitch_indices = data_parallelism(_StitchIndices, gates)
'Reshuffles input `Tensor`s to produce output `Tensor`s. The dimensions of all input and output `Tensor`s match, except for dimension 0. In dimension 0, the input `Tensor`s match the corresponding `gates` `Tensor`s which were passed to the constructor. Args: d_tensors: a list of `Tensor`s, one per datashard. Returns: a list of `Tensor`s, one per expert.'
def Dispatch(self, d_tensors):
parts = self._data_parallelism(tf.dynamic_partition, d_tensors, self._gates, self._model_parallelism.n) parts_by_expert = TransposeListOfLists(parts) x_tensors = self._model_parallelism(tf.concat, parts_by_expert, 0) return x_tensors
'Reshuffles per-expert `Tensor`s to produce per-datashard `Tensor`s. Dispatch must have been called at least once first. The dimensions of all input and output `Tensor`s match, except for dimension 0. In dimension 0, the input `Tensor`s match the corresponding outputs of `Dispatch`, and the output `Tensor`s match the corresponding `gates` `Tensor`s which were passed to the constructor. Args: x_tensors: a list of `Tensor`s, one per expert. Returns: a list of `Tensor`s, one per datashard.'
def Combine(self, x_tensors):
parts = self._model_parallelism(tf.split, x_tensors, self._part_sizes_by_expert) d_tensors = self._data_parallelism(tf.dynamic_stitch, self._stitch_indices, TransposeListOfLists(parts)) return d_tensors
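The core ops behind this dispatcher, isolated for a single datashard (device placement and the `Parallelism` wrappers omitted): `tf.dynamic_partition` splits the batch by integer gate, and `tf.dynamic_stitch` with the partitioned indices restores batch order:

gates = tf.constant([2, 0, 2, 1])                  # expert id per example
x = tf.constant([[1.], [2.], [3.], [4.]])
parts = tf.dynamic_partition(x, gates, 3)          # per-expert inputs:
                                                   # [[2.]], [[4.]], [[1.],[3.]]
indices = tf.dynamic_partition(tf.range(tf.size(gates)), gates, 3)
y = tf.dynamic_stitch(indices, parts)              # [[1.],[2.],[3.],[4.]] again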
'Run a step of the network.'
def step(self, sess, inp, target, do_backward_in, noise_param=None, beam_size=2, eos_id=2, eos_cost=0.0, update_mem=None, state=None):
(batch_size, height, length) = (inp.shape[0], inp.shape[1], inp.shape[2]) do_backward = do_backward_in train_mode = True if (do_backward_in is None): do_backward = False train_mode = False if (update_mem is None): update_mem = do_backward feed_in = {} if (state is None): state = np.zeros([batch_size, length, height, self.nmaps]) feed_in[self.prev_step.name] = state feed_in[self.length_tensor.name] = length feed_in[self.noise_param.name] = (noise_param if noise_param else 0.0) feed_in[self.do_training.name] = (1.0 if do_backward else 0.0) feed_in[self.update_mem.name] = (1 if update_mem else 0) if (do_backward_in is False): feed_in[self.sampling.name] = 0.0 index = 0 feed_out = [] if do_backward: feed_out.append(self.updates[index]) feed_out.append(self.grad_norms[index]) if train_mode: feed_out.append(self.losses[index]) feed_in[self.input.name] = inp feed_in[self.target.name] = target feed_out.append(self.outputs[index]) if train_mode: res = sess.run(([self.after_enc_step] + feed_out), feed_in) (after_enc_state, res) = (res[0], res[1:]) else: feed_in[self.sampling.name] = 1.1 res = sess.run(([self.after_enc_step, self.out_idx] + feed_out), feed_in) (after_enc_state, out_idx) = (res[0], res[1]) res = [res[2][l] for l in xrange(length)] outputs = [out_idx[:, i] for i in xrange(length)] cost = [0.0 for _ in xrange((beam_size * batch_size))] seen_eos = [0 for _ in xrange((beam_size * batch_size))] for (idx, logit) in enumerate(res): best = outputs[idx] for b in xrange(batch_size): if (seen_eos[b] > 1): cost[b] -= eos_cost else: cost[b] += np.log(logit[b][best[b]]) if (best[b] in [eos_id]): seen_eos[b] += 1 res = ([[(- c) for c in cost]] + outputs) offset = 0 norm = None if do_backward: offset = 2 norm = res[1] if train_mode: outputs = res[(offset + 1)] outputs = [outputs[l] for l in xrange(length)] return (res[offset], outputs, norm, after_enc_state)
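The per-sequence scoring loop above, extracted as a standalone sketch: log-probabilities of the chosen symbols are summed until EOS has been emitted twice, after which each remaining step subtracts a flat `eos_cost`. The function name is illustrative:

import numpy as np

def sequence_cost(step_logits, chosen, eos_id=2, eos_cost=0.0):
    cost, seen_eos = 0.0, 0
    for logit, best in zip(step_logits, chosen):
        if seen_eos > 1:
            cost -= eos_cost          # past the second EOS: flat penalty only
        else:
            cost += np.log(logit[best])
        if best == eos_id:
            seen_eos += 1
    return -cost                      # negated, as in the `res` list above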
'Grow the program body.'
def grow_body(self, new_var_name, dependencies, types_to_vars):
choices = [] for f in self.functions: if all([(a in types_to_vars.keys()) for a in f.arg_types]): choices.append(f) f = random.choice(choices) args = [] for t in f.arg_types: possible_vars = types_to_vars[t] var = random.choice(possible_vars) args.append(var) dependencies.setdefault(new_var_name, []).extend(([var] + dependencies[var])) fn_args = [random.choice(self.types_to_lambdas[t]) for t in f.fn_arg_types] types_to_vars.setdefault(f.output_type, []).append(new_var_name) return Statement(f, new_var_name, args, fn_args)
'Grow the program.'
def grow(self, program_len, input_types):
var_names = list(reversed(map(chr, range(97, 123)))) dependencies = dict() types_to_vars = dict() input_names = [] for t in input_types: var = var_names.pop() dependencies[var] = [] types_to_vars.setdefault(t, []).append(var) input_names.append(var) statements = [] for _ in range((program_len - 1)): var = var_names.pop() statements.append(self.grow_body(var, dependencies, types_to_vars)) statements.append(self.grow_body('out', dependencies, types_to_vars)) new_var_names = [c for c in map(chr, range(97, 123)) if (c not in input_names)] new_var_names.reverse() keep_statements = [] env = dict() for s in statements: if (s.output_var in dependencies['out']): keep_statements.append(s) env[s.output_var] = new_var_names.pop() if (s.output_var == 'out'): keep_statements.append(s) for k in keep_statements: k.substitute(env) return Program(input_names, input_types, ';'.join([str(k) for k in keep_statements]))
'Evaluate this program.'
def evaluate(self, inputs):
if (len(inputs) != len(self.input_names)): raise AssertionError(('inputs and input_names have to have the same len. inp: %s , names: %s' % (str(inputs), str(self.input_names)))) inp_str = '' for (name, inp) in zip(self.input_names, inputs): inp_str += (((name + ' = ') + str(inp)) + '; ') with stdoutIO() as s: exec ((inp_str + self.body) + '; print(out)') return s.getvalue()[:(-1)]
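`stdoutIO` is assumed to be a context manager that redirects `sys.stdout` into a buffer so the `print(out)` appended to the program body can be read back. A minimal Python 3 flavored sketch of such a helper:

import sys, contextlib
from io import StringIO

@contextlib.contextmanager
def stdoutIO():
    old, buf = sys.stdout, StringIO()
    sys.stdout = buf
    try:
        yield buf
    finally:
        sys.stdout = old
# evaluate() then exec's e.g. "a = [1, 2]; out = len(a); print(out)" and
# returns the captured "2" (with the trailing newline stripped).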
'Returns the number of classes in the data set.'
def num_classes(self):
return 5
'Returns the number of examples in the data subset.'
def num_examples_per_epoch(self):
if (self.subset == 'train'): return 3170 if (self.subset == 'validation'): return 500
'Instruction to download and extract the tarball from Flowers website.'
def download_message(self):
print(('Failed to find any Flowers %s files' % self.subset)) print('') print('If you have already downloaded and processed the data, then make sure to set --data_dir to point to the directory containing the location of the sharded TFRecords.\n') print('Please see README.md for instructions on how to build the flowers dataset using download_and_preprocess_flowers.\n')
'Initialize VariableDeviceChooser. Args: num_parameter_servers: number of parameter servers. ps_device: string representing the parameter server device. placement: string representing the placement of the variable, either CPU:0 or GPU:0. When using parameter servers, placement is forced to CPU:0.'
def __init__(self, num_parameter_servers=0, ps_device='/job:ps', placement='CPU:0'):
self._num_ps = num_parameter_servers self._ps_device = ps_device self._placement = (placement if (num_parameter_servers == 0) else 'CPU:0') self._next_task_id = 0
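The chooser presumably pairs with a method that hands out parameter-server tasks round-robin via `_next_task_id`; the following is a hedged sketch of that behavior (the method name `_next_device` and the exact device-string format are assumptions, not shown here):

def _next_device(self):
    # Hypothetical helper: cycle through ps tasks, then append the placement.
    device_string = ''
    if self._num_ps > 0:
        device_string = '%s/task:%d' % (self._ps_device, self._next_task_id)
        self._next_task_id = (self._next_task_id + 1) % self._num_ps
    return device_string + ('/%s' % self._placement)
# e.g. with num_parameter_servers=2: '/job:ps/task:0/CPU:0',
# '/job:ps/task:1/CPU:0', '/job:ps/task:0/CPU:0', ...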
'Initialize dataset using a subset and the path to the data.'
def __init__(self, name, subset):
assert (subset in self.available_subsets()), self.available_subsets() self.name = name self.subset = subset
'Returns the number of classes in the data set.'
@abstractmethod def num_classes(self):
pass
'Returns the number of examples in the data subset.'
@abstractmethod def num_examples_per_epoch(self):
pass
'Prints a download message for the Dataset.'
@abstractmethod def download_message(self):
pass
'Returns the list of available subsets.'
def available_subsets(self):
return ['train', 'validation']
'Returns a Python list of all (sharded) data subset files. Returns: Python list of all (sharded) data set files. Raises: ValueError: if there are no data files matching the subset.'
def data_files(self):
tf_record_pattern = os.path.join(FLAGS.data_dir, ('%s-*' % self.subset)) data_files = tf.gfile.Glob(tf_record_pattern) if (not data_files): print(('No files found for dataset %s/%s at %s' % (self.name, self.subset, FLAGS.data_dir))) self.download_message() exit((-1)) return data_files
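Hypothetical usage, assuming a concrete subclass such as `FlowersData(name, subset)` (the subclass name is an assumption). The glob pattern `'%s-*'` matches sharded TFRecord files like `train-00000-of-00002`:

dataset = FlowersData('flowers', 'train')   # hypothetical Dataset subclass
files = dataset.data_files()
# e.g. ['<data_dir>/train-00000-of-00002', '<data_dir>/train-00001-of-00002']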
'Return a reader for a single entry from the data set. See io_ops.py for details of Reader class. Returns: Reader object that reads the data set.'
def reader(self):
return tf.TFRecordReader()
'Returns the number of classes in the data set.'
def num_classes(self):
return 1000
'Returns the number of examples in the data set.'
def num_examples_per_epoch(self):
if (self.subset == 'train'): return 1281167 if (self.subset == 'validation'): return 50000