Columns: desc (docstring; string lengths 3 to 26.7k), decl (function declaration; string lengths 11 to 7.89k), bodies (function body; string lengths 8 to 553k).
'Get small set of idxs to compute nearest neighbor queries on. This is an expensive look-up on the whole memory that is used to avoid more expensive operations later on. Args: normalized_query: A Tensor of shape [None, key_dim]. Returns: A Tensor of shape [None, choose_k] of indices in memory that are closest to the queries.'
def get_hint_pool_idxs(self, normalized_query):
with tf.device(self.nn_device):
  similarities = tf.matmul(tf.stop_gradient(normalized_query),
                           self.mem_keys, transpose_b=True, name='nn_mmul')
_, hint_pool_idxs = tf.nn.top_k(
    tf.stop_gradient(similarities), k=self.choose_k, name='nn_topk')
return hint_pool_idxs
'Function that creates all the update ops.'
def make_update_op(self, upd_idxs, upd_keys, upd_vals, batch_size, use_recent_idx, intended_output):
mem_age_incr = self.mem_age.assign_add(
    tf.ones([self.memory_size], dtype=tf.float32))
with tf.control_dependencies([mem_age_incr]):
  mem_age_upd = tf.scatter_update(
      self.mem_age, upd_idxs, tf.zeros([batch_size], dtype=tf.float32))

mem_key_upd = tf.scatter_update(self.mem_keys, upd_idxs, upd_keys)
mem_val_upd = tf.scatter_update(self.mem_vals, upd_idxs, upd_vals)

if use_recent_idx:
  recent_idx_upd = tf.scatter_update(
      self.recent_idx, intended_output, upd_idxs)
else:
  recent_idx_upd = tf.group()

return tf.group(mem_age_upd, mem_key_upd, mem_val_upd, recent_idx_upd)
'Queries memory for nearest neighbor. Args: query_vec: A batch of vectors to query (embedding of input to model). intended_output: The values that would be the correct output of the memory. use_recent_idx: Whether to always insert at least one instance of a correct memory fetch. Returns: A tuple (result, mask, teacher_loss). result: The result of the memory look up. mask: The affinity of the query to the result. teacher_loss: The loss for training the memory module.'
def query(self, query_vec, intended_output, use_recent_idx=True):
batch_size = tf.shape(query_vec)[0]
output_given = intended_output is not None

query_vec = tf.matmul(query_vec, self.query_proj)
normalized_query = tf.nn.l2_normalize(query_vec, dim=1)

hint_pool_idxs = self.get_hint_pool_idxs(normalized_query)

if output_given and use_recent_idx:
  most_recent_hint_idx = tf.gather(self.recent_idx, intended_output)
  hint_pool_idxs = tf.concat(
      axis=1,
      values=[hint_pool_idxs, tf.expand_dims(most_recent_hint_idx, 1)])
choose_k = tf.shape(hint_pool_idxs)[1]

with tf.device(self.var_cache_device):
  my_mem_keys = tf.stop_gradient(tf.gather(self.mem_keys, hint_pool_idxs,
                                           name='my_mem_keys_gather'))
  similarities = tf.matmul(tf.expand_dims(normalized_query, 1),
                           my_mem_keys, adjoint_b=True, name='batch_mmul')
  hint_pool_sims = tf.squeeze(similarities, [1], name='hint_pool_sims')
  hint_pool_mem_vals = tf.gather(self.mem_vals, hint_pool_idxs,
                                 name='hint_pool_mem_vals')

softmax_temp = max(1.0, np.log(0.2 * self.choose_k) / self.alpha)
mask = tf.nn.softmax(hint_pool_sims[:, :choose_k - 1] * softmax_temp)

teacher_hints = tf.to_float(
    tf.abs(tf.expand_dims(intended_output, 1) - hint_pool_mem_vals))
teacher_hints = 1.0 - tf.minimum(1.0, teacher_hints)

teacher_vals, teacher_hint_idxs = tf.nn.top_k(
    hint_pool_sims * teacher_hints, k=1)
neg_teacher_vals, _ = tf.nn.top_k(
    hint_pool_sims * (1 - teacher_hints), k=1)

teacher_idxs = tf.gather(
    tf.reshape(hint_pool_idxs, [-1]),
    teacher_hint_idxs[:, 0] + choose_k * tf.range(batch_size))
teacher_vals *= (
    1 - tf.to_float(tf.equal(0.0, tf.reduce_sum(teacher_hints, 1))))

nearest_neighbor = tf.to_int32(
    tf.argmax(hint_pool_sims[:, :choose_k - 1], 1))
no_teacher_idxs = tf.gather(
    tf.reshape(hint_pool_idxs, [-1]),
    nearest_neighbor + choose_k * tf.range(batch_size))

sliced_hints = tf.slice(teacher_hints, [0, 0], [-1, self.correct_in_top])
incorrect_memory_lookup = tf.equal(0.0, tf.reduce_sum(sliced_hints, 1))

teacher_loss = (tf.nn.relu(neg_teacher_vals - teacher_vals + self.alpha)
                - self.alpha)

with tf.device(self.var_cache_device):
  result = tf.gather(self.mem_vals, tf.reshape(no_teacher_idxs, [-1]))

update_keys = normalized_query
update_vals = intended_output

fetched_idxs = teacher_idxs
with tf.device(self.var_cache_device):
  fetched_keys = tf.gather(self.mem_keys, fetched_idxs, name='fetched_keys')
  fetched_vals = tf.gather(self.mem_vals, fetched_idxs, name='fetched_vals')

fetched_keys_upd = update_keys + fetched_keys
fetched_keys_upd = tf.nn.l2_normalize(fetched_keys_upd, dim=1)

mem_age_with_noise = self.mem_age + tf.random_uniform(
    [self.memory_size], -self.age_noise, self.age_noise)

_, oldest_idxs = tf.nn.top_k(mem_age_with_noise, k=batch_size, sorted=False)

with tf.control_dependencies([result]):
  upd_idxs = tf.where(incorrect_memory_lookup, oldest_idxs, fetched_idxs)
  upd_keys = tf.where(incorrect_memory_lookup, update_keys, fetched_keys_upd)
  upd_vals = tf.where(incorrect_memory_lookup, update_vals, fetched_vals)

def make_update_op():
  return self.make_update_op(upd_idxs, upd_keys, upd_vals, batch_size,
                             use_recent_idx, intended_output)

update_op = tf.cond(self.update_memory, make_update_op, tf.no_op)

with tf.control_dependencies([update_op]):
  result = tf.identity(result)
  mask = tf.identity(mask)
  teacher_loss = tf.identity(teacher_loss)

return result, mask, tf.reduce_mean(teacher_loss)
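A brief usage sketch, not taken from this module: the embedding function, inputs, and labels below are placeholders, meant only to show where the (result, mask, teacher_loss) triple described above would plug into a training step.

# Hypothetical call site; `embed`, `images`, and `labels` stand in for a real model.
query_vec = embed(images)                 # [batch, input_dim] embeddings
result, mask, teacher_loss = memory.query(
    query_vec, intended_output=labels, use_recent_idx=True)
train_loss = teacher_loss                 # already averaged over the batch
predictions = result                      # nearest-neighbor values for evaluation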
'Gets hashed-to buckets for batch of queries. Args: query: 2-d Tensor of query vectors. Returns: A list of hashed-to buckets for each hash function.'
def get_hash_slots(self, query):
binary_hash = [
    tf.less(tf.matmul(query, self.hash_vecs[i], transpose_b=True), 0)
    for i in xrange(self.num_libraries)]
hash_slot_idxs = [
    tf.reduce_sum(
        tf.to_int32(binary_hash[i]) *
        tf.constant([[2 ** i for i in xrange(self.num_hashes)]],
                    dtype=tf.int32), 1)
    for i in xrange(self.num_libraries)]
return hash_slot_idxs
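As a side note on the sign-bit hashing above, here is a minimal NumPy sketch (toy sizes, made up for illustration) of how the per-hyperplane bits are packed into a bucket index:

import numpy as np

num_hashes = 4                           # bits per hash function (assumed)
query = np.random.randn(2, 8)            # toy [batch, key_dim] queries
hash_vecs = np.random.randn(num_hashes, 8)

bits = (query @ hash_vecs.T) < 0         # one sign bit per hyperplane, [batch, num_hashes]
powers = 2 ** np.arange(num_hashes)      # [1, 2, 4, 8]
bucket_idxs = (bits.astype(np.int32) * powers).sum(axis=1)
# one bucket index in [0, 2**num_hashes) per query, mirroring hash_slot_idxs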
'Get small set of idxs to compute nearest neighbor queries on. This is an expensive look-up on the whole memory that is used to avoid more expensive operations later on. Args: normalized_query: A Tensor of shape [None, key_dim]. Returns: A Tensor of shape [None, choose_k] of indices in memory that are closest to the queries.'
def get_hint_pool_idxs(self, normalized_query):
hash_slot_idxs = self.get_hash_slots(normalized_query)
hint_pool_idxs = [
    tf.maximum(tf.minimum(
        tf.gather(self.hash_slots[i], idxs),
        self.memory_size - 1), 0)
    for i, idxs in enumerate(hash_slot_idxs)]
return tf.concat(axis=1, values=hint_pool_idxs)
'Function that creates all the update ops.'
def make_update_op(self, upd_idxs, upd_keys, upd_vals, batch_size, use_recent_idx, intended_output):
base_update_op = super(LSHMemory, self).make_update_op(
    upd_idxs, upd_keys, upd_vals,
    batch_size, use_recent_idx, intended_output)

hash_slot_idxs = self.get_hash_slots(upd_keys)
update_ops = []
with tf.control_dependencies([base_update_op]):
  for i, slot_idxs in enumerate(hash_slot_idxs):
    entry_idx = tf.random_uniform([batch_size],
                                  maxval=self.num_per_hash_slot,
                                  dtype=tf.int32)
    entry_mul = 1 - tf.one_hot(entry_idx, self.num_per_hash_slot,
                               dtype=tf.int32)
    entry_add = (tf.expand_dims(upd_idxs, 1) *
                 tf.one_hot(entry_idx, self.num_per_hash_slot,
                            dtype=tf.int32))

    mul_op = tf.scatter_mul(self.hash_slots[i], slot_idxs, entry_mul)
    with tf.control_dependencies([mul_op]):
      add_op = tf.scatter_add(self.hash_slots[i], slot_idxs, entry_add)
      update_ops.append(add_op)

return tf.group(*update_ops)
'Generate random pseudo-boolean key and message values.'
def get_message_and_key(self):
batch_size = tf.placeholder_with_default(FLAGS.batch_size, shape=[])

in_m = batch_of_random_bools(batch_size, TEXT_SIZE)
in_k = batch_of_random_bools(batch_size, KEY_SIZE)
return in_m, in_k
'The model for Alice, Bob, and Eve. If key=None, the first FC layer takes only the message as inputs. Otherwise, it uses both the key and the message. Args: collection: The graph keys collection to add new vars to. message: The input message to process. key: The input key (if any) to use.'
def model(self, collection, message, key=None):
if key is not None:
  combined_message = tf.concat(axis=1, values=[message, key])
else:
  combined_message = message

with tf.contrib.framework.arg_scope(
    [tf.contrib.layers.fully_connected, tf.contrib.layers.conv2d],
    variables_collections=[collection]):
  fc = tf.contrib.layers.fully_connected(
      combined_message,
      TEXT_SIZE + KEY_SIZE,
      biases_initializer=tf.constant_initializer(0.0),
      activation_fn=None)
  fc = tf.expand_dims(fc, 2)

  conv = tf.contrib.layers.conv2d(
      fc, 2, 2, 2, 'SAME', activation_fn=tf.nn.sigmoid)
  conv = tf.contrib.layers.conv2d(
      conv, 2, 1, 1, 'SAME', activation_fn=tf.nn.sigmoid)
  conv = tf.contrib.layers.conv2d(
      conv, 1, 1, 1, 'SAME', activation_fn=tf.nn.tanh)
  conv = tf.squeeze(conv, 2)
return conv
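For orientation only: based on the docstring (key for Alice and Bob, none for Eve), one plausible way the shared model could be wired is sketched below. The collection names and the decision to feed Bob the ciphertext are assumptions, not taken from this file.

# Hypothetical wiring sketch (names and call sites are assumptions).
encrypted = self.model('alice', in_m, in_k)      # Alice sees message and key
decrypted = self.model('bob', encrypted, in_k)   # Bob sees ciphertext and key
eve_guess = self.model('eve', encrypted)         # Eve sees only the ciphertext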
'Initializes the ComponentBuilder from specifications. Args: master: dragnn.MasterBuilder object. component_spec: dragnn.ComponentSpec proto to be built. attr_defaults: Optional dict of component attribute defaults. If not provided or if empty, attributes are not extracted.'
def __init__(self, master, component_spec, attr_defaults=None):
self.master = master
self.num_actions = component_spec.num_actions
self.name = component_spec.name
self.spec = component_spec
self.moving_average = None

self.eligible_for_self_norm = (
    not self.master.hyperparams.self_norm_components_filter or
    self.name in self.master.hyperparams.self_norm_components_filter.split(
        ','))

self._attrs = {}
if attr_defaults:
  self._attrs = network_units.get_attrs_with_defaults(
      self.spec.component_builder.parameters, attr_defaults)

with tf.variable_scope(self.name):
  self.training_beam_size = tf.constant(
      self.spec.training_beam_size, name='TrainingBeamSize')
  self.inference_beam_size = tf.constant(
      self.spec.inference_beam_size, name='InferenceBeamSize')
  self.locally_normalize = tf.constant(False, name='LocallyNormalize')
  self._step = tf.get_variable(
      'step', [], initializer=tf.zeros_initializer(), dtype=tf.int32)
  self._total = tf.get_variable(
      'total', [], initializer=tf.zeros_initializer(), dtype=tf.int32)

self.network = self.make_network(self.spec.network_unit)

if self.master.hyperparams.use_moving_average:
  self.moving_average = tf.train.ExponentialMovingAverage(
      decay=self.master.hyperparams.average_weight, num_updates=self._step)
  self.avg_ops = [self.moving_average.apply(self.network.params)]
'Makes a NetworkUnitInterface object based on the network_unit spec. Components may override this method to exert control over the network unit construction, such as which network units are supported. Args: network_unit: RegisteredModuleSpec proto defining the network unit. Returns: An implementation of NetworkUnitInterface. Raises: ValueError: if the requested network unit is not found in the registry.'
def make_network(self, network_unit):
network_type = network_unit.registered_name
with tf.variable_scope(self.name):
  return network_units.NetworkUnitInterface.Create(network_type, self)
'Builds a training graph for this component. Two assumptions are made about the resulting graph: 1. An oracle will be used to unroll the state and compute the cost. 2. The graph will be differentiable when the cost is being minimized. Args: state: MasterState from the \'AdvanceMaster\' op that advances the underlying master to this component. network_states: dictionary of component NetworkState objects. Returns: (state, cost, correct, total) -- These are TF ops corresponding to the final state after unrolling, the total cost, the total number of correctly predicted actions, and the total number of actions.'
@abstractmethod
def build_greedy_training(self, state, network_states):
pass
'Builds a beam search based training loop for this component. The default implementation builds a dummy graph and raises a TensorFlow runtime exception to indicate that structured training is not implemented. Args: state: MasterState from the \'AdvanceMaster\' op that advances the underlying master to this component. network_states: dictionary of component NetworkState objects. Returns: (handle, cost, correct, total) -- These are TF ops corresponding to the final handle after unrolling, the total cost, and the total number of actions. Since the number of correctly predicted actions is not applicable in the structured training setting, a dummy value should be returned.'
def build_structured_training(self, state, network_states):
del network_states
with tf.control_dependencies([tf.Assert(False, ['Not implemented.'])]):
  handle = tf.identity(state.handle)
cost = tf.constant(0.0)
correct, total = tf.constant(0), tf.constant(0)
return handle, cost, correct, total
'Builds an inference graph for this component. If this graph is being constructed \'during_training\', then it needs to be differentiable even though it doesn\'t return an explicit cost. There may be other cases where the distinction between training and eval is important. The handling of dropout is an example of this. Args: state: MasterState from the \'AdvanceMaster\' op that advances the underlying master to this component. network_states: dictionary of component NetworkState objects. during_training: whether the graph is being constructed during training Returns: Handle to the state once inference is complete for this Component.'
@abstractmethod
def build_greedy_inference(self, state, network_states, during_training=False):
pass
'Constructs a set of summaries for this component. Returns: List of Summary ops to get parameter norms, progress reports, and so forth for this component.'
def get_summaries(self):
def combine_norm(matrices):
  squares = [tf.reduce_sum(tf.square(m)) for m in matrices if m is not None]
  if squares:
    return tf.sqrt(tf.add_n(squares))
  else:
    return tf.constant(0, tf.float32)

summaries = []
summaries.append(tf.summary.scalar('%s step' % self.name, self._step))
summaries.append(tf.summary.scalar('%s total' % self.name, self._total))

if self.network.params:
  summaries.append(
      tf.summary.scalar('%s parameter Norm' % self.name,
                        combine_norm(self.network.params)))

  slot_names = self.master.optimizer.get_slot_names()
  for name in slot_names:
    slot_params = [
        self.master.optimizer.get_slot(p, name) for p in self.network.params]
    summaries.append(
        tf.summary.scalar('%s %s Norm' % (self.name, name),
                          combine_norm(slot_params)))

  if self.master.hyperparams.use_moving_average:
    summaries.append(
        tf.summary.scalar('%s avg Norm' % self.name,
                          combine_norm([
                              self.moving_average.average(p)
                              for p in self.network.params
                          ])))

return summaries
'Returns either the original or averaged version of a given variable. If the master.read_from_avg flag is set to True, and the ExponentialMovingAverage (EMA) object has been attached, then this will ask the EMA object for the given variable. This is to allow executing inference from the averaged version of parameters. Arguments: var_name: Name of the variable. var_params: tf.Variable for which to retrieve an average. Only one of |var_name| or |var_params| needs to be provided. If both are provided, |var_params| takes precedence. Returns: tf.Variable object corresponding to original or averaged version.'
def get_variable(self, var_name=None, var_params=None):
if var_params:
  var_name = var_params.name
else:
  check.NotNone(var_name, 'specify at least one of var_name or var_params')
  var_params = tf.get_variable(var_name)

if self.moving_average and self.master.read_from_avg:
  logging.info('Retrieving average for: %s', var_name)
  var_params = self.moving_average.average(var_params)
  assert var_params
logging.info('Returning: %s', var_params.name)
return var_params
'Returns ops to advance the per-component step and total counters. Args: total: Total number of actions to increment counters by. Returns: tf.Group op incrementing \'step\' by 1 and \'total\' by total.'
def advance_counters(self, total):
update_total = tf.assign_add(self._total, total, use_locking=True)
update_step = tf.assign_add(self._step, 1, use_locking=True)
return tf.group(update_total, update_step)
'Adds L2 regularization for parameters which have it turned on. Args: cost: float cost before regularization. Returns: Updated cost optionally including regularization.'
def add_regularizer(self, cost):
if self.network is None:
  return cost

regularized_weights = self.network.get_l2_regularized_weights()
if not regularized_weights:
  return cost

l2_coeff = self.master.hyperparams.l2_regularization_coefficient
if l2_coeff == 0.0:
  return cost

tf.logging.info('[%s] Regularizing parameters: %s', self.name,
                [w.name for w in regularized_weights])
l2_costs = [tf.nn.l2_loss(p) for p in regularized_weights]
return tf.add(cost, l2_coeff * tf.add_n(l2_costs), name='regularizer')
'Builds a post restore graph for this component. This is a run-once graph that prepares any state necessary for the inference portion of the component. It is generally a no-op. Returns: A no-op state.'
def build_post_restore_hook(self):
logging.info('Building default post restore hook for component: %s',
             self.spec.name)
return tf.no_op(name='setup_%s' % self.spec.name)
'Returns the value of the component attribute with the |name|.'
def attr(self, name):
return self._attrs[name]
'Builds a training loop for this component. This loop repeatedly evaluates the network and computes the loss, but it does not advance using the predictions of the network. Instead, it advances using the oracle defined in the underlying transition system. The final state will always correspond to the gold annotation. Args: state: MasterState from the \'AdvanceMaster\' op that advances the underlying master to this component. network_states: NetworkState object containing component TensorArrays. Returns: (state, cost, correct, total) -- These are TF ops corresponding to the final state after unrolling, the total cost, the total number of correctly predicted actions, and the total number of actions.'
def build_greedy_training(self, state, network_states):
logging.info('Building component: %s', self.spec.name)
with tf.control_dependencies([tf.assert_equal(self.training_beam_size, 1)]):
  stride = state.current_batch_size * self.training_beam_size

cost = tf.constant(0.0)
correct = tf.constant(0)
total = tf.constant(0)

def cond(handle, *_):
  all_final = dragnn_ops.emit_all_final(handle, component=self.name)
  return tf.logical_not(tf.reduce_all(all_final))

def body(handle, cost, correct, total, *arrays):
  'Runs the network and advances the state by a step.'
  with tf.control_dependencies([handle, cost, correct, total] +
                               [x.flow for x in arrays]):
    updated_state = MasterState(handle, state.current_batch_size)
    network_tensors = self._feedforward_unit(
        updated_state, arrays, network_states, stride, during_training=True)
    next_arrays = update_tensor_arrays(network_tensors, arrays)

    with tf.control_dependencies([x.flow for x in next_arrays]):
      with tf.name_scope('compute_loss'):
        gold = dragnn_ops.emit_oracle_labels(handle, component=self.name)
        gold.set_shape([None])
        valid = tf.greater(gold, -1)
        valid_ix = tf.reshape(tf.where(valid), [-1])
        gold = tf.gather(gold, valid_ix)

        logits = self.network.get_logits(network_tensors)
        logits = tf.gather(logits, valid_ix)

        cost += tf.reduce_sum(
            tf.nn.sparse_softmax_cross_entropy_with_logits(
                labels=tf.cast(gold, tf.int64), logits=logits))

        if (self.eligible_for_self_norm and
            self.master.hyperparams.self_norm_alpha > 0):
          log_z = tf.reduce_logsumexp(logits, [1])
          cost += (self.master.hyperparams.self_norm_alpha *
                   tf.nn.l2_loss(log_z))

        correct += tf.reduce_sum(tf.to_int32(tf.nn.in_top_k(logits, gold, 1)))
        total += tf.size(gold)

    with tf.control_dependencies([cost, correct, total, gold]):
      handle = dragnn_ops.advance_from_oracle(handle, component=self.name)
  return [handle, cost, correct, total] + next_arrays

with tf.name_scope(self.name + '/train_state'):
  init_arrays = []
  for layer in self.network.layers:
    init_arrays.append(layer.create_array(state.current_batch_size))

output = tf.while_loop(
    cond,
    body, [state.handle, cost, correct, total] + init_arrays,
    name='train_%s' % self.name)

state.handle = output[0]
correct = output[2]
total = output[3]
arrays = output[4:]
cost = output[1]

network_state = network_states[self.name]
with tf.name_scope(self.name + '/stored_act'):
  for index, layer in enumerate(self.network.layers):
    network_state.activations[layer.name] = network_units.StoredActivations(
        array=arrays[index])

with tf.control_dependencies([tf.assert_greater(total, 0)]):
  cost /= tf.to_float(total)

cost = self.add_regularizer(cost)

with tf.control_dependencies([x.flow for x in arrays]):
  return tf.identity(state.handle), cost, correct, total
'Builds an inference loop for this component. Repeatedly evaluates the network and advances the underlying state according to the predicted scores. Args: state: MasterState from the \'AdvanceMaster\' op that advances the underlying master to this component. network_states: NetworkState object containing component TensorArrays. during_training: whether the graph is being constructed during training Returns: Handle to the state once inference is complete for this Component.'
def build_greedy_inference(self, state, network_states, during_training=False):
logging.info('Building component: %s', self.spec.name)
if during_training:
  stride = state.current_batch_size * self.training_beam_size
else:
  stride = state.current_batch_size * self.inference_beam_size

def cond(handle, *_):
  all_final = dragnn_ops.emit_all_final(handle, component=self.name)
  return tf.logical_not(tf.reduce_all(all_final))

def body(handle, *arrays):
  'Runs the network and advances the state by a step.'
  with tf.control_dependencies([handle] + [x.flow for x in arrays]):
    updated_state = MasterState(handle, state.current_batch_size)
    network_tensors = self._feedforward_unit(
        updated_state,
        arrays,
        network_states,
        stride,
        during_training=during_training)
    next_arrays = update_tensor_arrays(network_tensors, arrays)

    with tf.control_dependencies([x.flow for x in next_arrays]):
      logits = self.network.get_logits(network_tensors)
      logits = tf.cond(self.locally_normalize,
                       lambda: tf.nn.log_softmax(logits), lambda: logits)
      handle = dragnn_ops.advance_from_prediction(
          handle, logits, component=self.name)
  return [handle] + next_arrays

with tf.name_scope(self.name + '/inference_state'):
  init_arrays = []
  for layer in self.network.layers:
    init_arrays.append(layer.create_array(stride))

output = tf.while_loop(
    cond,
    body, [state.handle] + init_arrays,
    name='inference_%s' % self.name)

state.handle = output[0]
arrays = output[1:]

network_state = network_states[self.name]
with tf.name_scope(self.name + '/stored_act'):
  for index, layer in enumerate(self.network.layers):
    network_state.activations[layer.name] = network_units.StoredActivations(
        array=arrays[index])

with tf.control_dependencies([x.flow for x in arrays]):
  return tf.identity(state.handle)
'Constructs a single instance of a feed-forward cell. Given an input state and access to the arrays storing activations, this function encapsulates creation of a single network unit. This will *not* create new variables. Args: state: MasterState for the state that will be used to extract features. arrays: List of TensorArrays corresponding to network outputs from this component. These are used for recurrent link features; the arrays from other components are used for stack-prop style connections. network_states: NetworkState object containing the TensorArrays from *all* components. stride: int Tensor with the current beam * batch size. during_training: Whether to build a unit for training (vs inference). Returns: List of tensors generated by the underlying network implementation.'
def _feedforward_unit(self, state, arrays, network_states, stride, during_training):
with tf.variable_scope(self.name, reuse=True):
  fixed_embeddings = []
  for channel_id, feature_spec in enumerate(self.spec.fixed_feature):
    fixed_embedding = network_units.fixed_feature_lookup(
        self, state, channel_id, stride)
    if feature_spec.is_constant:
      fixed_embedding.tensor = tf.stop_gradient(fixed_embedding.tensor)
    fixed_embeddings.append(fixed_embedding)

  linked_embeddings = []
  for channel_id, feature_spec in enumerate(self.spec.linked_feature):
    if feature_spec.source_component == self.name:
      index = self.network.get_layer_index(feature_spec.source_layer)
      source_array = arrays[index]
      source_layer_size = self.network.layers[index].dim
      linked_embeddings.append(
          network_units.activation_lookup_recurrent(
              self, state, channel_id, source_array, source_layer_size,
              stride))
    else:
      source = self.master.lookup_component[feature_spec.source_component]
      source_tensor = network_states[source.name].activations[
          feature_spec.source_layer]
      source_layer_size = source.network.get_layer_size(
          feature_spec.source_layer)
      linked_embeddings.append(
          network_units.activation_lookup_other(
              self, state, channel_id, source_tensor.dynamic_tensor,
              source_layer_size))

  context_tensor_arrays = []
  for context_layer in self.network.context_layers:
    index = self.network.get_layer_index(context_layer.name)
    context_tensor_arrays.append(arrays[index])

  if self.spec.attention_component:
    logging.info('%s component has attention over %s', self.name,
                 self.spec.attention_component)
    source = self.master.lookup_component[self.spec.attention_component]
    network_state = network_states[self.spec.attention_component]
    with tf.control_dependencies(
        [tf.assert_equal(state.current_batch_size, 1)]):
      attention_tensor = tf.identity(
          network_state.activations['layer_0'].bulk_tensor)
  else:
    attention_tensor = None

  return self.network.create(fixed_embeddings, linked_embeddings,
                             context_tensor_arrays, attention_tensor,
                             during_training)
'Construct a new Composite optimizer. Args: optimizer1: A tf.python.training.optimizer.Optimizer object. optimizer2: A tf.python.training.optimizer.Optimizer object. switch: A tf.bool Tensor, selecting whether to use the first or the second optimizer. use_locking: Bool. If True apply use locks to prevent concurrent updates to variables. name: Optional name prefix for the operations created when applying gradients. Defaults to "Composite".'
def __init__(self, optimizer1, optimizer2, switch, use_locking=False, name='Composite'):
super(CompositeOptimizer, self).__init__(use_locking, name)
self._optimizer1 = optimizer1
self._optimizer2 = optimizer2
self._switch = switch
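For context only: given the constructor signature above, one plausible way to build the switch tensor is from the global step, so training can change optimizers after some number of updates. The threshold, the optimizer choices, and the assumption that True selects the first optimizer are illustrative, not taken from this file.

# Hypothetical usage sketch (threshold and optimizers are assumptions).
step = tf.train.get_or_create_global_step()
switch = tf.less(step, 10000)  # assumed: True selects optimizer1
optimizer = CompositeOptimizer(
    tf.train.MomentumOptimizer(0.01, 0.9),  # used while switch is True
    tf.train.AdamOptimizer(0.001),          # used afterwards
    switch)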
'Initializes the MasterBuilder from specifications. During construction, all components are initialized along with their parameter tf.Variables. Args: master_spec: dragnn.MasterSpec proto. hyperparam_config: dragnn.GridPoint proto specifying hyperparameters. Defaults to empty specification. pool_scope: string identifier for the compute session pool to use. Raises: ValueError: if a component is not found in the registry.'
def __init__(self, master_spec, hyperparam_config=None, pool_scope='shared'):
self.spec = master_spec
self.hyperparams = (
    spec_pb2.GridPoint() if hyperparam_config is None else hyperparam_config)
self.pool_scope = pool_scope

tf.set_random_seed(self.hyperparams.seed)

self.components = []
self.lookup_component = {}
for component_spec in master_spec.component:
  component_type = component_spec.component_builder.registered_name
  comp = component.ComponentBuilderBase.Create(component_type, self,
                                               component_spec)
  self.lookup_component[comp.name] = comp
  self.components.append(comp)

self.master_vars = {}
with tf.variable_scope('master', reuse=False):
  self.master_vars['step'] = tf.get_variable(
      'step', [], initializer=tf.zeros_initializer(), dtype=tf.int32)
  self.master_vars['learning_rate'] = _create_learning_rate(
      self.hyperparams, self.master_vars['step'])

self.optimizer = _create_optimizer(self.hyperparams,
                                   self.master_vars['learning_rate'],
                                   self.master_vars['step'])
'Returns a new ComputeSession handle.'
def _get_compute_session(self):
return dragnn_ops.get_session(
    self.pool_scope,
    master_spec=self.spec.SerializeToString(),
    grid_point=self.hyperparams.SerializeToString(),
    name='GetSession')
'Utility to create ComputeSession management ops. Creates a new ComputeSession handle and provides the following named nodes: ComputeSession/InputBatch -- a placeholder for attaching a string specification for AttachReader. ComputeSession/AttachReader -- the AttachReader op. Args: enable_tracing: bool, whether to enable tracing before attaching the data. Returns: handle: handle to a new ComputeSession returned by the AttachReader op. input_batch: InputBatch placeholder.'
def _get_session_with_reader(self, enable_tracing):
with tf.name_scope('ComputeSession'):
  input_batch = tf.placeholder(
      dtype=tf.string, shape=[None], name='InputBatch')

  handle = self._get_compute_session()
  if enable_tracing:
    handle = dragnn_ops.set_tracing(handle, True)
  handle = dragnn_ops.attach_data_reader(
      handle, input_batch, name='AttachReader')

  return handle, input_batch
'Ensures ComputeSession is released before outputs are returned. Args: handle: Handle to ComputeSession on which all computation until now has depended. It will be released and assigned to the output \'run\'. inputs: list of nodes we want to pass through without any dependencies. outputs: list of nodes whose access should ensure the ComputeSession is safely released. Returns: A dictionary of both input and output nodes.'
def _outputs_with_release(self, handle, inputs, outputs):
with tf.control_dependencies(outputs.values()):
  with tf.name_scope('ComputeSession'):
    release_op = dragnn_ops.release_session(handle)
  run_op = tf.group(release_op, name='run')

  for output in outputs:
    with tf.control_dependencies([release_op]):
      outputs[output] = tf.identity(outputs[output], name=output)

all_nodes = inputs.copy()
all_nodes.update(outputs)
all_nodes['run'] = run_op
return all_nodes
'Builds a training pipeline. Args: handle: Handle tensor for the ComputeSession. compute_gradients: Whether to generate gradients and an optimizer op. When False, build_training will return a \'dry run\' training op, used normally only for oracle tracing. use_moving_average: Whether or not to read from the moving average variables instead of the true parameters. Note: it is not possible to make gradient updates when this is True. advance_counters: Whether or not this loop should increment the per-component step counters. component_weights: If set, this is a list of relative weights each component\'s cost should get in the pipeline. Defaults to 1.0 for each component. unroll_using_oracle: If set, this is a list of booleans indicating whether or not to use the gold decodings for each component. Defaults to True for each component. max_index: Training will use only the first max_index components, or -1 for all components. Returns: handle: to the ComputeSession, conditioned on completing training step. outputs: a dictionary of useful training tensors. Raises: IndexError: if max_index is positive but out of bounds.'
def build_training(self, handle, compute_gradients=True, use_moving_average=False, advance_counters=True, component_weights=None, unroll_using_oracle=None, max_index=(-1)):
check.IsFalse(compute_gradients and use_moving_average,
              'It is not possible to make gradient updates when reading '
              'from the moving average variables.')

self.read_from_avg = use_moving_average
if max_index < 0:
  max_index = len(self.components)
elif not 0 < max_index <= len(self.components):
  raise IndexError('Invalid max_index {} for components {}; handle {}'.format(
      max_index, self.component_names, handle.name))

if not component_weights:
  component_weights = [1] * max_index
if not unroll_using_oracle:
  unroll_using_oracle = [True] * max_index

component_weights = component_weights[:max_index]
total_weight = float(sum(component_weights))
component_weights = [w / total_weight for w in component_weights]

unroll_using_oracle = unroll_using_oracle[:max_index]

logging.info('Creating training target:')
logging.info('\tWeights: %s', component_weights)
logging.info('\tOracle: %s', unroll_using_oracle)

metrics_list = []
cost = tf.constant(0.0)
effective_batch = tf.constant(0)

avg_ops = []
params_to_train = []

network_states = {}
for component_index in range(0, max_index):
  comp = self.components[component_index]
  network_states[comp.name] = component.NetworkState()

  logging.info('Initializing data for component "%s"', comp.name)
  handle = dragnn_ops.init_component_data(
      handle, beam_size=comp.training_beam_size, component=comp.name)

  master_state = component.MasterState(
      handle, dragnn_ops.batch_size(handle, component=comp.name))
  with tf.control_dependencies([handle, cost]):
    args = (master_state, network_states)
    if unroll_using_oracle[component_index]:
      handle, component_cost, component_correct, component_total = tf.cond(
          comp.training_beam_size > 1,
          lambda: comp.build_structured_training(*args),
          lambda: comp.build_greedy_training(*args))
    else:
      handle = comp.build_greedy_inference(during_training=True, *args)
      component_cost = tf.constant(0.0)
      component_correct, component_total = tf.constant(0), tf.constant(0)

    weighted_component_cost = tf.multiply(
        component_cost,
        tf.constant(float(component_weights[component_index])),
        name='weighted_component_cost')

    cost += weighted_component_cost
    effective_batch += component_total
    metrics_list += [[component_total], [component_correct]]

    if advance_counters:
      with tf.control_dependencies([comp.advance_counters(component_total)]):
        cost = tf.identity(cost)

    params_to_train += comp.network.params
    if self.hyperparams.use_moving_average:
      avg_ops += comp.avg_ops

metrics = tf.concat(metrics_list, 0)

if compute_gradients:
  logging.info('Creating train op with %d variables:\n\t%s',
               len(params_to_train),
               '\n\t'.join([x.name for x in params_to_train]))

  grads_and_vars = self.optimizer.compute_gradients(
      cost, var_list=params_to_train)
  clipped_gradients = [(self._clip_gradients(g), v)
                       for g, v in grads_and_vars]
  minimize_op = self.optimizer.apply_gradients(
      clipped_gradients, global_step=self.master_vars['step'])

  if self.hyperparams.use_moving_average:
    with tf.control_dependencies([minimize_op]):
      minimize_op = tf.group(*avg_ops)

  with tf.control_dependencies([minimize_op]):
    handle = tf.identity(handle)

self.read_from_avg = False

outputs = {'cost': cost, 'batch': effective_batch, 'metrics': metrics}
return handle, outputs
'Clips gradients if the hyperparameter `gradient_clip_norm` requires it. Sparse tensors, in the form of IndexedSlices returned for the gradients of embeddings, require special handling. Args: grad: Gradient Tensor, IndexedSlices, or None. Returns: Optionally clipped gradient.'
def _clip_gradients(self, grad):
if grad is not None and self.hyperparams.gradient_clip_norm > 0:
  logging.info('Clipping gradient %s', grad)
  if isinstance(grad, tf.IndexedSlices):
    tmp = tf.clip_by_norm(grad.values, self.hyperparams.gradient_clip_norm)
    return tf.IndexedSlices(tmp, grad.indices, grad.dense_shape)
  else:
    return tf.clip_by_norm(grad, self.hyperparams.gradient_clip_norm)
else:
  return grad
'Builds a graph that should be executed after the restore op. This graph is intended to be run once, before the inference pipeline is run. Returns: setup_op - An op that, when run, guarantees all setup ops will run.'
def build_post_restore_hook(self):
with tf.control_dependencies(
    [comp.build_post_restore_hook() for comp in self.components]):
  return tf.no_op(name='post_restore_hook_master')
'Builds an inference pipeline. This always uses the whole pipeline. Args: handle: Handle tensor for the ComputeSession. use_moving_average: Whether or not to read from the moving average variables instead of the true parameters. Note: it is not possible to make gradient updates when this is True. Returns: handle: Handle after annotation.'
def build_inference(self, handle, use_moving_average=False):
self.read_from_avg = use_moving_average

network_states = {}
for comp in self.components:
  network_states[comp.name] = component.NetworkState()
  handle = dragnn_ops.init_component_data(
      handle, beam_size=comp.inference_beam_size, component=comp.name)
  master_state = component.MasterState(
      handle, dragnn_ops.batch_size(handle, component=comp.name))
  with tf.control_dependencies([handle]):
    handle = comp.build_greedy_inference(master_state, network_states)
  handle = dragnn_ops.write_annotations(handle, component=comp.name)

self.read_from_avg = False
return handle
'Constructs a training pipeline from a TrainTarget proto. This constructs a separately managed pipeline for a given target: it has its own ComputeSession, InputSpec placeholder, etc. The ops are given standardized names to allow access from the C++ API. It passes the values in target_config to build_training() above. For the default prefix (\'train-\'), and a target named \'target\', this will construct the following targets in the graph: train-target/ComputeSession/* (the standard ComputeSession controls) train-target/run (handle to a completed training step) train-target/metrics (per-decision metrics from gold oracles) train-target/cost (total cost across all components) Enabling `trace_only` effectively creates a graph that is a \'dry run\'. There will be no side effects. In addition, the gradients won\'t be computed and the model parameters will not be updated. Args: target_config: the TrainTarget proto. prefix: Prepends target_config.name with this to construct a unique identifier. trace_only: Enabling this will result in: 1. Tracing will be enabled for the ComputeSession. 2. A \'traces\' node will be added to the outputs. 3. Gradients will not be computed. **kwargs: Passed on to build_training() above. Returns: Dictionary of training targets.'
def add_training_from_config(self, target_config, prefix='train-', trace_only=False, **kwargs):
logging.info('Creating new training target %s from config: %s',
             target_config.name, str(target_config))
scope_id = prefix + target_config.name
with tf.name_scope(scope_id):
  handle, input_batch = self._get_session_with_reader(trace_only)
  handle, outputs = self.build_training(
      handle,
      compute_gradients=not trace_only,
      advance_counters=not trace_only,
      component_weights=target_config.component_weights,
      unroll_using_oracle=target_config.unroll_using_oracle,
      max_index=target_config.max_index,
      **kwargs)
  if trace_only:
    outputs['traces'] = dragnn_ops.get_component_trace(
        handle, component=self.spec.component[-1].name)
  else:
    outputs['target_step'] = tf.get_variable(
        scope_id + '/TargetStep', [],
        initializer=tf.zeros_initializer(),
        dtype=tf.int32)
    increment_target_step = tf.assign_add(
        outputs['target_step'], 1, use_locking=True)
    with tf.control_dependencies([increment_target_step]):
      handle = tf.identity(handle)
  return self._outputs_with_release(handle, {'input_batch': input_batch},
                                    outputs)
'Adds an annotation pipeline to the graph. This will create the following additional named targets by default, for use in C++ annotation code (as well as regular ComputeSession targets): annotation/ComputeSession/session_id (placeholder for giving unique id) annotation/EmitAnnotations (get annotated data) annotation/GetComponentTrace (get trace data) annotation/SetTracing (sets tracing based on annotation/tracing_on) Args: name_scope: Scope for the annotation pipeline. enable_tracing: Enabling this will result in two things: 1. Tracing will be enabled during inference. 2. A \'traces\' node will be added to the outputs. Returns: A dictionary of input and output nodes.'
def add_annotation(self, name_scope='annotation', enable_tracing=False):
with tf.name_scope(name_scope):
  handle, input_batch = self._get_session_with_reader(enable_tracing)
  handle = self.build_inference(handle, use_moving_average=True)

  annotations = dragnn_ops.emit_annotations(
      handle, component=self.spec.component[-1].name)
  outputs = {'annotations': annotations}

  if enable_tracing:
    outputs['traces'] = dragnn_ops.get_component_trace(
        handle, component=self.spec.component[-1].name)

  return self._outputs_with_release(handle, {'input_batch': input_batch},
                                    outputs)
'Adds the post restore ops.'
def add_post_restore_hook(self, name_scope):
with tf.name_scope(name_scope):
  return self.build_post_restore_hook()
'Adds a Saver for all variables in the graph.'
def add_saver(self):
logging.info('Saving non-quantized variables:\n\t%s', '\n\t'.join(
    [x.name for x in tf.global_variables() if 'quantized' not in x.name]))
self.saver = tf.train.Saver(
    var_list=[x for x in tf.global_variables() if 'quantized' not in x.name],
    write_version=saver_pb2.SaverDef.V1)
'Initializes the ComponentSpec with some defaults for SyntaxNet. Args: name: The name of this Component in the pipeline. builder: The component builder type. backend: The component backend type.'
def __init__(self, name, builder='DynamicComponentBuilder', backend='SyntaxNetComponent'):
self.spec = spec_pb2.ComponentSpec(
    name=name,
    backend=self.make_module(backend),
    component_builder=self.make_module(builder))
'Forwards kwargs to easily create a RegisteredModuleSpec. Note: all kwargs should be string-valued. Args: name: The registered name of the module. **kwargs: Proto fields to be specified in the module. Returns: Newly created RegisteredModuleSpec.'
def make_module(self, name, **kwargs):
return spec_pb2.RegisteredModuleSpec(registered_name=name, parameters=kwargs)
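For illustration, a call along the following lines would produce a RegisteredModuleSpec; the network unit name and the parameter are only an example of the string-valued kwargs mentioned above.

# Hypothetical example; 'FeedForwardNetwork' and the parameter are illustrative.
unit = self.make_module('FeedForwardNetwork', hidden_layer_sizes='256')
# unit.registered_name == 'FeedForwardNetwork'
# unit.parameters == {'hidden_layer_sizes': '256'}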
'Returns the default source_layer setting for this ComponentSpec. Usually links are intended for a specific layer in the network unit. For common network units, this returns the hidden layer intended to be read by recurrent and cross-component connections. Returns: String name of default network layer. Raises: ValueError: if no default is known for the given setup.'
def default_source_layer(self):
for network, default_layer in [('FeedForwardNetwork', 'layer_0'),
                               ('LayerNormBasicLSTMNetwork', 'state_h_0'),
                               ('LSTMNetwork', 'layer_0'),
                               ('IdentityNetwork', 'input_embeddings')]:
  if self.spec.network_unit.registered_name.endswith(network):
    return default_layer
raise ValueError('No default source for network unit: %s' %
                 self.spec.network_unit)
'Returns the default source_translator setting for token representations. Most links are token-based: given a target token index, retrieve a learned representation for that token from this component. This depends on the transition system; e.g. we should make sure that left-to-right sequence models reverse the incoming token index when looking up representations from a right-to-left model. Returns: String name of default translator for this transition system. Raises: ValueError: if no default is known for the given setup.'
def default_token_translator(self):
transition_spec = self.spec.transition_system
if transition_spec.registered_name == 'arc-standard':
  return 'shift-reduce-step'
if transition_spec.registered_name in ('shift-only', 'tagger'):
  if 'left_to_right' in transition_spec.parameters:
    if transition_spec.parameters['left_to_right'] == 'false':
      return 'reverse-token'
  return 'identity'
raise ValueError('Invalid transition spec: %s' % str(transition_spec))
'Adds a link to source\'s token representations using default settings. Constructs a LinkedFeatureChannel proto and adds it to the spec, using defaults to assign the name, component, translator, and layer of the channel. The user must provide fml and embedding_dim. Args: source: SyntaxComponentBuilder object to pull representations from. source_layer: Optional override for a source layer instead of the default. **kwargs: Forwarded arguments to the LinkedFeatureChannel proto.'
def add_token_link(self, source=None, source_layer=None, **kwargs):
if source_layer is None:
  source_layer = source.default_source_layer()
self.spec.linked_feature.add(
    name=source.spec.name,
    source_component=source.spec.name,
    source_layer=source_layer,
    source_translator=source.default_token_translator(),
    **kwargs)
'Adds a recurrent link to this component using default settings. This adds the connection to the previous time step only to the network. It constructs a LinkedFeatureChannel proto and adds it to the spec, using defaults to assign the name, component, translator, and layer of the channel. The user must provide the embedding_dim only. Args: source_layer: Optional override for a source layer instead of the default. **kwargs: Forwarded arguments to the LinkedFeatureChannel proto.'
def add_rnn_link(self, source_layer=None, **kwargs):
if source_layer is None:
  source_layer = self.default_source_layer()
self.spec.linked_feature.add(
    name='rnn',
    source_layer=source_layer,
    source_component=self.spec.name,
    source_translator='history',
    fml='constant',
    **kwargs)
'Shorthand to set transition_system using kwargs.'
def set_transition_system(self, *args, **kwargs):
self.spec.transition_system.CopyFrom(self.make_module(*args, **kwargs))
'Shorthand to set network_unit using kwargs.'
def set_network_unit(self, *args, **kwargs):
self.spec.network_unit.CopyFrom(self.make_module(*args, **kwargs))
'Shorthand to add a fixed_feature using kwargs.'
def add_fixed_feature(self, **kwargs):
self.spec.fixed_feature.add(**kwargs)
'Add a link using default naming and layers only.'
def add_link(self, source, source_layer=None, source_translator='identity', name=None, **kwargs):
if source_layer is None:
  source_layer = source.default_source_layer()
if name is None:
  name = source.spec.name
self.spec.linked_feature.add(
    source_component=source.spec.name,
    source_layer=source_layer,
    name=name,
    source_translator=source_translator,
    **kwargs)
'Fills in feature sizes and vocabularies using SyntaxNet lexicon. Must be called before the spec is ready to be used to build TensorFlow graphs. Requires a SyntaxNet lexicon built at the resource_path. Using the lexicon, this will call the SyntaxNet custom ops to return the number of features and vocabulary sizes based on the FML specifications and the lexicons. It will also compute the number of actions of the transition system. This will often CHECK-fail if the spec doesn\'t correspond to a valid transition system or feature setup. Args: resource_path: Path to the lexicon. tf_master: TensorFlow master executor (string, defaults to \'\' to use the local instance).'
def fill_from_resources(self, resource_path, tf_master=''):
check.IsTrue(self.spec.transition_system.registered_name,
             'Set a transition system before calling fill_from_resources().')

context = lexicon.create_lexicon_context(resource_path)
for key, value in self.spec.transition_system.parameters.iteritems():
  context.parameter.add(name=key, value=value)

context.parameter.add(
    name='brain_parser_embedding_dims',
    value=';'.join([str(x.embedding_dim) for x in self.spec.fixed_feature]))
context.parameter.add(
    name='brain_parser_features',
    value=';'.join([x.fml for x in self.spec.fixed_feature]))
context.parameter.add(
    name='brain_parser_predicate_maps',
    value=';'.join(['' for x in self.spec.fixed_feature]))
context.parameter.add(
    name='brain_parser_embedding_names',
    value=';'.join([x.name for x in self.spec.fixed_feature]))
context.parameter.add(
    name='brain_parser_transition_system',
    value=self.spec.transition_system.registered_name)

with tf.Session(tf_master) as sess:
  feature_sizes, domain_sizes, _, num_actions = sess.run(
      gen_parser_ops.feature_size(task_context_str=str(context)))
  self.spec.num_actions = int(num_actions)
  for i in xrange(len(feature_sizes)):
    self.spec.fixed_feature[i].size = int(feature_sizes[i])
    self.spec.fixed_feature[i].vocabulary_size = int(domain_sizes[i])

for i in xrange(len(self.spec.linked_feature)):
  self.spec.linked_feature[i].size = len(
      self.spec.linked_feature[i].fml.split(' '))

for resource in context.input:
  self.spec.resource.add(name=resource.name).part.add(
      file_pattern=resource.part[0].file_pattern)
'Returns attrs based on the |defaults| and one |key|,|value| override.'
def MakeAttrs(self, defaults, key=None, value=None):
spec = spec_pb2.RegisteredModuleSpec()
if key and value:
  spec.parameters[key] = value
return network_units.get_attrs_with_defaults(spec.parameters, defaults)
'Extracts features and advances a batch using the oracle path. Args: state: MasterState from the \'AdvanceMaster\' op that advances the underlying master to this component. network_states: dictionary of component NetworkState objects Returns: state handle: final state after advancing cost: regularization cost, possibly associated with embedding matrices correct: since no gold path is available, 0. total: since no gold path is available, 0.'
def build_greedy_training(self, state, network_states):
logging.info('Building component: %s', self.spec.name)
stride = state.current_batch_size * self.training_beam_size

with tf.variable_scope(self.name, reuse=True):
  state.handle, fixed_embeddings = fetch_differentiable_fixed_embeddings(
      self, state, stride)

linked_embeddings = [
    fetch_linked_embedding(self, network_states, spec)
    for spec in self.spec.linked_feature
]

with tf.variable_scope(self.name, reuse=True):
  tensors = self.network.create(
      fixed_embeddings, linked_embeddings, None, None, True, stride=stride)
update_network_states(self, tensors, network_states, stride)

cost = self.add_regularizer(tf.constant(0.0))
correct, total = tf.constant(0), tf.constant(0)
return state.handle, cost, correct, total
'Extracts features and advances a batch using the oracle path. NOTE(danielandor) For now this method cannot be called during training. That is to say, unroll_using_oracle for this component must be set to true. This will be fixed by separating train_with_oracle and train_with_inference. Args: state: MasterState from the \'AdvanceMaster\' op that advances the underlying master to this component. network_states: dictionary of component NetworkState objects during_training: whether the graph is being constructed during training Returns: state handle: final state after advancing'
def build_greedy_inference(self, state, network_states, during_training=False):
logging.info('Building component: %s', self.spec.name)
if during_training:
  stride = state.current_batch_size * self.training_beam_size
else:
  stride = state.current_batch_size * self.inference_beam_size

with tf.variable_scope(self.name, reuse=True):
  if during_training:
    state.handle, fixed_embeddings = fetch_differentiable_fixed_embeddings(
        self, state, stride)
  else:
    state.handle, fixed_embeddings = fetch_fast_fixed_embeddings(self, state)

linked_embeddings = [
    fetch_linked_embedding(self, network_states, spec)
    for spec in self.spec.linked_feature
]

with tf.variable_scope(self.name, reuse=True):
  tensors = self.network.create(
      fixed_embeddings,
      linked_embeddings,
      None,
      None,
      during_training=during_training,
      stride=stride)
update_network_states(self, tensors, network_states, stride)
return state.handle
'Initializes the feature ID extractor component. Args: master: dragnn.MasterBuilder object. component_spec: dragnn.ComponentSpec proto to be built.'
def __init__(self, master, component_spec):
super(BulkFeatureIdExtractorComponentBuilder, self).__init__(
    master, component_spec)
check.Eq(len(self.spec.linked_feature), 0, 'Linked features are forbidden')
for feature_spec in self.spec.fixed_feature:
  check.Lt(feature_spec.embedding_dim, 0,
           'Features must be non-embedded: %s' % feature_spec)
'See base class.'
def build_greedy_training(self, state, network_states):
state.handle = self._extract_feature_ids(state, network_states, True)
cost = self.add_regularizer(tf.constant(0.0))
correct, total = tf.constant(0), tf.constant(0)
return state.handle, cost, correct, total
'See base class.'
def build_greedy_inference(self, state, network_states, during_training=False):
return self._extract_feature_ids(state, network_states, during_training)
'Extracts feature IDs and advances a batch using the oracle path. Args: state: MasterState from the \'AdvanceMaster\' op that advances the underlying master to this component. network_states: Dictionary of component NetworkState objects. during_training: Whether the graph is being constructed during training. Returns: state handle: Final state after advancing.'
def _extract_feature_ids(self, state, network_states, during_training):
logging.info('Building component: %s', self.spec.name)
if during_training:
  stride = state.current_batch_size * self.training_beam_size
else:
  stride = state.current_batch_size * self.inference_beam_size

with tf.variable_scope(self.name, reuse=True):
  state.handle, ids = extract_fixed_feature_ids(self, state, stride)

with tf.variable_scope(self.name, reuse=True):
  tensors = self.network.create(
      ids, [], None, None, during_training, stride=stride)
update_network_states(self, tensors, network_states, stride)
return state.handle
'Advances a batch using oracle paths, returning the overall CE cost. Args: state: MasterState from the \'AdvanceMaster\' op that advances the underlying master to this component. network_states: dictionary of component NetworkState objects Returns: (state handle, cost, correct, total): TF ops corresponding to the final state after unrolling, the total cost, the total number of correctly predicted actions, and the total number of actions. Raises: RuntimeError: if fixed features are configured.'
def build_greedy_training(self, state, network_states):
logging.info('Building component: %s', self.spec.name)
if self.spec.fixed_feature:
  raise RuntimeError(
      'Fixed features are not compatible with bulk annotation. '
      'Use the "bulk-features" component instead.')

linked_embeddings = [
    fetch_linked_embedding(self, network_states, spec)
    for spec in self.spec.linked_feature
]

stride = state.current_batch_size * self.training_beam_size
with tf.variable_scope(self.name, reuse=True):
  network_tensors = self.network.create([], linked_embeddings, None, None,
                                        True, stride)
update_network_states(self, network_tensors, network_states, stride)

logits = self.network.get_logits(network_tensors)
state.handle, gold = dragnn_ops.bulk_advance_from_oracle(
    state.handle, component=self.name)
cost, correct, total = build_cross_entropy_loss(logits, gold)
cost = self.add_regularizer(cost)

return state.handle, cost, correct, total
'Annotates a batch of documents using network scores. Args: state: MasterState from the \'AdvanceMaster\' op that advances the underlying master to this component. network_states: dictionary of component NetworkState objects during_training: whether the graph is being constructed during training Returns: Handle to the state once inference is complete for this Component. Raises: RuntimeError: if fixed features are configured'
def build_greedy_inference(self, state, network_states, during_training=False):
logging.info('Building component: %s', self.spec.name)
if self.spec.fixed_feature:
  raise RuntimeError(
      'Fixed features are not compatible with bulk annotation. '
      'Use the "bulk-features" component instead.')

linked_embeddings = [
    fetch_linked_embedding(self, network_states, spec)
    for spec in self.spec.linked_feature
]

if during_training:
  stride = state.current_batch_size * self.training_beam_size
else:
  stride = state.current_batch_size * self.inference_beam_size

with tf.variable_scope(self.name, reuse=True):
  network_tensors = self.network.create([], linked_embeddings, None, None,
                                        during_training, stride)
update_network_states(self, network_tensors, network_states, stride)

logits = self.network.get_logits(network_tensors)
return dragnn_ops.bulk_advance_from_prediction(
    state.handle, logits, component=self.name)
'Initializes the LSTM base class. Parameters used: hidden_layer_sizes: Comma-delimited number of hidden units for each layer. input_dropout_rate (-1.0): Input dropout rate for each layer. If < 0.0, use the global |dropout_rate| hyperparameter. recurrent_dropout_rate (0.8): Recurrent dropout rate. If < 0.0, use the global |recurrent_dropout_rate| hyperparameter. layer_norm (True): Whether or not to use layer norm. Hyperparameters used: dropout_rate: Input dropout rate. recurrent_dropout_rate: Recurrent dropout rate. Args: component: parent ComponentBuilderBase object. additional_attr_defaults: Additional attributes for use by derived class.'
def __init__(self, component, additional_attr_defaults=None):
attr_defaults = additional_attr_defaults or {}
attr_defaults.update({
    'layer_norm': True,
    'input_dropout_rate': -1.0,
    'recurrent_dropout_rate': 0.8,
    'hidden_layer_sizes': '256',
})
self._attrs = dragnn.get_attrs_with_defaults(
    component.spec.network_unit.parameters, defaults=attr_defaults)

self._hidden_layer_sizes = map(int,
                               self._attrs['hidden_layer_sizes'].split(','))

self._input_dropout_rate = self._attrs['input_dropout_rate']
if self._input_dropout_rate < 0.0:
  self._input_dropout_rate = component.master.hyperparams.dropout_rate

self._recurrent_dropout_rate = self._attrs['recurrent_dropout_rate']
if self._recurrent_dropout_rate < 0.0:
  self._recurrent_dropout_rate = (
      component.master.hyperparams.recurrent_dropout_rate)
if self._recurrent_dropout_rate < 0.0:
  self._recurrent_dropout_rate = component.master.hyperparams.dropout_rate

tf.logging.info('[%s] input_dropout_rate=%s recurrent_dropout_rate=%s',
                component.name, self._input_dropout_rate,
                self._recurrent_dropout_rate)

layers, context_layers = self.create_hidden_layers(component,
                                                   self._hidden_layer_sizes)
last_layer_dim = layers[-1].dim
layers.append(
    dragnn.Layer(component, name='last_layer', dim=last_layer_dim))
layers.append(
    dragnn.Layer(component, name='logits', dim=component.num_actions))
super(BaseLSTMNetwork, self).__init__(
    component, init_layers=layers, init_context_layers=context_layers)

self._params.append(
    tf.get_variable(
        'weights_softmax', [last_layer_dim, component.num_actions],
        initializer=tf.random_normal_initializer(stddev=0.0001)))
self._params.append(
    tf.get_variable(
        'bias_softmax', [component.num_actions],
        initializer=tf.zeros_initializer()))
'Returns the logits for prediction.'
def get_logits(self, network_tensors):
return network_tensors[self.get_layer_index('logits')]
'Creates hidden network layers. Args: component: Parent ComponentBuilderBase object. hidden_layer_sizes: List of requested hidden layer activation sizes. Returns: layers: List of layers created by this network. context_layers: List of context layers created by this network.'
@abc.abstractmethod def create_hidden_layers(self, component, hidden_layer_sizes):
pass
'Appends layers defined by the base class to the |hidden_layers|.'
def _append_base_layers(self, hidden_layers):
last_layer = hidden_layers[(-1)] logits = tf.nn.xw_plus_b(last_layer, self._component.get_variable('weights_softmax'), self._component.get_variable('bias_softmax')) return (hidden_layers + [last_layer, logits])
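The 'logits' layer appended here is simply an affine map of the last hidden layer: tf.nn.xw_plus_b(x, w, b) computes x @ w + b. A NumPy sketch with assumed toy sizes:

import numpy as np

batch, last_dim, num_actions = 3, 8, 5                     # assumed toy sizes
last_layer = np.random.randn(batch, last_dim)
weights_softmax = np.random.randn(last_dim, num_actions)
bias_softmax = np.zeros(num_actions)
logits = last_layer.dot(weights_softmax) + bias_softmax    # tf.nn.xw_plus_b(x, w, b) == x @ w + b
assert logits.shape == (batch, num_actions)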
'Creates a single LSTM cell, possibly with dropout. Requires that BaseLSTMNetwork.__init__() was called. Args: num_units: Number of hidden units in the cell. during_training: Whether to create a cell for training (vs inference). Returns: A RNNCell of the requested size, possibly with dropout.'
def _create_cell(self, num_units, during_training):
if (not during_training): return tf.contrib.rnn.LayerNormBasicLSTMCell(num_units, layer_norm=self._attrs['layer_norm'], reuse=True) cell = tf.contrib.rnn.LayerNormBasicLSTMCell(num_units, dropout_keep_prob=self._recurrent_dropout_rate, layer_norm=self._attrs['layer_norm']) cell = tf.contrib.rnn.DropoutWrapper(cell, input_keep_prob=self._input_dropout_rate) return cell
'Creates a list of LSTM cells for training.'
def _create_train_cells(self):
return [self._create_cell(num_units, during_training=True) for num_units in self._hidden_layer_sizes]
'Creates a list of LSTM cells for inference.'
def _create_inference_cells(self):
return [self._create_cell(num_units, during_training=False) for num_units in self._hidden_layer_sizes]
'Captures variables created by a function in |self._params|. Args: function: Function whose variables should be captured. The function should take one argument, its enclosing variable scope.'
def _capture_variables_as_params(self, function):
created_vars = {} def _custom_getter(getter, *args, **kwargs): 'Calls the real getter and captures its result in |created_vars|.' real_variable = getter(*args, **kwargs) created_vars[real_variable.name] = real_variable return real_variable with tf.variable_scope('cell', reuse=None, custom_getter=_custom_getter) as scope: function(scope) self._params.extend(created_vars.values())
'Applies a function using previously-captured variables. Args: function: Function to apply using captured variables. The function should take one argument, its enclosing variable scope. Returns: Results of function application.'
def _apply_with_captured_variables(self, function):
def _custom_getter(getter, *args, **kwargs): 'Retrieves the normal or moving-average variables.' return self._component.get_variable(var_params=getter(*args, **kwargs)) with tf.variable_scope('cell', reuse=True, custom_getter=_custom_getter) as scope: return function(scope)
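Together, _capture_variables_as_params() and _apply_with_captured_variables() rely on TF 1.x custom getters: the first records every variable the cell creates, and the second re-runs the cell under a getter that swaps in the component's (possibly moving-averaged) copies. A minimal sketch of the recording half, assuming a TF 1.x graph-mode environment:

import tensorflow as tf   # assumes a TF 1.x graph-mode environment

captured = {}

def _recording_getter(getter, *args, **kwargs):
    # Call the real getter, then remember every variable it hands back.
    variable = getter(*args, **kwargs)
    captured[variable.name] = variable
    return variable

with tf.Graph().as_default():
    with tf.variable_scope('cell', custom_getter=_recording_getter):
        tf.get_variable('weights', shape=[3, 3])

print(sorted(captured))   # ['cell/weights:0']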
'Sets up context and output layers, as well as a final softmax.'
def __init__(self, component):
super(LayerNormBasicLSTMNetwork, self).__init__(component) self._train_cell = tf.contrib.rnn.MultiRNNCell(self._create_train_cells()) self._inference_cell = tf.contrib.rnn.MultiRNNCell(self._create_inference_cells()) def _cell_closure(scope): 'Applies the LSTM cell to placeholder inputs and state.' placeholder_inputs = tf.placeholder(dtype=tf.float32, shape=(1, self._concatenated_input_dim)) placeholder_substates = [] for num_units in self._hidden_layer_sizes: placeholder_substate = tf.contrib.rnn.LSTMStateTuple(tf.placeholder(dtype=tf.float32, shape=(1, num_units)), tf.placeholder(dtype=tf.float32, shape=(1, num_units))) placeholder_substates.append(placeholder_substate) placeholder_state = tuple(placeholder_substates) self._train_cell(inputs=placeholder_inputs, state=placeholder_state, scope=scope) self._capture_variables_as_params(_cell_closure)
'See base class.'
def create_hidden_layers(self, component, hidden_layer_sizes):
layers = [] for (index, num_units) in enumerate(hidden_layer_sizes): layers.append(dragnn.Layer(component, name=('state_c_%d' % index), dim=num_units)) layers.append(dragnn.Layer(component, name=('state_h_%d' % index), dim=num_units)) context_layers = list(layers) return (layers, context_layers)
'See base class.'
def create(self, fixed_embeddings, linked_embeddings, context_tensor_arrays, attention_tensor, during_training, stride=None):
check.Eq(len(context_tensor_arrays), (2 * len(self._hidden_layer_sizes)), 'require two context tensors per hidden layer') length = context_tensor_arrays[0].size() substates = [] for (index, num_units) in enumerate(self._hidden_layer_sizes): state_c = context_tensor_arrays[(2 * index)].read((length - 1)) state_h = context_tensor_arrays[((2 * index) + 1)].read((length - 1)) state_c.set_shape([tf.Dimension(None), num_units]) state_h.set_shape([tf.Dimension(None), num_units]) substates.append(tf.contrib.rnn.LSTMStateTuple(state_c, state_h)) state = tuple(substates) input_tensor = dragnn.get_input_tensor(fixed_embeddings, linked_embeddings) cell = (self._train_cell if during_training else self._inference_cell) def _cell_closure(scope): 'Applies the LSTM cell to the current inputs and state.' return cell(input_tensor, state, scope) (unused_h, state) = self._apply_with_captured_variables(_cell_closure) output_tensors = [] for new_substate in state: (new_c, new_h) = new_substate output_tensors.append(new_c) output_tensors.append(new_h) return self._append_base_layers(output_tensors)
'Initializes the bulk bi-LSTM. Parameters used: parallel_iterations (1): Parallelism of the underlying tf.while_loop(). Defaults to 1 thread to encourage deterministic behavior, but can be increased to trade memory for speed. Args: component: parent ComponentBuilderBase object.'
def __init__(self, component):
super(BulkBiLSTMNetwork, self).__init__(component, additional_attr_defaults={'parallel_iterations': 1}) check.In('lengths', self._linked_feature_dims, 'Missing required linked feature') check.Eq(self._linked_feature_dims['lengths'], 1, 'Wrong dimension for "lengths" feature') self._input_dim = (self._concatenated_input_dim - 1) self._output_dim = self.get_layer_size('outputs') tf.logging.info('[%s] Bulk bi-LSTM with input_dim=%d output_dim=%d', component.name, self._input_dim, self._output_dim) self._train_cells_forward = self._create_train_cells() self._train_cells_backward = self._create_train_cells() self._inference_cells_forward = self._create_inference_cells() self._inference_cells_backward = self._create_inference_cells() def _bilstm_closure(scope): 'Applies the bi-LSTM to placeholder inputs and lengths.' (stride, steps) = (1, 1) placeholder_inputs = tf.placeholder(dtype=tf.float32, shape=[stride, steps, self._input_dim]) placeholder_lengths = tf.placeholder(dtype=tf.int64, shape=[stride]) tf.contrib.rnn.stack_bidirectional_dynamic_rnn(self._train_cells_forward, self._train_cells_backward, placeholder_inputs, dtype=tf.float32, sequence_length=placeholder_lengths, scope=scope) self._capture_variables_as_params(_bilstm_closure) for (index, num_units) in enumerate(self._hidden_layer_sizes): for direction in ['forward', 'backward']: for substate in ['c', 'h']: self._params.append(tf.get_variable(('initial_state_%s_%s_%d' % (direction, substate, index)), [1, num_units], dtype=tf.float32, initializer=tf.constant_initializer(0.0)))
'See base class.'
def create_hidden_layers(self, component, hidden_layer_sizes):
dim = (2 * hidden_layer_sizes[(-1)]) return ([dragnn.Layer(component, name='outputs', dim=dim)], [])
'Requires |stride|; otherwise see base class.'
def create(self, fixed_embeddings, linked_embeddings, context_tensor_arrays, attention_tensor, during_training, stride=None):
check.NotNone(stride, 'BulkBiLSTMNetwork requires "stride" and must be called in the bulk feature extractor component.') lengths = dragnn.lookup_named_tensor('lengths', linked_embeddings) lengths_s = tf.squeeze(lengths.tensor, [1]) linked_embeddings = [named_tensor for named_tensor in linked_embeddings if (named_tensor.name != 'lengths')] inputs_sxnxd = dragnn.get_input_tensor_with_stride(fixed_embeddings, linked_embeddings, stride) inputs_sxnxd.set_shape([tf.Dimension(None), tf.Dimension(None), self._input_dim]) (initial_states_forward, initial_states_backward) = self._create_initial_states(stride) if during_training: cells_forward = self._train_cells_forward cells_backward = self._train_cells_backward else: cells_forward = self._inference_cells_forward cells_backward = self._inference_cells_backward def _bilstm_closure(scope): 'Applies the bi-LSTM to the current inputs.' (outputs_sxnxd, _, _) = tf.contrib.rnn.stack_bidirectional_dynamic_rnn(cells_forward, cells_backward, inputs_sxnxd, initial_states_fw=initial_states_forward, initial_states_bw=initial_states_backward, sequence_length=lengths_s, parallel_iterations=self._attrs['parallel_iterations'], scope=scope) return outputs_sxnxd outputs_sxnxd = self._apply_with_captured_variables(_bilstm_closure) outputs_snxd = tf.reshape(outputs_sxnxd, [(-1), self._output_dim]) return self._append_base_layers([outputs_snxd])
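The bulk bi-LSTM's single 'outputs' layer has dimension 2 * hidden_layer_sizes[-1] because the stacked bidirectional RNN concatenates the forward and backward outputs of its top layer; the result is then flattened from [stride, steps, 2h] to [stride * steps, 2h]. A NumPy sketch of just that shape bookkeeping, with assumed toy sizes (the RNN itself is elided):

import numpy as np

b, n, h = 2, 4, 3                                    # assumed toy sizes: batch, steps, per-direction hidden size
fw = np.random.randn(b, n, h)                        # forward outputs of the top layer
bw = np.random.randn(b, n, h)                        # backward outputs of the top layer
outputs_bxnx2h = np.concatenate([fw, bw], axis=-1)   # what the stacked bi-RNN emits for its top layer
outputs_bnx2h = outputs_bxnx2h.reshape(-1, 2 * h)    # the 'outputs' layer, one row per token
assert outputs_bnx2h.shape == (b * n, 2 * h)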
'Returns stacked and batched initial states for the bi-LSTM.'
def _create_initial_states(self, stride):
initial_states_forward = [] initial_states_backward = [] for index in range(len(self._hidden_layer_sizes)): states_sxd = [] for direction in ['forward', 'backward']: for substate in ['c', 'h']: state_1xd = self._component.get_variable(('initial_state_%s_%s_%d' % (direction, substate, index))) state_sxd = tf.tile(state_1xd, [stride, 1]) states_sxd.append(state_sxd) initial_states_forward.append(tf.contrib.rnn.LSTMStateTuple(states_sxd[0], states_sxd[1])) initial_states_backward.append(tf.contrib.rnn.LSTMStateTuple(states_sxd[2], states_sxd[3])) return (initial_states_forward, initial_states_backward)
'Initializes weights and layers. Args: component: Parent ComponentBuilderBase object.'
def __init__(self, component):
super(BiaffineDigraphNetwork, self).__init__(component) check.Eq(len(self._fixed_feature_dims.items()), 0, 'Expected no fixed features') check.Eq(len(self._linked_feature_dims.items()), 2, 'Expected two linked features') check.In('sources', self._linked_feature_dims, 'Missing required linked feature') check.In('targets', self._linked_feature_dims, 'Missing required linked feature') self._source_dim = self._linked_feature_dims['sources'] self._target_dim = self._linked_feature_dims['targets'] self._weights = [] self._weights.append(tf.get_variable('weights_arc', [self._source_dim, self._target_dim], tf.float32, tf.random_normal_initializer(stddev=0.0001))) self._weights.append(tf.get_variable('weights_source', [self._source_dim], tf.float32, tf.random_normal_initializer(stddev=0.0001))) self._weights.append(tf.get_variable('root', [self._source_dim], tf.float32, tf.random_normal_initializer(stddev=0.0001))) self._params.extend(self._weights) self._regularized_weights.extend(self._weights) self._layers.append(network_units.Layer(self, 'adjacency', (-1)))
'Requires |stride|; otherwise see base class.'
def create(self, fixed_embeddings, linked_embeddings, context_tensor_arrays, attention_tensor, during_training, stride=None):
check.NotNone(stride, 'BiaffineDigraphNetwork requires "stride" and must be called in the bulk feature extractor component.') del during_training weights_arc = self._component.get_variable('weights_arc') weights_source = self._component.get_variable('weights_source') root = self._component.get_variable('root') sources = network_units.lookup_named_tensor('sources', linked_embeddings) targets = network_units.lookup_named_tensor('targets', linked_embeddings) source_tokens_bxnxs = tf.reshape(sources.tensor, [stride, (-1), self._source_dim]) target_tokens_bxnxt = tf.reshape(targets.tensor, [stride, (-1), self._target_dim]) num_tokens = tf.shape(source_tokens_bxnxs)[1] arcs_bxnxn = digraph_ops.ArcPotentialsFromTokens(source_tokens_bxnxs, target_tokens_bxnxt, weights_arc) sources_bxnxn = digraph_ops.ArcSourcePotentialsFromTokens(source_tokens_bxnxs, weights_source) roots_bxn = digraph_ops.RootPotentialsFromTokens(root, target_tokens_bxnxt, weights_arc) adjacency_bxnxn = digraph_ops.CombineArcAndRootPotentials((arcs_bxnxn + sources_bxnxn), roots_bxn) return [tf.reshape(adjacency_bxnxn, [(-1), num_tokens])]
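The adjacency matrix built here scores every candidate arc with a bilinear form plus a per-source bias, and uses the diagonal to score attachment to the artificial root. A NumPy re-implementation of the formulas as I read the digraph_ops calls (toy sizes, random weights; this sketch treats CombineArcAndRootPotentials as placing the root potentials on the diagonal):

import numpy as np

b, n, s_dim, t_dim = 2, 5, 6, 6           # assumed toy sizes: batch, tokens, source/target dims
sources = np.random.randn(b, n, s_dim)    # 'sources' linked feature, reshaped to [b, n, s_dim]
targets = np.random.randn(b, n, t_dim)    # 'targets' linked feature, reshaped to [b, n, t_dim]
w_arc = np.random.randn(s_dim, t_dim)     # weights_arc
w_source = np.random.randn(s_dim)         # weights_source
root = np.random.randn(s_dim)             # root embedding

arcs = np.einsum('bis,st,bjt->bij', sources, w_arc, targets)    # bilinear potential between source i and target j
arcs += np.einsum('bis,s->bi', sources, w_source)[:, :, None]   # per-source bias, broadcast over all targets
roots = np.einsum('s,st,bjt->bj', root, w_arc, targets)         # root potential for each target token

adjacency = arcs.copy()
idx = np.arange(n)
adjacency[:, idx, idx] = roots            # diagonal carries the root potentials (my reading of the op)
logits = adjacency.reshape(-1, n)         # matches the final [stride * n, n] reshape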
'Initializes weights and layers. Args: component: Parent ComponentBuilderBase object.'
def __init__(self, component):
super(BiaffineLabelNetwork, self).__init__(component) parameters = component.spec.network_unit.parameters self._num_labels = int(parameters['num_labels']) check.Gt(self._num_labels, 0, 'Expected some labels') check.Eq(len(self._fixed_feature_dims.items()), 0, 'Expected no fixed features') check.Eq(len(self._linked_feature_dims.items()), 2, 'Expected two linked features') check.In('sources', self._linked_feature_dims, 'Missing required linked feature') check.In('targets', self._linked_feature_dims, 'Missing required linked feature') self._source_dim = self._linked_feature_dims['sources'] self._target_dim = self._linked_feature_dims['targets'] self._weights = [] self._weights.append(tf.get_variable('weights_pair', [self._num_labels, self._source_dim, self._target_dim], tf.float32, tf.random_normal_initializer(stddev=0.0001))) self._weights.append(tf.get_variable('weights_source', [self._num_labels, self._source_dim], tf.float32, tf.random_normal_initializer(stddev=0.0001))) self._weights.append(tf.get_variable('weights_target', [self._num_labels, self._target_dim], tf.float32, tf.random_normal_initializer(stddev=0.0001))) self._biases = [] self._biases.append(tf.get_variable('biases', [self._num_labels], tf.float32, tf.random_normal_initializer(stddev=0.0001))) self._params.extend((self._weights + self._biases)) self._regularized_weights.extend(self._weights) self._layers.append(network_units.Layer(self, 'labels', self._num_labels))
'Requires |stride|; otherwise see base class.'
def create(self, fixed_embeddings, linked_embeddings, context_tensor_arrays, attention_tensor, during_training, stride=None):
check.NotNone(stride, 'BiaffineLabelNetwork requires "stride" and must be called in the bulk feature extractor component.') del during_training weights_pair = self._component.get_variable('weights_pair') weights_source = self._component.get_variable('weights_source') weights_target = self._component.get_variable('weights_target') biases = self._component.get_variable('biases') sources = network_units.lookup_named_tensor('sources', linked_embeddings) targets = network_units.lookup_named_tensor('targets', linked_embeddings) sources_bxnxs = tf.reshape(sources.tensor, [stride, (-1), self._source_dim]) targets_bxnxt = tf.reshape(targets.tensor, [stride, (-1), self._target_dim]) pairs_bxnxl = digraph_ops.LabelPotentialsFromTokenPairs(sources_bxnxs, targets_bxnxt, weights_pair) sources_bxnxl = digraph_ops.LabelPotentialsFromTokens(sources_bxnxs, weights_source) targets_bxnxl = digraph_ops.LabelPotentialsFromTokens(targets_bxnxt, weights_target) labels_bxnxl = (((pairs_bxnxl + sources_bxnxl) + targets_bxnxl) + biases) return [tf.reshape(labels_bxnxl, [(-1), self._num_labels])]
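Label scoring is a per-label biaffine form over each aligned (source, target) pair, plus per-label linear terms and a bias. A NumPy einsum sketch of the same formulas under assumed toy dimensions (not the actual digraph_ops kernels):

import numpy as np

b, n, s_dim, t_dim, num_labels = 2, 5, 6, 6, 7        # assumed toy sizes
sources = np.random.randn(b, n, s_dim)
targets = np.random.randn(b, n, t_dim)
w_pair = np.random.randn(num_labels, s_dim, t_dim)    # weights_pair
w_src = np.random.randn(num_labels, s_dim)            # weights_source
w_tgt = np.random.randn(num_labels, t_dim)            # weights_target
biases = np.zeros(num_labels)

pairs = np.einsum('bis,lst,bit->bil', sources, w_pair, targets)   # biaffine term, per label
labels = (pairs
          + np.einsum('bis,ls->bil', sources, w_src)              # per-label linear term on sources
          + np.einsum('bit,lt->bil', targets, w_tgt)              # per-label linear term on targets
          + biases)
logits = labels.reshape(-1, num_labels)   # matches the final [stride * n, num_labels] reshape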
'Reads a single batch of sentences.'
def read(self):
if self._session: (sentences, is_last) = self._session.run([self._source, self._is_last]) if is_last: self._session.close() self._session = None else: (sentences, is_last) = ([], True) return (sentences, is_last)
'Reads the entire corpus and returns it as a list.'
def corpus(self):
tf.logging.info('Reading corpus...') corpus = [] while True: (sentences, is_last) = self.read() corpus.extend(sentences) if is_last: break tf.logging.info(('Read %d sentences.' % len(corpus))) return corpus
'Adds a sentence to the corpus.'
def _add_sentence(self, tags, heads, labels, corpus):
sentence = sentence_pb2.Sentence() for (tag, head, label) in zip(tags, heads, labels): sentence.token.add(word='x', start=0, end=0, tag=tag, head=head, label=label) corpus.append(sentence.SerializeToString())
'Assert that an object has zero length. Args: container: Anything that implements the collections.Sized interface. msg: Optional message to report on failure.'
def assertEmpty(self, container, msg=None):
if (not isinstance(container, collections.Sized)): self.fail('Expected a Sized object, got: {!r}'.format(type(container).__name__), msg) if len(container): self.fail('{!r} has length of {}.'.format(container, len(container)), msg)
'Assert that an object has non-zero length. Args: container: Anything that implements the collections.Sized interface. msg: Optional message to report on failure.'
def assertNotEmpty(self, container, msg=None):
if (not isinstance(container, collections.Sized)): self.fail('Expected a Sized object, got: {!r}'.format(type(container).__name__), msg) if (not len(container)): self.fail('{!r} has length of 0.'.format(container), msg)
'Tests the default hyperparameter settings.'
def testTraining(self):
self.RunTraining(self.MakeHyperparams())
'Adds code coverage for gradient clipping.'
def testTrainingWithGradientClipping(self):
self.RunTraining(self.MakeHyperparams(gradient_clip_norm=1.25))
'Adds code coverage for ADAM and the use of moving averaging.'
def testTrainingWithAdamAndAveraging(self):
self.RunTraining(self.MakeHyperparams(learning_method='adam', use_moving_average=True))
'Adds code coverage for CompositeOptimizer.'
def testTrainingWithCompositeOptimizer(self):
grid_point = self.MakeHyperparams(learning_method='composite') grid_point.composite_optimizer_spec.method1.learning_method = 'adam' grid_point.composite_optimizer_spec.method2.learning_method = 'momentum' grid_point.composite_optimizer_spec.method2.momentum = 0.9 self.RunTraining(grid_point)
'Checks that ops ending up at root are called in the expected order. To check the order, we find a path along the directed graph formed by the inputs of each op. If op X has a chain of inputs to op Y, then X cannot be executed before Y. There may be multiple paths between any two ops, but the ops along any path are executed in that order. Therefore, we look up the expected ops in reverse order. Args: name: string name of the endpoint, for logging. endpoint: node whose execution we want to check. expected_op_order: string list of op types, in the order we expect them to be executed leading up to `endpoint`.'
def checkOpOrder(self, name, endpoint, expected_op_order):
for target in reversed(expected_op_order): path = _find_input_path_to_type(endpoint, target) self.assertNotEmpty(path) logging.info('path[%d] from %s to %s: %s', len(path), name, target, [_as_op(x).type for x in path]) endpoint = path[(-1)]
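checkOpOrder() depends on a helper, _find_input_path_to_type(), whose definition is not shown here. A hypothetical re-implementation consistent with how these tests use it: a depth-first walk over each op's data and control inputs that returns a path ending at the first op of the requested type, or an empty list when no such op feeds the endpoint:

def find_input_path_to_type(endpoint, target_type):
    # Hypothetical stand-in for _find_input_path_to_type(); endpoint may be a Tensor or an Operation.
    start = endpoint.op if hasattr(endpoint, 'op') else endpoint
    stack, seen = [[start]], set()
    while stack:
        path = stack.pop()
        op = path[-1]
        if op.type == target_type:
            return path                              # path from the endpoint's op down to the target op
        if op in seen:
            continue
        seen.add(op)
        for tensor in op.inputs:                     # data dependencies
            stack.append(path + [tensor.op])
        for control_op in op.control_inputs:         # control dependencies
            stack.append(path + [control_op])
    return []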
'Generates a MasterBuilder and TrainTarget based on a simple spec.'
def getBuilderAndTarget(self, test_name, master_spec_path='simple_parser_master_spec.textproto'):
master_spec = self.LoadSpec(master_spec_path) hyperparam_config = spec_pb2.GridPoint() target = spec_pb2.TrainTarget() target.name = ('test-%s-train' % test_name) target.component_weights.extend(([0] * len(master_spec.component))) target.component_weights[(-1)] = 1.0 target.unroll_using_oracle.extend(([False] * len(master_spec.component))) target.unroll_using_oracle[(-1)] = True builder = graph_builder.MasterBuilder(master_spec, hyperparam_config, pool_scope=test_name) return (builder, target)
'Checks that GetSession and ReleaseSession are called in order.'
def testGetSessionReleaseSession(self):
test_name = 'get-session-release-session' with tf.Graph().as_default(): (builder, target) = self.getBuilderAndTarget(test_name) train = builder.add_training_from_config(target) anno = builder.add_annotation(test_name) path = _find_input_path_to_type(train['run'], 'foo') self.assertEmpty(path) self.checkOpOrder('train', train['run'], ['GetSession', 'ReleaseSession']) self.checkOpOrder('annotations', anno['annotations'], ['GetSession', 'ReleaseSession'])
'Checks that train[\'run\'] and \'annotations\' call AttachDataReader.'
def testAttachDataReader(self):
test_name = 'attach-data-reader' with tf.Graph().as_default(): (builder, target) = self.getBuilderAndTarget(test_name) train = builder.add_training_from_config(target) anno = builder.add_annotation(test_name) self.checkOpOrder('train', train['run'], ['GetSession', 'AttachDataReader', 'ReleaseSession']) self.checkOpOrder('annotations', anno['annotations'], ['GetSession', 'AttachDataReader', 'ReleaseSession'])
'Checks that \'annotations\' doesn\'t call SetTracing if disabled.'
def testSetTracingFalse(self):
test_name = 'set-tracing-false' with tf.Graph().as_default(): (builder, _) = self.getBuilderAndTarget(test_name) anno = builder.add_annotation(test_name, enable_tracing=False) path = _find_input_path_to_type(anno['annotations'], 'ReleaseSession') self.assertNotEmpty(path) path = _find_input_path_to_type(path[(-1)], 'AttachDataReader') self.assertNotEmpty(path) set_tracing_path = _find_input_path_to_type(path[(-1)], 'SetTracing') self.assertEmpty(set_tracing_path) path = _find_input_path_to_type(path[(-1)], 'GetSession') self.assertNotEmpty(path)
'Checks that \'annotations\' does call SetTracing if enabled.'
def testSetTracingTrue(self):
test_name = 'set-tracing-true' with tf.Graph().as_default(): (builder, _) = self.getBuilderAndTarget(test_name) anno = builder.add_annotation(test_name, enable_tracing=True) self.checkOpOrder('annotations', anno['annotations'], ['GetSession', 'SetTracing', 'AttachDataReader', 'ReleaseSession']) self.checkOpOrder('traces', anno['traces'], ['GetSession', 'SetTracing', 'AttachDataReader', 'ReleaseSession'])
'Creates ops for converting the input to either format. If \'tensor\' is used, then a conversion from [stride * steps, dim] to [steps + 1, stride, dim] is performed for dynamic_tensor reads. If \'array\' is used, then a conversion from [steps + 1, stride, dim] to [stride * steps, dim] is performed for bulk_tensor reads. Args: tensor: Bulk tensor input. array: TensorArray dynamic input. stride: stride of bulk tensor. Not used for dynamic. dim: dim of bulk tensor. Not used for dynamic.'
def __init__(self, tensor=None, array=None, stride=None, dim=None):
if (tensor is not None): check.IsNone(array, 'Cannot initialize from tensor and array') check.NotNone(stride, 'Stride is required for bulk tensor') check.NotNone(dim, 'Dim is required for bulk tensor') self._bulk_tensor = tensor with tf.name_scope('convert_to_dyn'): tensor = tf.reshape(tensor, [stride, (-1), dim]) tensor = tf.transpose(tensor, perm=[1, 0, 2]) pad = tf.zeros([1, stride, dim], dtype=tensor.dtype) self._array_tensor = tf.concat([pad, tensor], 0) if (array is not None): check.IsNone(tensor, 'Cannot initialize from both tensor and array') with tf.name_scope('convert_to_bulk'): self._bulk_tensor = convert_network_state_tensorarray(array) with tf.name_scope('convert_to_dyn'): self._array_tensor = array.stack()
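The two layouts differ only in shape and in a zero "step 0" pad row: bulk tensors are [stride * steps, dim], while the dynamic TensorArray view is [steps + 1, stride, dim]. A NumPy sketch of the bulk-to-dynamic direction with assumed toy sizes (the reverse step below is the shape-level inverse, which the real code performs via convert_network_state_tensorarray):

import numpy as np

stride, steps, dim = 2, 3, 4                                    # assumed toy sizes
bulk = np.arange(stride * steps * dim, dtype=np.float32).reshape(stride * steps, dim)

dyn = bulk.reshape(stride, steps, dim).transpose(1, 0, 2)       # [steps, stride, dim]
pad = np.zeros((1, stride, dim), dtype=np.float32)              # zero "step 0" row
dyn = np.concatenate([pad, dyn], axis=0)                        # [steps + 1, stride, dim]
assert dyn.shape == (steps + 1, stride, dim)

recovered = dyn[1:].transpose(1, 0, 2).reshape(stride * steps, dim)   # drop pad, transpose, flatten
assert np.array_equal(recovered, bulk)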
'Inits NamedTensor with tensor, name and optional dim.'
def __init__(self, tensor, name, dim=None):
self.tensor = tensor self.name = name self.dim = dim
'Construct variables to normalize an input of given shape. Arguments: component: ComponentBuilder handle. name: Human readable name to organize the variables. shape: Shape of the layer to be normalized. dtype: Type of the layer to be normalized.'
def __init__(self, component, name, shape, dtype):
self._name = name self._shape = shape self._component = component beta = tf.get_variable(('beta_%s' % name), shape=shape, dtype=dtype, initializer=tf.zeros_initializer()) gamma = tf.get_variable(('gamma_%s' % name), shape=shape, dtype=dtype, initializer=tf.ones_initializer()) self._params = [beta, gamma]
'Apply normalization to input. The shape must match the declared shape in the constructor. [This is copied from tf.contrib.rnn.LayerNormBasicLSTMCell.] Args: inputs: Input tensor Returns: Normalized version of input tensor. Raises: ValueError: if inputs has undefined rank.'
def normalize(self, inputs):
inputs_shape = inputs.get_shape() inputs_rank = inputs_shape.ndims if (inputs_rank is None): raise ValueError(('Inputs %s has undefined rank.' % inputs.name)) axis = range(1, inputs_rank) beta = self._component.get_variable(('beta_%s' % self._name)) gamma = self._component.get_variable(('gamma_%s' % self._name)) with tf.variable_scope(('layer_norm_%s' % self._name)): (mean, variance) = nn.moments(inputs, axis, keep_dims=True) variance_epsilon = 1e-12 outputs = nn.batch_normalization(inputs, mean, variance, beta, gamma, variance_epsilon) outputs.set_shape(inputs_shape) return outputs
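normalize() is standard layer normalization: moments are taken over all non-batch axes, and the normalized result is rescaled by the learned gamma and shifted by beta. A NumPy sketch of the same computation (epsilon matching the 1e-12 used above):

import numpy as np

def layer_norm(inputs, beta, gamma, eps=1e-12):
    # Moments over all non-batch axes, as in nn.moments(inputs, range(1, rank), keep_dims=True).
    axes = tuple(range(1, inputs.ndim))
    mean = inputs.mean(axis=axes, keepdims=True)
    var = inputs.var(axis=axes, keepdims=True)
    # nn.batch_normalization: scale by gamma, shift by beta.
    return gamma * (inputs - mean) / np.sqrt(var + eps) + beta

x = np.random.randn(4, 8)
out = layer_norm(x, beta=np.zeros(8), gamma=np.ones(8))
print(out.mean(axis=1))   # approximately 0 for each row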
'Creates a new tensor array to store this layer\'s activations. Arguments: stride: Possibly dynamic batch * beam size with which to initialize the tensor array Returns: TensorArray object'
def create_array(self, stride):
check.Gt(self.dim, 0, 'Cannot create array when dimension is dynamic') tensor_array = ta.TensorArray(dtype=tf.float32, size=0, dynamic_size=True, clear_after_read=False, infer_shape=False, name=('%s_array' % self.name)) initial_value = tf.zeros([stride, self.dim]) return tensor_array.write(0, initial_value)
'Initializes parameters for embedding matrices. The subclass may provide optional lists of initial layers and context layers to allow this base class constructor to use accessors like get_layer_size(), which is required for networks that may be used self-recurrently. Args: component: parent ComponentBuilderBase object. init_layers: optional initial layers. init_context_layers: optional initial context layers.'
def __init__(self, component, init_layers=None, init_context_layers=None):
self._component = component self._params = [] self._layers = (init_layers if init_layers else []) self._regularized_weights = [] self._context_layers = (init_context_layers if init_context_layers else []) self._fixed_feature_dims = {} self._linked_feature_dims = {} for (channel_id, spec) in enumerate(component.spec.fixed_feature): check.NotIn(spec.name, self._fixed_feature_dims, 'Duplicate fixed feature') check.Gt(spec.size, 0, 'Invalid fixed feature size') if (spec.embedding_dim > 0): fixed_dim = spec.embedding_dim self._params.append(add_embeddings(channel_id, spec)) else: fixed_dim = 1 self._fixed_feature_dims[spec.name] = (spec.size * fixed_dim) for (channel_id, spec) in enumerate(component.spec.linked_feature): check.NotIn(spec.name, self._linked_feature_dims, 'Duplicate linked feature') check.Gt(spec.size, 0, 'Invalid linked feature size') if (spec.source_component == component.name): source_array_dim = self.get_layer_size(spec.source_layer) else: source = component.master.lookup_component[spec.source_component] source_array_dim = source.network.get_layer_size(spec.source_layer) if (spec.embedding_dim != (-1)): check.Gt(source_array_dim, 0, 'Cannot embed linked feature with dynamic dimension') self._params.append(tf.get_variable(linked_embeddings_name(channel_id), [(source_array_dim + 1), spec.embedding_dim], initializer=tf.random_normal_initializer(stddev=(1 / (spec.embedding_dim ** 0.5))))) self._linked_feature_dims[spec.name] = (spec.size * spec.embedding_dim) else: self._linked_feature_dims[spec.name] = (spec.size * source_array_dim) input_dims = (self._fixed_feature_dims.values() + self._linked_feature_dims.values()) if any(((x < 0) for x in input_dims)): self._concatenated_input_dim = (-1) else: self._concatenated_input_dim = sum(input_dims) tf.logging.info('component %s concat_input_dim %s', component.name, self._concatenated_input_dim) if self._component.spec.attention_component: attention_source_component = self._component.master.lookup_component[self._component.spec.attention_component] attention_hidden_layer_sizes = map(int, attention_source_component.spec.network_unit.parameters['hidden_layer_sizes'].split(',')) attention_hidden_layer_size = attention_hidden_layer_sizes[(-1)] hidden_layer_sizes = map(int, component.spec.network_unit.parameters['hidden_layer_sizes'].split(',')) hidden_layer_size = hidden_layer_sizes[(-1)] self._params.append(tf.get_variable('attention_weights_pm_0', [attention_hidden_layer_size, hidden_layer_size], initializer=tf.random_normal_initializer(stddev=0.0001))) self._params.append(tf.get_variable('attention_weights_hm_0', [hidden_layer_size, hidden_layer_size], initializer=tf.random_normal_initializer(stddev=0.0001))) self._params.append(tf.get_variable('attention_bias_0', [1, hidden_layer_size], initializer=tf.zeros_initializer())) self._params.append(tf.get_variable('attention_bias_1', [1, hidden_layer_size], initializer=tf.zeros_initializer())) self._params.append(tf.get_variable('attention_weights_pu', [attention_hidden_layer_size, component.num_actions], initializer=tf.random_normal_initializer(stddev=0.0001)))
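The concatenated input dimension is the sum of size * embedding_dim over embedded channels (or size * 1 / size * source-layer dim for non-embedded ones), and becomes -1 as soon as any contributing layer is dynamically sized. A small sketch with hypothetical feature dimensions:

# Hypothetical feature channels; real sizes come from the ComponentSpec.
fixed_feature_dims = {'words': 8 * 64, 'tags': 4 * 32}       # size * embedding_dim for embedded channels
linked_feature_dims = {'stack': 2 * 128, 'lengths': 1 * 1}   # size * embedding_dim, or size * source layer dim
input_dims = list(fixed_feature_dims.values()) + list(linked_feature_dims.values())
# Any dynamically sized contribution (dim < 0) makes the whole concatenation dynamic.
concatenated_input_dim = -1 if any(dim < 0 for dim in input_dims) else sum(input_dims)
print(concatenated_input_dim)   # 897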
'Constructs a feed-forward unit based on the features and context tensors. Args: fixed_embeddings: list of NamedTensor objects linked_embeddings: list of NamedTensor objects context_tensor_arrays: optional list of TensorArray objects used for implicit recurrence. attention_tensor: optional Tensor used for attention. during_training: whether to create a network for training (vs inference). stride: int scalar tensor containing the stride required for bulk computation. Returns: A list of tensors corresponding to the list of layers.'
@abc.abstractmethod def create(self, fixed_embeddings, linked_embeddings, context_tensor_arrays, attention_tensor, during_training, stride=None):
pass
'Gets the index of the given named layer of the network.'
def get_layer_index(self, layer_name):
return [x.name for x in self.layers].index(layer_name)
'Gets the size of the given named layer of the network. Args: layer_name: string name of the layer to look up. Returns: the size of the layer. Raises: KeyError: if the layer_name to look up doesn\'t exist.'
def get_layer_size(self, layer_name):
for layer in self.layers: if (layer.name == layer_name): return layer.dim raise KeyError('Layer {} not found in component {}'.format(layer_name, self._component.name))
'Pulls out the logits from the tensors produced by this unit. Args: network_tensors: list of tensors as output by create(). Raises: NotImplementedError: by default a \'logits\' tensor need not be implemented.'
def get_logits(self, network_tensors):
raise NotImplementedError()