def get_l2_regularized_weights(self):
  """Gets the weights that need to be regularized."""
  return self._regularized_weights
def attention(self, last_layer, attention_tensor):
  """Computes the attention term for the network unit."""
  h_tensor = attention_tensor

  # Map both inputs into a shared dimension before comparing them.
  focus_tensor = tf.nn.tanh(
      tf.matmul(h_tensor,
                self._component.get_variable('attention_weights_pm_0'),
                name='h_x_pm') +
      self._component.get_variable('attention_bias_0'))
  context_tensor = tf.nn.tanh(
      tf.matmul(last_layer,
                self._component.get_variable('attention_weights_hm_0'),
                name='l_x_hm') +
      self._component.get_variable('attention_bias_1'))

  # The tf.multiply broadcasts the context over all attention steps.
  z_vec = tf.reduce_sum(tf.multiply(focus_tensor, context_tensor), 1)
  p_vec = tf.nn.softmax(tf.reshape(z_vec, [1, -1]))
  # Weighted sum of the attention inputs, using the softmax distribution.
  r_vec = tf.expand_dims(
      tf.reduce_sum(
          tf.multiply(h_tensor, tf.reshape(p_vec, [-1, 1]),
                      name='time_together2'), 0), 0)
  return tf.matmul(r_vec,
                   self._component.get_variable('attention_weights_pu'),
                   name='time_together3')
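For orientation, the attention arithmetic above can be mirrored in plain NumPy. The sketch below is illustrative only; the shape comments and argument names are assumptions, not values taken from the model spec.

import numpy as np

def attention_sketch(h, last, w_pm, b_pm, w_hm, b_hm, w_pu):
  # NumPy mirror of the attention term above; shapes are illustrative.
  focus = np.tanh(h.dot(w_pm) + b_pm)        # [steps, attn_dim]
  context = np.tanh(last.dot(w_hm) + b_hm)   # [1, attn_dim], broadcasts below
  z = (focus * context).sum(axis=1)          # [steps]
  e = np.exp(z - z.max())
  p = e / e.sum()                            # softmax over steps
  r = (h * p[:, None]).sum(axis=0, keepdims=True)  # [1, h_dim] weighted sum
  return r.dot(w_pu)                         # projected attention term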
def __init__(self, component):
  """Initializes parameters required to run this network.

  Args:
    component: parent ComponentBuilderBase object.

  Parameters used to construct the network:
    hidden_layer_sizes: comma-separated list of ints, indicating the number
      of hidden units in each hidden layer.
    layer_norm_input (False): Whether or not to apply layer normalization
      on the concatenated input to the network.
    layer_norm_hidden (False): Whether or not to apply layer normalization
      to the first set of hidden layer activations.
    nonlinearity ('relu'): Name of function from module "tf.nn" to apply to
      each hidden layer; e.g., "relu" or "elu".
    dropout_keep_prob (-1.0): The probability that an input is not dropped.
      If >= 1.0, disables dropout.  If < 0.0, uses the global |dropout_rate|
      hyperparameter.
    dropout_per_sequence (False): If true, sample the dropout mask once per
      sequence, instead of once per step.  See Gal and Ghahramani
      (https://arxiv.org/abs/1512.05287).
    dropout_all_layers (False): If true, apply dropout to the input of all
      hidden layers, instead of just applying it to the network input.

  Hyperparameters used:
    dropout_rate: The probability that an input is not dropped.  Only used
      when the |dropout_keep_prob| parameter is negative.
  """
  self._attrs = get_attrs_with_defaults(
      component.spec.network_unit.parameters,
      defaults={
          'hidden_layer_sizes': '',
          'layer_norm_input': False,
          'layer_norm_hidden': False,
          'nonlinearity': 'relu',
          'dropout_keep_prob': -1.0,
          'dropout_per_sequence': False,
          'dropout_all_layers': False
      })
  self._hidden_layer_sizes = (
      map(int, self._attrs['hidden_layer_sizes'].split(','))
      if self._attrs['hidden_layer_sizes'] else [])

  super(FeedForwardNetwork, self).__init__(component)

  self._dropout_rate = self._attrs['dropout_keep_prob']
  if self._dropout_rate < 0.0:
    self._dropout_rate = component.master.hyperparams.dropout_rate

  self._layer_norm_input = None
  self._layer_norm_hidden = None
  if self._attrs['layer_norm_input']:
    self._layer_norm_input = LayerNorm(self._component, 'concat_input',
                                       self._concatenated_input_dim,
                                       tf.float32)
    self._params.extend(self._layer_norm_input.params)
  if self._attrs['layer_norm_hidden']:
    self._layer_norm_hidden = LayerNorm(self._component, 'layer_0',
                                        self._hidden_layer_sizes[0],
                                        tf.float32)
    self._params.extend(self._layer_norm_hidden.params)

  self._nonlinearity = getattr(tf.nn, self._attrs['nonlinearity'])

  self._weights = []
  last_layer_dim = self._concatenated_input_dim
  for index, hidden_layer_size in enumerate(self._hidden_layer_sizes):
    weights = tf.get_variable(
        'weights_%d' % index, [last_layer_dim, hidden_layer_size],
        initializer=tf.random_normal_initializer(stddev=0.0001))
    self._params.append(weights)
    # When layer norm is applied to the first hidden layer, that layer gets
    # its bias from the layer norm instead.
    if index > 0 or self._layer_norm_hidden is None:
      self._params.append(
          tf.get_variable(
              'bias_%d' % index, [hidden_layer_size],
              initializer=tf.constant_initializer(0.2, dtype=tf.float32)))
    self._weights.append(weights)
    self._layers.append(
        Layer(component, name='layer_%d' % index, dim=hidden_layer_size))
    last_layer_dim = hidden_layer_size

  if self._hidden_layer_sizes:
    # Alias the last hidden layer for convenience.
    self._layers.append(Layer(component, 'last_layer', last_layer_dim))

  self._regularized_weights.extend(self._weights)

  if component.num_actions:
    self._params.append(
        tf.get_variable(
            'weights_softmax', [last_layer_dim, component.num_actions],
            initializer=tf.random_normal_initializer(stddev=0.0001)))
    self._params.append(
        tf.get_variable('bias_softmax', [component.num_actions],
                        initializer=tf.zeros_initializer()))
    self._layers.append(
        Layer(component, name='logits', dim=component.num_actions))
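To make the attribute handling above concrete, here is how a hypothetical spec would be interpreted; the values are invented for illustration:

# Hypothetical parameter values, showing how the attrs above are parsed.
attrs = {'hidden_layer_sizes': '64,32', 'dropout_keep_prob': -1.0}
hidden_sizes = map(int, attrs['hidden_layer_sizes'].split(','))  # [64, 32]
# dropout_keep_prob < 0.0 means "defer to the global dropout_rate
# hyperparameter"; a value >= 1.0 would disable dropout entirely.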
def create(self, fixed_embeddings, linked_embeddings, context_tensor_arrays,
           attention_tensor, during_training, stride=None):
  """See base class."""
  input_tensor = get_input_tensor(fixed_embeddings, linked_embeddings)

  if during_training:
    input_tensor.set_shape([None, self._concatenated_input_dim])
    input_tensor = self._maybe_apply_dropout(input_tensor, stride)

  if self._layer_norm_input:
    input_tensor = self._layer_norm_input.normalize(input_tensor)

  tensors = []
  last_layer = input_tensor
  for index, hidden_layer_size in enumerate(self._hidden_layer_sizes):
    acts = tf.matmul(last_layer,
                     self._component.get_variable('weights_%d' % index))

    # Dropout was already applied to the network input above; optionally
    # also apply it to the input of each subsequent hidden layer.
    if during_training and self._attrs['dropout_all_layers'] and index > 0:
      acts.set_shape([None, hidden_layer_size])
      acts = self._maybe_apply_dropout(acts, stride)

    # The first hidden layer uses layer norm in place of a bias, if enabled.
    if index == 0 and self._layer_norm_hidden:
      acts = self._layer_norm_hidden.normalize(acts)
    else:
      acts = tf.nn.bias_add(acts,
                            self._component.get_variable('bias_%d' % index))

    last_layer = self._nonlinearity(acts)
    tensors.append(last_layer)

  if self._hidden_layer_sizes:
    # Alias for the last hidden layer.
    tensors.append(last_layer)

  if self._layers[-1].name == 'logits':
    logits = (
        tf.matmul(last_layer, self._component.get_variable('weights_softmax'))
        + self._component.get_variable('bias_softmax'))
    if self._component.spec.attention_component:
      logits += self.attention(last_layer, attention_tensor)
    logits = tf.identity(logits, name=self._layers[-1].name)
    tensors.append(logits)
  return tensors
def create(self, fixed_embeddings, linked_embeddings, context_tensor_arrays,
           attention_tensor, during_training, stride=None):
  """See base class."""
  input_tensor = get_input_tensor(fixed_embeddings, linked_embeddings)
  # context_tensor_arrays[0] holds the previous hidden states h, and
  # context_tensor_arrays[1] holds the previous cell states c.
  assert len(context_tensor_arrays) == 2
  length = context_tensor_arrays[0].size()

  # Get the (possibly averaged) gate parameters.
  x2i = self._component.get_variable('x2i')
  h2i = self._component.get_variable('h2i')
  c2i = self._component.get_variable('c2i')
  bi = self._component.get_variable('bi')
  x2o = self._component.get_variable('x2o')
  h2o = self._component.get_variable('h2o')
  c2o = self._component.get_variable('c2o')
  bo = self._component.get_variable('bo')
  x2c = self._component.get_variable('x2c')
  h2c = self._component.get_variable('h2c')
  bc = self._component.get_variable('bc')

  i_h_tm1 = context_tensor_arrays[0].read(length - 1)
  i_c_tm1 = context_tensor_arrays[1].read(length - 1)

  if during_training and self._input_dropout_rate < 1:
    input_tensor = tf.nn.dropout(input_tensor, self._input_dropout_rate)

  # Input gate: i_t = sigmoid(x_t W_xi + h_{t-1} W_hi + c_{t-1} W_ci + b_i).
  i_ait = (tf.matmul(input_tensor, x2i) + tf.matmul(i_h_tm1, h2i) +
           tf.matmul(i_c_tm1, c2i) + bi)
  i_it = tf.sigmoid(i_ait)
  # The forget gate is coupled to the input gate: f_t = 1 - i_t.
  i_ft = tf.ones([1, 1]) - i_it
  # Candidate cell state: w_t = tanh(x_t W_xc + h_{t-1} W_hc + b_c).
  i_awt = tf.matmul(input_tensor, x2c) + tf.matmul(i_h_tm1, h2c) + bc
  i_wt = tf.tanh(i_awt)
  # New cell state: c_t = i_t * w_t + f_t * c_{t-1}.
  ct = tf.add(
      tf.multiply(i_it, i_wt), tf.multiply(i_ft, i_c_tm1), name='lstm_c')
  # Output gate: o_t = sigmoid(x_t W_xo + c_t W_co + h_{t-1} W_ho + b_o).
  i_aot = (tf.matmul(input_tensor, x2o) + tf.matmul(ct, c2o) +
           tf.matmul(i_h_tm1, h2o) + bo)
  i_ot = tf.sigmoid(i_aot)
  # New hidden state: h_t = o_t * tanh(c_t).
  ph_t = tf.tanh(ct)
  ht = tf.multiply(i_ot, ph_t, name='lstm_h')

  if during_training and self._recurrent_dropout_rate < 1:
    ht = tf.nn.dropout(ht, self._recurrent_dropout_rate,
                       name='lstm_h_dropout')

  h = tf.identity(ht, name='layer_0')
  logits = tf.nn.xw_plus_b(ht,
                           self._component.get_variable('weights_softmax'),
                           self._component.get_variable('bias_softmax'))
  if self._component.spec.attention_component:
    logits += self.attention(ht, attention_tensor)
  logits = tf.identity(logits, name='logits')
  return [ht, ct, h, logits]
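The cell above is a coupled-gate LSTM: the forget gate is tied to the input gate as f_t = 1 - i_t, so only three gates are parameterized. A minimal NumPy sketch of one step, assuming a dict p that maps the same names as the variables above to weight arrays:

import numpy as np

def sigmoid(x):
  return 1.0 / (1.0 + np.exp(-x))

def coupled_lstm_step(x, h_tm1, c_tm1, p):
  # One step of the coupled-gate LSTM above (p maps names to weights).
  i = sigmoid(x.dot(p['x2i']) + h_tm1.dot(p['h2i']) +
              c_tm1.dot(p['c2i']) + p['bi'])
  f = 1.0 - i  # coupled forget gate
  w = np.tanh(x.dot(p['x2c']) + h_tm1.dot(p['h2c']) + p['bc'])
  c = i * w + f * c_tm1
  o = sigmoid(x.dot(p['x2o']) + c.dot(p['c2o']) +
              h_tm1.dot(p['h2o']) + p['bo'])
  h = o * np.tanh(c)
  return h, c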
def __init__(self, component):
  """Initializes kernels and biases for this convolutional net.

  Args:
    component: parent ComponentBuilderBase object.

  Parameters used to construct the network:
    widths: comma-separated list of ints, number of steps input to the
      convolutional kernel at every layer.
    depths: comma-separated list of ints, number of channels input to the
      convolutional kernel at every layer.
    output_embedding_dim: int, number of output channels for the
      convolutional kernel of the last layer, which receives no ReLU
      activation and therefore can be used in a softmax output.  If zero,
      this final layer is disabled entirely.
    nonlinearity ('relu'): Name of function from module "tf.nn" to apply to
      each hidden layer; e.g., "relu" or "elu".
    dropout_keep_prob (-1.0): The probability that an input is not dropped.
      If >= 1.0, disables dropout.  If < 0.0, uses the global |dropout_rate|
      hyperparameter.
    dropout_per_sequence (False): If true, sample the dropout mask once per
      sequence, instead of once per step.  See Gal and Ghahramani
      (https://arxiv.org/abs/1512.05287).

  Hyperparameters used:
    dropout_rate: The probability that an input is not dropped.  Only used
      when the |dropout_keep_prob| parameter is negative.
  """
  super(ConvNetwork, self).__init__(component)
  self._attrs = get_attrs_with_defaults(
      component.spec.network_unit.parameters,
      defaults={
          'widths': '',
          'depths': '',
          'output_embedding_dim': 0,
          'nonlinearity': 'relu',
          'dropout_keep_prob': -1.0,
          'dropout_per_sequence': False
      })

  self._weights = []
  self._biases = []
  self._widths = map(int, self._attrs['widths'].split(','))
  self._depths = map(int, self._attrs['depths'].split(','))
  self._output_dim = self._attrs['output_embedding_dim']
  if self._output_dim:
    self._depths.append(self._output_dim)

  self.kernel_shapes = []
  for i in range(len(self._depths) - 1):
    self.kernel_shapes.append(
        [1, self._widths[i], self._depths[i], self._depths[i + 1]])

  for i in range(len(self._depths) - 1):
    with tf.variable_scope('conv%d' % i):
      self._weights.append(
          tf.get_variable(
              'weights',
              self.kernel_shapes[i],
              initializer=tf.random_normal_initializer(stddev=0.0001),
              dtype=tf.float32))
      # The final layer has no ReLU, so it gets a zero bias instead of 0.2.
      bias_init = 0.0 if i == len(self._widths) - 1 else 0.2
      self._biases.append(
          tf.get_variable(
              'biases',
              self.kernel_shapes[i][-1],
              initializer=tf.constant_initializer(bias_init),
              dtype=tf.float32))

  self._nonlinearity = getattr(tf.nn, self._attrs['nonlinearity'])
  self._dropout_rate = self._attrs['dropout_keep_prob']
  if self._dropout_rate < 0.0:
    self._dropout_rate = component.master.hyperparams.dropout_rate

  self._params.extend(self._weights + self._biases)
  self._layers.append(
      Layer(component, name='conv_output', dim=self._depths[-1]))
  self._regularized_weights.extend(
      self._weights[:-1] if self._output_dim else self._weights)
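As a worked example of the kernel-shape bookkeeping above, with hypothetical parameter values:

# widths='3,3', depths='64,96', output_embedding_dim=32 would yield:
widths = [3, 3]
depths = [64, 96, 32]  # output_embedding_dim appended as the final depth
shapes = [[1, widths[i], depths[i], depths[i + 1]]
          for i in range(len(depths) - 1)]
assert shapes == [[1, 3, 64, 96], [1, 3, 96, 32]]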
def create(self, fixed_embeddings, linked_embeddings, context_tensor_arrays,
           attention_tensor, during_training, stride=None):
  """Requires |stride|; otherwise see base class."""
  if stride is None:
    raise RuntimeError("ConvNetwork needs 'stride' and must be called in "
                       'the bulk feature extractor component.')
  input_tensor = get_input_tensor_with_stride(fixed_embeddings,
                                              linked_embeddings, stride)
  del context_tensor_arrays, attention_tensor  # unused

  conv = tf.expand_dims(input_tensor, 1)
  for i in range(len(self._depths) - 1):
    with tf.variable_scope('conv%d' % i, reuse=True) as scope:
      if during_training:
        conv.set_shape([None, 1, None, self._depths[i]])
        conv = self._maybe_apply_dropout(conv, stride)
      conv = tf.nn.conv2d(conv, self._component.get_variable('weights'),
                          [1, 1, 1, 1], padding='SAME')
      conv = tf.nn.bias_add(conv, self._component.get_variable('biases'))
      # Apply the nonlinearity everywhere except the final output layer.
      if i < len(self._weights) - 1 or not self._output_dim:
        conv = self._nonlinearity(conv, name=scope.name)
  return [
      tf.reshape(conv, [-1, self._depths[-1]], name='reshape_activations')
  ]
def __init__(self, component):
  """Initializes kernels and biases for this convolutional net.

  Parameters used to construct the network:
    depths: comma-separated list of ints, number of channels input to the
      convolutional kernel at every layer.
    widths: comma-separated list of ints, number of steps input to the
      convolutional kernel at every layer.
    relu_layers: comma-separated list of ints, the ids of layers after
      which to apply a ReLU activation.  *By default, all but the final
      layer will have a ReLU activation applied.*

  To generate a network with M layers, both 'depths' and 'widths' must be
  of length M.  The input depth of the first layer is inferred from the
  total concatenated size of the input features.

  Args:
    component: parent ComponentBuilderBase object.

  Raises:
    RuntimeError: if the number of depths and widths are not equal.
    ValueError: if the final depth is not equal to 1.
  """
  parameters = component.spec.network_unit.parameters
  super(PairwiseConvNetwork, self).__init__(component)
  # Each pairwise input concatenates two per-step activation vectors.
  self._depths = [self._concatenated_input_dim * 2]
  self._depths.extend(map(int, parameters['depths'].split(',')))
  self._widths = map(int, parameters['widths'].split(','))
  self._num_layers = len(self._widths)
  if len(self._depths) != self._num_layers + 1:
    raise RuntimeError('Unmatched depths/widths %s/%s' %
                       (parameters['depths'], parameters['widths']))
  if self._depths[-1] != 1:
    raise ValueError('Final depth is not equal to 1 in %s' %
                     parameters['depths'])

  self._kernel_shapes = []
  for i, width in enumerate(self._widths):
    self._kernel_shapes.append(
        [width, width, self._depths[i], self._depths[i + 1]])

  if parameters['relu_layers']:
    self._relu_layers = set(map(int, parameters['relu_layers'].split(',')))
  else:
    # Default: ReLU after every layer except the last.
    self._relu_layers = set(range(self._num_layers - 1))

  self._weights = []
  self._biases = []
  for i, kernel_shape in enumerate(self._kernel_shapes):
    with tf.variable_scope('conv%d' % i):
      self._weights.append(
          tf.get_variable(
              'weights',
              kernel_shape,
              initializer=tf.random_normal_initializer(stddev=0.0001),
              dtype=tf.float32))
      bias_init = 0.0 if i in self._relu_layers else 0.2
      self._biases.append(
          tf.get_variable(
              'biases',
              kernel_shape[-1],
              initializer=tf.constant_initializer(bias_init),
              dtype=tf.float32))

  self._params.extend(self._weights + self._biases)
  self._layers.append(Layer(component, name='conv_output', dim=-1))
  self._regularized_weights.extend(self._weights[:-1])
def create(self, fixed_embeddings, linked_embeddings, context_tensor_arrays,
           attention_tensor, during_training, stride=None):
  """Requires |stride|; otherwise see base class."""
  if stride is None:
    raise ValueError("PairwiseConvNetwork needs 'stride'")
  input_tensor = get_input_tensor_with_stride(fixed_embeddings,
                                              linked_embeddings, stride)
  del context_tensor_arrays, attention_tensor, during_training  # unused

  num_steps = tf.shape(input_tensor)[1]
  arg1 = tf.expand_dims(input_tensor, 1)
  arg1 = tf.tile(arg1, tf.stack([1, num_steps, 1, 1]))
  arg2 = tf.expand_dims(input_tensor, 2)
  arg2 = tf.tile(arg2, tf.stack([1, 1, num_steps, 1]))
  # Every pair of steps is represented by the concatenation of their
  # activations, giving a [stride, steps, steps, 2 * dim] input.
  conv = tf.concat([arg1, arg2], 3)

  for i in xrange(self._num_layers):
    with tf.variable_scope('conv%d' % i, reuse=True) as scope:
      conv = tf.nn.conv2d(conv, self._component.get_variable('weights'),
                          [1, 1, 1, 1], padding='SAME')
      conv = tf.nn.bias_add(conv, self._component.get_variable('biases'))
      if i in self._relu_layers:
        conv = tf.nn.relu(conv, name=scope.name)
  return [tf.reshape(conv, [-1, num_steps], name='reshape_activations')]
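The tiling above builds every pair of step activations; a NumPy sketch with toy shapes (all sizes invented) shows the resulting layout:

import numpy as np

batch, steps, dim = 2, 4, 3
x = np.arange(batch * steps * dim, dtype=np.float32).reshape(batch, steps, dim)
arg1 = np.tile(x[:, np.newaxis, :, :], (1, steps, 1, 1))  # expand_dims(., 1)
arg2 = np.tile(x[:, :, np.newaxis, :], (1, 1, steps, 1))  # expand_dims(., 2)
pairs = np.concatenate([arg1, arg2], axis=3)
assert pairs.shape == (batch, steps, steps, 2 * dim)
# pairs[b, i, j] concatenates step j's and step i's activation vectors.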
def __init__(self, component):
  """Initializes exported layers."""
  super(ExportFixedFeaturesNetwork, self).__init__(component)
  for feature_spec in component.spec.fixed_feature:
    name = feature_spec.name
    dim = self._fixed_feature_dims[name]
    self._layers.append(Layer(component, name, dim))
def create(self, fixed_embeddings, linked_embeddings, context_tensor_arrays,
           attention_tensor, during_training, stride=None):
  """See base class."""
  check.Eq(len(self.layers), len(fixed_embeddings))
  for index in range(len(fixed_embeddings)):
    check.Eq(self.layers[index].name, fixed_embeddings[index].name)
  return [fixed_embedding.tensor for fixed_embedding in fixed_embeddings]
def __init__(self, component):
  """Initializes weights and layers.

  Args:
    component: Parent ComponentBuilderBase object.
  """
  super(SplitNetwork, self).__init__(component)
  parameters = component.spec.network_unit.parameters

  self._num_slices = int(parameters['num_slices'])
  check.Gt(self._num_slices, 0, 'Invalid number of slices.')
  check.Eq(self._concatenated_input_dim % self._num_slices, 0,
           'Input dimension %s does not evenly divide into %s slices' %
           (self._concatenated_input_dim, self._num_slices))
  self._slice_dim = int(self._concatenated_input_dim / self._num_slices)

  for slice_index in xrange(self._num_slices):
    self._layers.append(
        Layer(self, 'slice_%s' % slice_index, self._slice_dim))
def initial_html(self, height='700px', script=None, init_message=None):
  """Returns HTML for a container, which will be populated later.

  Args:
    height: CSS string representing the height of the element, default
      '700px'.
    script: Visualization script contents, if the defaults are
      unacceptable.
    init_message: Initial message to display.

  Returns:
    unicode with HTML contents.
  """
  if script is None:
    script = _load_viz_script()
  if init_message is None:
    init_message = 'Type a sentence and press (enter) to see the trace.'
  self.elt_id, div_html = _container_div(
      height=height, contents='<strong>{}</strong>'.format(init_message))
  html = '''
    <meta charset="utf-8"/>
    {div_html}
    <script type='text/javascript'>
    {script}
    </script>
    '''.format(script=script, div_html=div_html)
  return unicode(html, 'utf-8')
def show_trace(self, trace, master_spec=None):
  """Returns a JS script HTML fragment, which will populate the container.

  Args:
    trace: binary-encoded MasterTrace string.
    master_spec: Master spec proto (parsed), which can improve the layout.
      May be required in future versions.

  Returns:
    unicode with HTML contents.
  """
  html = '''
    <meta charset="utf-8"/>
    <script type='text/javascript'>
    document.getElementById("{elt_id}").innerHTML = "";  // Clear previous.
    visualizeToDiv({json}, "{elt_id}", {master_spec_json});
    </script>
    '''.format(json=parse_trace_json(trace),
               master_spec_json=_optional_master_spec_json(master_spec),
               elt_id=self.elt_id)
  return unicode(html, 'utf-8')
def __init__(self,
             num_actions,
             num_features,
             num_feature_ids,
             embedding_sizes,
             hidden_layer_sizes,
             seed=None,
             gate_gradients=False,
             use_locking=False,
             embedding_init=1.0,
             relu_init=0.0001,
             bias_init=0.2,
             softmax_init=0.0001,
             averaging_decay=0.9999,
             use_averaging=True,
             check_parameters=True,
             check_every=1,
             allow_feature_weights=False,
             only_train='',
             arg_prefix=None,
             **unused_kwargs):
  """Initialize the graph builder with parameters defining the network.

  Args:
    num_actions: int size of the set of parser actions.
    num_features: int list of dimensions of the feature vectors.
    num_feature_ids: int list of same length as num_features corresponding
      to the sizes of the input feature spaces.
    embedding_sizes: int list of same length as num_features of the desired
      embedding layer sizes.
    hidden_layer_sizes: int list of desired relu layer sizes; may be empty.
    seed: optional random initializer seed to enable reproducibility.
    gate_gradients: if True, gradient updates are computed synchronously,
      ensuring consistency and reproducibility.
    use_locking: if True, use locking to avoid read-write contention when
      updating Variables.
    embedding_init: sets the std dev of normal initializer of embeddings to
      embedding_init / embedding_size ** .5.
    relu_init: sets the std dev of normal initializer of relu weights to
      relu_init.
    bias_init: sets constant initializer of relu bias to bias_init.
    softmax_init: sets the std dev of normal initializer of softmax init to
      softmax_init.
    averaging_decay: decay for exponential moving average when computing
      averaged parameters; set to 1 to do vanilla averaging.
    use_averaging: whether to use moving averages of parameters during
      evals.
    check_parameters: whether to check for NaN/Inf parameters during
      training.
    check_every: checks numerics every check_every steps.
    allow_feature_weights: whether feature weights are allowed.
    only_train: the comma-separated set of parameter names to train.  If
      empty, all model parameters will be trained.
    arg_prefix: prefix for context parameters.
  """
  self._num_actions = num_actions
  self._num_features = num_features
  self._num_feature_ids = num_feature_ids
  self._embedding_sizes = embedding_sizes
  self._hidden_layer_sizes = hidden_layer_sizes
  self._seed = seed
  self._gate_gradients = gate_gradients
  self._use_locking = use_locking
  self._use_averaging = use_averaging
  self._check_parameters = check_parameters
  self._check_every = check_every
  self._allow_feature_weights = allow_feature_weights
  self._only_train = set(only_train.split(',')) if only_train else None
  self._feature_size = len(embedding_sizes)
  self._embedding_init = embedding_init
  self._relu_init = relu_init
  self._softmax_init = softmax_init
  self._arg_prefix = arg_prefix
  self.params = {}
  self.variables = {}
  self.inits = {}
  self.training = {}
  self.evaluation = {}
  self.saver = None
  self._averaging = {}
  self._averaging_decay = averaging_decay
  self._pretrained_embeddings = {}
  with tf.name_scope('params') as self._param_scope:
    self._relu_bias_init = tf.constant_initializer(bias_init)
def _AddParam(self, shape, dtype, name, initializer=None,
              return_average=False):
  """Adds a model parameter w.r.t. which we expect to compute gradients.

  _AddParam creates both regular parameters (usually for training) and
  averaged nodes (usually for inference).  It returns one or the other
  based on the 'return_average' arg.

  Args:
    shape: int list, tensor shape of the parameter to create.
    dtype: tf.DataType, data type of the parameter.
    name: string, name of the parameter in the TF graph.
    initializer: optional initializer for the parameter.
    return_average: if False, return parameter otherwise return moving
      average.

  Returns:
    parameter or averaged parameter
  """
  if name not in self.params:
    step = tf.cast(self.GetStep(), tf.float32)
    with tf.name_scope(self._param_scope):
      self.params[name] = tf.get_variable(name, shape, dtype, initializer)
      param = self.params[name]
      if initializer is not None:
        self.inits[name] = state_ops.init_variable(param, initializer)
      if self._averaging_decay == 1:
        logging.info('Using vanilla averaging of parameters.')
        ema = tf.train.ExponentialMovingAverage(
            decay=(step / (step + 1.0)), num_updates=None)
      else:
        ema = tf.train.ExponentialMovingAverage(
            decay=self._averaging_decay, num_updates=step)
      self._averaging[name + '_avg_update'] = ema.apply([param])
      self.variables[name + '_avg_var'] = ema.average(param)
      self.inits[name + '_avg_init'] = state_ops.init_variable(
          ema.average(param), tf.zeros_initializer())
  return (self.variables[name + '_avg_var'] if return_average
          else self.params[name])
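The decay = step / (step + 1) branch above turns the exponential moving average into a vanilla (cumulative) average, which a few lines of NumPy can verify:

import numpy as np

xs = np.array([1.0, 2.0, 3.0, 4.0])
avg = 0.0
for step, x in enumerate(xs):
  decay = step / (step + 1.0)
  avg = decay * avg + (1.0 - decay) * x  # EMA update with a growing decay
assert np.isclose(avg, xs.mean())  # identical to the plain running mean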
def _AddEmbedding(self, features, num_features, num_ids, embedding_size,
                  index, return_average=False):
  """Adds an embedding matrix and passes the `features` vector through it."""
  embedding_matrix = self._AddParam(
      [num_ids, embedding_size], tf.float32, 'embedding_matrix_%d' % index,
      self._EmbeddingMatrixInitializer(index, embedding_size),
      return_average=return_average)
  embedding = EmbeddingLookupFeatures(
      embedding_matrix,
      tf.reshape(features, [-1], name='feature_%d' % index),
      self._allow_feature_weights)
  return tf.reshape(embedding, [-1, num_features * embedding_size])
def _BuildNetwork(self, feature_endpoints, return_average=False):
  """Builds a feed-forward part of the net given features as input.

  The network topology is already defined in the constructor, so multiple
  calls to BuildForward build multiple networks whose parameters are all
  shared.  It is the source of the input features and the use of the output
  that distinguishes each network.

  Args:
    feature_endpoints: tensors with input features to the network.
    return_average: whether to use moving averages as model parameters.

  Returns:
    logits: output of the final layer before computing softmax.
  """
  assert len(feature_endpoints) == self._feature_size

  # Create the embedding layer.
  embeddings = []
  for i in range(self._feature_size):
    embeddings.append(
        self._AddEmbedding(feature_endpoints[i], self._num_features[i],
                           self._num_feature_ids[i],
                           self._embedding_sizes[i], i,
                           return_average=return_average))

  # Create the ReLU layers.
  last_layer = tf.concat(embeddings, 1)
  last_layer_size = self.embedding_size
  for i, hidden_layer_size in enumerate(self._hidden_layer_sizes):
    weights = self._AddParam(
        [last_layer_size, hidden_layer_size], tf.float32, 'weights_%d' % i,
        self._ReluWeightInitializer(), return_average=return_average)
    bias = self._AddParam([hidden_layer_size], tf.float32, 'bias_%d' % i,
                          self._relu_bias_init,
                          return_average=return_average)
    last_layer = tf.nn.relu_layer(last_layer, weights, bias,
                                  name='layer_%d' % i)
    last_layer_size = hidden_layer_size

  # Create the softmax layer.
  softmax_weight = self._AddParam(
      [last_layer_size, self._num_actions], tf.float32, 'softmax_weight',
      tf.random_normal_initializer(stddev=self._softmax_init,
                                   seed=self._seed),
      return_average=return_average)
  softmax_bias = self._AddParam(
      [self._num_actions], tf.float32, 'softmax_bias',
      tf.zeros_initializer(), return_average=return_average)
  logits = tf.nn.xw_plus_b(last_layer, softmax_weight, softmax_bias,
                           name='logits')
  return {'logits': logits}
def _AddCostFunction(self, batch_size, gold_actions, logits):
  """Cross entropy plus L2 loss on weights and biases of the hidden layers."""
  dense_golden = BatchedSparseToDense(gold_actions, self._num_actions)
  cross_entropy = tf.div(
      tf.reduce_sum(
          tf.nn.softmax_cross_entropy_with_logits(labels=dense_golden,
                                                  logits=logits)),
      batch_size)
  regularized_params = [
      tf.nn.l2_loss(p) for k, p in self.params.items()
      if k.startswith('weights') or k.startswith('bias')
  ]
  l2_loss = 0.0001 * tf.add_n(regularized_params) if regularized_params else 0
  return {'cost': tf.add(cross_entropy, l2_loss, name='cost')}
def AddEvaluation(self, task_context, batch_size, evaluation_max_steps=300,
                  corpus_name='documents'):
  """Builds the forward network only, without the training operation.

  Args:
    task_context: file path from which to read the task context.
    batch_size: batch size to request from reader op.
    evaluation_max_steps: max number of parsing actions during evaluation,
      only used in beam parsing.
    corpus_name: name of the task input to read parses from.

  Returns:
    Dictionary of named eval nodes.
  """
  def _AssignTransitionScores():
    return tf.assign(nodes['transition_scores'], nodes['logits'],
                     validate_shape=False)

  def _Pass():
    return tf.constant(-1.0)

  unused_evaluation_max_steps = evaluation_max_steps
  with tf.name_scope('evaluation'):
    nodes = self.evaluation
    nodes['transition_scores'] = self._AddVariable(
        [batch_size, self._num_actions], tf.float32, 'transition_scores',
        tf.constant_initializer(-1.0))
    nodes.update(
        self._AddDecodedReader(task_context, batch_size,
                               nodes['transition_scores'], corpus_name))
    nodes.update(
        self._BuildNetwork(nodes['feature_endpoints'],
                           return_average=self._use_averaging))
    # Only assign transition scores when the network produced logits (i.e.
    # the batch was not empty).
    nodes['eval_metrics'] = cf.with_dependencies(
        [tf.cond(tf.greater(tf.size(nodes['logits']), 0),
                 _AssignTransitionScores, _Pass)],
        nodes['eval_metrics'], name='eval_metrics')
  return nodes
def _AddLearningRate(self, initial_learning_rate, decay_steps):
  """Returns a learning rate that decays by 0.96 every decay_steps.

  Args:
    initial_learning_rate: initial value of the learning rate.
    decay_steps: decay by 0.96 every this many steps.

  Returns:
    learning rate variable.
  """
  step = self.GetStep()
  return cf.with_dependencies(
      [self._IncrementCounter(step)],
      tf.train.exponential_decay(initial_learning_rate, step, decay_steps,
                                 0.96, staircase=True))
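With staircase=True, tf.train.exponential_decay holds the rate constant within each window of decay_steps; the closed form is easy to check by hand (the values below are hypothetical):

# lr(step) = initial_learning_rate * 0.96 ** (step // decay_steps)
initial_learning_rate, decay_steps = 0.1, 4000
for step in [0, 3999, 4000, 8000]:
  print step, initial_learning_rate * 0.96 ** (step // decay_steps)
# -> 0.1, 0.1, 0.096, 0.09216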
def AddPretrainedEmbeddings(self, index, embeddings_path, task_context):
  """Embeddings at the given index will be set to pretrained values."""
  def _Initializer(shape, dtype=tf.float32, partition_info=None):
    """Variable initializer that loads pretrained embeddings."""
    unused_dtype = dtype
    seed1, seed2 = tf.get_seed(self._seed)
    t = gen_parser_ops.word_embedding_initializer(
        vectors=embeddings_path,
        task_context=task_context,
        embedding_init=self._embedding_init,
        seed=seed1,
        seed2=seed2)
    t.set_shape(shape)
    return t

  self._pretrained_embeddings[index] = _Initializer
def AddTraining(self, task_context, batch_size, learning_rate=0.1,
                decay_steps=4000, momentum=0.9, corpus_name='documents'):
  """Builds a trainer to minimize the cross entropy cost function.

  Args:
    task_context: file path from which to read the task context.
    batch_size: batch size to request from reader op.
    learning_rate: initial value of the learning rate.
    decay_steps: decay learning rate by 0.96 every this many steps.
    momentum: momentum parameter used when training with momentum.
    corpus_name: name of the task input to read parses from.

  Returns:
    Dictionary of named training nodes.
  """
  with tf.name_scope('training'):
    nodes = self.training
    nodes.update(self._AddGoldReader(task_context, batch_size, corpus_name))
    nodes.update(
        self._BuildNetwork(nodes['feature_endpoints'],
                           return_average=False))
    nodes.update(
        self._AddCostFunction(batch_size, nodes['gold_actions'],
                              nodes['logits']))
    # Add the optimizer.
    if self._only_train:
      trainable_params = [
          v for k, v in self.params.iteritems() if k in self._only_train
      ]
    else:
      trainable_params = self.params.values()
    lr = self._AddLearningRate(learning_rate, decay_steps)
    optimizer = tf.train.MomentumOptimizer(lr, momentum,
                                           use_locking=self._use_locking)
    train_op = optimizer.minimize(nodes['cost'], var_list=trainable_params)
    for param in trainable_params:
      slot = optimizer.get_slot(param, 'momentum')
      self.inits[slot.name] = state_ops.init_variable(
          slot, tf.zeros_initializer())
      self.variables[slot.name] = slot
    numerical_checks = [
        tf.check_numerics(param, message='Parameter is not finite.')
        for param in trainable_params
        if param.dtype.base_dtype in [tf.float32, tf.float64]
    ]
    check_op = tf.group(*numerical_checks)
    avg_update_op = tf.group(*self._averaging.values())
    train_ops = [train_op]
    if self._check_parameters:
      train_ops.append(check_op)
    if self._use_averaging:
      train_ops.append(avg_update_op)
    nodes['train_op'] = tf.group(name='train_op', *train_ops)
  return nodes
def AddSaver(self, slim_model=False):
  """Adds ops to save and restore model parameters.

  Args:
    slim_model: whether only averaged variables are saved.

  Returns:
    the saver object.
  """
  # Save in the root name scope, so checkpoint names are not nested under
  # the training or evaluation scopes.
  with tf.name_scope(None):
    variables_to_save = self.params.copy()
    variables_to_save.update(self.variables)
    if slim_model:
      for key in variables_to_save.keys():
        if not key.endswith('avg_var'):
          del variables_to_save[key]
    self.saver = tf.train.Saver(variables_to_save)
  return self.saver
def _AddBeamReader(self, task_context, batch_size, corpus_name,
                   until_all_final=False, always_start_new_sentences=False):
  """Adds an op capable of reading sentences and parsing them with a beam."""
  features, state, epochs = gen_parser_ops.beam_parse_reader(
      task_context=task_context,
      feature_size=self._feature_size,
      beam_size=self._beam_size,
      batch_size=batch_size,
      corpus_name=corpus_name,
      allow_feature_weights=self._allow_feature_weights,
      arg_prefix=self._arg_prefix,
      continue_until_all_final=until_all_final,
      always_start_new_sentences=always_start_new_sentences)
  return {'state': state, 'features': features, 'epochs': epochs}
def _BuildSequence(self, batch_size, max_steps, features, state,
                   use_average=False):
  """Adds a sequence of beam parsing steps."""
  def Advance(state, step, scores_array, alive, alive_steps, *features):
    scores = self._BuildNetwork(features,
                                return_average=use_average)['logits']
    scores_array = scores_array.write(step, scores)
    features, state, alive = (
        gen_parser_ops.beam_parser(state, scores, self._feature_size))
    return ([state, step + 1, scores_array, alive,
             alive_steps + tf.cast(alive, tf.int32)] + list(features))

  def KeepGoing(*args):
    return tf.logical_and(args[1] < max_steps, tf.reduce_any(args[3]))

  step = tf.constant(0, tf.int32, [])
  scores_array = tensor_array_ops.TensorArray(dtype=tf.float32,
                                              size=0,
                                              dynamic_size=True)
  alive = tf.constant(True, tf.bool, [batch_size])
  alive_steps = tf.constant(0, tf.int32, [batch_size])
  t = tf.while_loop(
      KeepGoing,
      Advance,
      [state, step, scores_array, alive, alive_steps] + list(features),
      shape_invariants=[tf.TensorShape(None)] * (len(features) + 5),
      parallel_iterations=100)

  # Link to the final values of the loop variables.
  return {'state': t[0],
          'concat_scores': t[2].concat(),
          'alive': t[3],
          'alive_steps': t[4]}
def MakeGraph(self, max_steps=10, beam_size=2, batch_size=1, **kwargs):
  """Constructs a structured learning graph."""
  assert max_steps > 0, 'Empty network not supported.'
  logging.info('MakeGraph + %s', kwargs)

  with self.test_session(graph=tf.Graph()) as sess:
    feature_sizes, domain_sizes, embedding_dims, num_actions = sess.run(
        gen_parser_ops.feature_size(task_context=self._task_context))

  embedding_dims = [8, 8, 8]
  hidden_layer_sizes = []
  learning_rate = 0.01
  builder = structured_graph_builder.StructuredGraphBuilder(
      num_actions,
      feature_sizes,
      domain_sizes,
      embedding_dims,
      hidden_layer_sizes,
      seed=1,
      max_steps=max_steps,
      beam_size=beam_size,
      gate_gradients=True,
      use_locking=True,
      use_averaging=False,
      check_parameters=False,
      **kwargs)
  builder.AddTraining(self._task_context,
                      batch_size,
                      learning_rate=learning_rate,
                      decay_steps=1000,
                      momentum=0.9,
                      corpus_name='training-corpus')
  builder.AddEvaluation(self._task_context,
                        batch_size,
                        evaluation_max_steps=25,
                        corpus_name=None)
  builder.training['inits'] = tf.group(name='inits', *builder.inits.values())
  return builder
def testParseUntilNotAlive(self):
  """Ensures that the 'alive' condition works in the Cond ops."""
  with self.test_session(graph=tf.Graph()) as sess:
    t = self.MakeGraph(batch_size=3, beam_size=2, max_steps=5).training
    sess.run(t['inits'])
    for i in range(5):
      logging.info('run %d', i)
      tf_alive = t['alive'].eval()
    self.assertFalse(any(tf_alive))
def testParseMomentum(self):
  """Ensures that Momentum training can be done using the gradients."""
  self.Train()
  self.Train(model_cost='perceptron_loss')
  self.Train(model_cost='perceptron_loss',
             only_train='softmax_weight,softmax_bias', softmax_init=0)
  self.Train(only_train='softmax_weight,softmax_bias', softmax_init=0)
def testPathScoresAgree(self):
  """Ensures that path scores computed in the beam match those in the net."""
  all_path_scores, beam_path_scores = self.PathScores(
      iterations=1, beam_size=130, max_steps=5, batch_size=1)
  self.assertArrayNear(all_path_scores[0], beam_path_scores[0], 1e-6)
def testBatchPathScoresAgree(self):
  """Ensures that path scores computed in the beam match those in the net."""
  all_path_scores, beam_path_scores = self.PathScores(
      iterations=1, beam_size=130, max_steps=5, batch_size=22)
  self.assertArrayNear(all_path_scores[0], beam_path_scores[0], 1e-6)
def testBatchOneStepPathScoresAgree(self):
  """Ensures that path scores computed in the beam match those in the net."""
  all_path_scores, beam_path_scores = self.PathScores(
      iterations=1, beam_size=130, max_steps=1, batch_size=22)
  self.assertArrayNear(all_path_scores[0], beam_path_scores[0], 1e-6)
def testCanCreateImpl(self):
  """Tests that Create can create the Impl subclass."""
  try:
    impl = registry_test_base.Base.Create(
        PATH + 'registry_test_impl.Impl', 'hello world')
  except ValueError:
    self.fail('Create raised ValueError: %s' % traceback.format_exc())
  self.assertEqual('hello world', impl.Get())
def testCanCreateByAlias(self):
  """Tests that Create can create an Impl subclass via Alias."""
  try:
    impl = registry_test_base.Base.Create(
        PATH + 'registry_test_impl.Alias', 'hello world')
  except ValueError:
    self.fail('Create raised ValueError: %s' % traceback.format_exc())
  self.assertEqual('hello world', impl.Get())
def testCannotCreateNonSubclass(self):
  """Tests that Create fails if the class is not a subclass of Base."""
  with self.assertRaisesRegexp(ValueError, 'Failed to create'):
    registry_test_base.Base.Create(
        PATH + 'registry_test_impl.NonSubclass', 'hello world')
def testCannotCreateNonClass(self):
  """Tests that Create fails if the name does not identify a class."""
  with self.assertRaisesRegexp(ValueError, 'Failed to create'):
    registry_test_base.Base.Create(
        PATH + 'registry_test_impl.variable', 'hello world')
  with self.assertRaisesRegexp(ValueError, 'Failed to create'):
    registry_test_base.Base.Create(
        PATH + 'registry_test_impl.Function', 'hello world')
def testCannotCreateMissingClass(self):
  """Tests that Create fails if the class does not exist in the module."""
  with self.assertRaisesRegexp(ValueError, 'Failed to create'):
    registry_test_base.Base.Create(
        PATH + 'registry_test_impl.MissingClass', 'hello world')
def testCannotCreateMissingModule(self):
  """Tests that Create fails if the module does not exist."""
  with self.assertRaisesRegexp(ValueError, 'Failed to create'):
    registry_test_base.Base.Create(
        PATH + 'missing.SomeClass', 'hello world')
def testCannotCreateMissingPackage(self):
  """Tests that Create fails if the package does not exist."""
  with self.assertRaisesRegexp(ValueError, 'Failed to create'):
    registry_test_base.Base.Create(
        'missing.package.path.module.SomeClass', 'hello world')
def testCannotCreateMalformedType(self):
  """Tests that Create fails on malformed type names."""
  with self.assertRaisesRegexp(ValueError, 'Failed to create'):
    registry_test_base.Base.Create('oneword', 'hello world')
  with self.assertRaisesRegexp(ValueError, 'Failed to create'):
    registry_test_base.Base.Create('hyphen-ated', 'hello world')
  with self.assertRaisesRegexp(ValueError, 'Failed to create'):
    registry_test_base.Base.Create('has space', 'hello world')
  with self.assertRaisesRegexp(ValueError, 'Failed to create'):
    registry_test_base.Base.Create(' ', 'hello world')
  with self.assertRaisesRegexp(ValueError, 'Failed to create'):
    registry_test_base.Base.Create('', 'hello world')
def testCanCreateWithRelativePath(self):
  """Tests that Create can create the Impl subclass using a relative path."""
  for name in [PATH + 'registry_test_impl.Impl',
               'syntaxnet.util.registry_test_impl.Impl',
               'util.registry_test_impl.Impl',
               'registry_test_impl.Impl']:
    value = 'created via %s' % name
    try:
      impl = registry_test_base.Base.Create(name, value)
    except ValueError:
      self.fail('Create raised ValueError: %s' % traceback.format_exc())
    self.assertTrue(impl is not None)
    self.assertEqual(value, impl.Get())
def testCannotResolveRelativeName(self):
  """Tests that Create fails if a relative path cannot be resolved."""
  for name in ['nlp.saft.opensource.syntaxnet.util.registry_test_base.Impl',
               'saft.bad.registry_test_impl.Impl',
               'missing.registry_test_impl.Impl',
               'registry_test_impl.Bad',
               'Impl']:
    with self.assertRaisesRegexp(ValueError, 'Failed to create'):
      registry_test_base.Base.Create(name, 'hello world')
def __init__(self, value):
  """Creates an implementation with a custom string."""
  self.value = value
def Get(self):
  """Returns the current value."""
  return self.value
def Get(self):
  """Overridden in subclasses."""
  return None
def __init__(self, pca_params_npz_path):
  """Constructs a postprocessor.

  Args:
    pca_params_npz_path: Path to a NumPy-format .npz file that contains the
      PCA parameters used in postprocessing.
  """
  params = np.load(pca_params_npz_path)
  self._pca_matrix = params[vggish_params.PCA_EIGEN_VECTORS_NAME]
  # Load means into a column vector for easier broadcasting later.
  self._pca_means = params[vggish_params.PCA_MEANS_NAME].reshape(-1, 1)
  assert self._pca_matrix.shape == (
      vggish_params.EMBEDDING_SIZE, vggish_params.EMBEDDING_SIZE), (
          'Bad PCA matrix shape: %r' % (self._pca_matrix.shape,))
  assert self._pca_means.shape == (vggish_params.EMBEDDING_SIZE, 1), (
      'Bad PCA means shape: %r' % (self._pca_means.shape,))
def postprocess(self, embeddings_batch):
  """Applies postprocessing to a batch of embeddings.

  Args:
    embeddings_batch: An nparray of shape [batch_size, embedding_size]
      containing output from the embedding layer of VGGish.

  Returns:
    An nparray of the same shape as the input but of type uint8, containing
    the PCA-transformed and quantized version of the input.
  """
  assert len(embeddings_batch.shape) == 2, (
      'Expected 2-d batch, got %r' % (embeddings_batch.shape,))
  assert embeddings_batch.shape[1] == vggish_params.EMBEDDING_SIZE, (
      'Bad batch shape: %r' % (embeddings_batch.shape,))

  # Apply PCA: center each embedding by the PCA means, then project with
  # the [embedding_size, embedding_size] PCA matrix.
  pca_applied = np.dot(self._pca_matrix,
                       (embeddings_batch.T - self._pca_means)).T

  # Quantize: clip to the quantization range, rescale to [0.0, 255.0],
  # then cast to uint8.
  clipped_embeddings = np.clip(pca_applied, vggish_params.QUANTIZE_MIN_VAL,
                               vggish_params.QUANTIZE_MAX_VAL)
  quantized_embeddings = (
      (clipped_embeddings - vggish_params.QUANTIZE_MIN_VAL) *
      (255.0 /
       (vggish_params.QUANTIZE_MAX_VAL - vggish_params.QUANTIZE_MIN_VAL)))
  quantized_embeddings = quantized_embeddings.astype(np.uint8)
  return quantized_embeddings
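A small numeric example of the clip-and-quantize step, using hypothetical quantization bounds of [-2.0, 2.0] in place of the real QUANTIZE_MIN_VAL / QUANTIZE_MAX_VAL:

import numpy as np

q_min, q_max = -2.0, 2.0  # stand-ins for the vggish_params constants
x = np.array([-3.0, 0.0, 2.0])
clipped = np.clip(x, q_min, q_max)                    # [-2., 0., 2.]
quantized = ((clipped - q_min) * (255.0 / (q_max - q_min))).astype(np.uint8)
# quantized == [0, 127, 255]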
def __init__(self):
  """Class members that will be assigned by any class that actually uses
  this class."""
  self.restrict_to_largest_cc = None
  self.robot = None
  self.env = None
  self.category_list = None
  self.traversible = None
def get_loc_axis(self, node, delta_theta, perturb=None):
  """Based on the node orientation, returns the X and Y axes.  Used to
  sample the map in the egocentric coordinate frame."""
  if type(node) == tuple:
    node = np.array([node])
  if perturb is None:
    perturb = np.zeros((node.shape[0], 4))
  xyt = self.to_actual_xyt_vec(node)
  x = xyt[:, [0]] + perturb[:, [0]]
  y = xyt[:, [1]] + perturb[:, [1]]
  t = xyt[:, [2]] + perturb[:, [2]]
  theta = t * delta_theta
  loc = np.concatenate((x, y), axis=1)
  x_axis = np.concatenate((np.cos(theta), np.sin(theta)), axis=1)
  y_axis = np.concatenate((np.cos(theta + np.pi / 2.0),
                           np.sin(theta + np.pi / 2.0)), axis=1)
  y_axis[np.where(perturb[:, 3] > 0)[0], :] *= -1.0
  return loc, x_axis, y_axis, theta
def to_actual_xyt(self, pqr):
  """Converts from a node to a location on the map."""
  p, q, r = pqr
  if self.task.n_ori == 6:
    # Hexagonal grid: the q axis is sheared relative to the x axis.
    out = (p - q * 0.5 + self.task.origin_loc[0],
           q * np.sqrt(3.0) / 2.0 + self.task.origin_loc[1],
           r)
  elif self.task.n_ori == 4:
    out = (p + self.task.origin_loc[0],
           q + self.task.origin_loc[1],
           r)
  return out
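In other words, for a hex grid (n_ori == 6) the conversion shears the q axis so neighboring rows are offset by half a cell. A quick sanity check with the origin at (0, 0):

import numpy as np

# x = p - 0.5 * q + origin_x,  y = (sqrt(3) / 2) * q + origin_y
p, q = 1.0, 1.0
x, y = p - 0.5 * q, np.sqrt(3.0) / 2.0 * q
assert np.isclose(x, 0.5) and np.isclose(y, np.sqrt(3.0) / 2.0)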
def to_actual_xyt_vec(self, pqr):
  """Converts from a node array to a location array on the map."""
  p = pqr[:, 0][:, np.newaxis]
  q = pqr[:, 1][:, np.newaxis]
  r = pqr[:, 2][:, np.newaxis]
  if self.task.n_ori == 6:
    out = np.concatenate((p - q * 0.5 + self.task.origin_loc[0],
                          q * np.sqrt(3.0) / 2.0 + self.task.origin_loc[1],
                          r), axis=1)
  elif self.task.n_ori == 4:
    out = np.concatenate((p + self.task.origin_loc[0],
                          q + self.task.origin_loc[1],
                          r), axis=1)
  return out
def raw_valid_fn_vec(self, xyt):
  """Returns whether the given set of nodes is valid."""
  height = self.traversible.shape[0]
  width = self.traversible.shape[1]
  x = np.round(xyt[:, [0]]).astype(np.int32)
  y = np.round(xyt[:, [1]]).astype(np.int32)
  is_inside = np.all(
      np.concatenate((x >= 0, y >= 0, x < width, y < height), axis=1),
      axis=1)
  x = np.minimum(np.maximum(x, 0), width - 1)
  y = np.minimum(np.maximum(y, 0), height - 1)
  ind = np.ravel_multi_index((y, x), self.traversible.shape)
  is_traversible = self.traversible.ravel()[ind]
  is_valid = np.all(
      np.concatenate((is_inside[:, np.newaxis], is_traversible), axis=1),
      axis=1)
  return is_valid
def valid_fn_vec(self, pqr):
  """Returns whether the given set of nodes is valid."""
  xyt = self.to_actual_xyt_vec(np.array(pqr))
  height = self.traversible.shape[0]
  width = self.traversible.shape[1]
  x = np.round(xyt[:, [0]]).astype(np.int32)
  y = np.round(xyt[:, [1]]).astype(np.int32)
  is_inside = np.all(
      np.concatenate((x >= 0, y >= 0, x < width, y < height), axis=1),
      axis=1)
  x = np.minimum(np.maximum(x, 0), width - 1)
  y = np.minimum(np.maximum(y, 0), height - 1)
  ind = np.ravel_multi_index((y, x), self.traversible.shape)
  is_traversible = self.traversible.ravel()[ind]
  is_valid = np.all(
      np.concatenate((is_inside[:, np.newaxis], is_traversible), axis=1),
      axis=1)
  return is_valid
def get_feasible_actions(self, node_ids):
  """Returns the feasible set of actions from the current node."""
  a = np.zeros((len(node_ids), self.task_params.num_actions),
               dtype=np.int32)
  gtG = self.task.gtG
  next_node = []
  for i, c in enumerate(node_ids):
    neigh = gtG.vertex(c).out_neighbours()
    neigh_edge = gtG.vertex(c).out_edges()
    nn = {}
    for n, e in zip(neigh, neigh_edge):
      _ = gtG.ep['action'][e]
      a[i, _] = 1
      nn[_] = int(n)
    next_node.append(nn)
  return a, next_node
def take_action(self, current_node_ids, action):
  """Returns the new node after taking the action.  Stays at the current
  node if the action is invalid."""
  actions, next_node_ids = self.get_feasible_actions(current_node_ids)
  new_node_ids = []
  for i, (c, a) in enumerate(zip(current_node_ids, action)):
    if actions[i, a] == 1:
      new_node_ids.append(next_node_ids[i][a])
    else:
      new_node_ids.append(c)
  return new_node_ids
def set_r_obj(self, r_obj):
  """Sets the SwiftshaderRenderer object used for rendering."""
  self.r_obj = r_obj
def _debug_save_map_nodes(self, seed):
  """Saves traversible space along with nodes generated on the graph.
  Takes the seed as input."""
  img_path = os.path.join(
      self.logdir, '{:s}_{:d}_graph.png'.format(self.building_name, seed))
  node_xyt = self.to_actual_xyt_vec(self.task.nodes)
  plt.set_cmap('jet')
  fig, ax = utils.subplot(plt, (1, 1), (12, 12))
  ax.plot(node_xyt[:, 0], node_xyt[:, 1], 'm.')
  ax.set_axis_off()
  ax.axis('equal')

  if self.room_dims is not None:
    for i, r in enumerate(self.room_dims['dims'] * 1):
      min_ = r[:3] * 1
      max_ = r[3:] * 1
      xmin, ymin, zmin = min_
      xmax, ymax, zmax = max_
      ax.plot([xmin, xmax, xmax, xmin, xmin],
              [ymin, ymin, ymax, ymax, ymin], 'g')
  ax.imshow(self.traversible, origin='lower')
  with fu.fopen(img_path, 'w') as f:
    fig.savefig(f, bbox_inches='tight', transparent=True, pad_inches=0)
def _debug_semantic_maps(self, seed):
  """Saves traversible space along with nodes generated on the graph.
  Takes the seed as input."""
  for i, cls in enumerate(self.task_params.semantic_task.class_map_names):
    img_path = os.path.join(
        self.logdir,
        '{:s}_flip{:d}_{:s}_graph.png'.format(self.building_name, seed, cls))
    maps = self.traversible * 1.0
    maps += 0.5 * self.task.class_maps_dilated[:, :, i]
    write_traversible = (maps * 1.0 + 1.0) / 3.0
    write_traversible = (
        write_traversible * 255.0).astype(np.uint8)[:, :, np.newaxis]
    write_traversible = write_traversible + np.zeros((1, 1, 3),
                                                     dtype=np.uint8)
    fu.write_image(img_path, write_traversible[::-1, :, :])
def _preprocess_for_task(self, seed):
  """Sets up the task field for doing navigation on the grid world."""
  if self.task is None or self.task.seed != seed:
    rng = np.random.RandomState(seed)
    origin_loc = get_graph_origin_loc(rng, self.traversible)
    self.task = utils.Foo(seed=seed, origin_loc=origin_loc,
                          n_ori=self.task_params.n_ori)
    G = generate_graph(self.valid_fn_vec, self.task_params.step_size,
                       self.task.n_ori, (0, 0, 0))
    gtG, nodes, nodes_to_id = convert_to_graph_tool(G)
    self.task.gtG = gtG
    self.task.nodes = nodes
    self.task.delta_theta = 2.0 * np.pi / (self.task.n_ori * 1.0)
    self.task.nodes_to_id = nodes_to_id

    logging.info('Building %s, #V=%d, #E=%d', self.building_name,
                 self.task.nodes.shape[0], self.task.gtG.num_edges())
    type = self.task_params.type
    if type == 'general':
      # Nothing to do.
      _ = None
    elif type == 'room_to_room_many' or type == 'room_to_room_back':
      if type == 'room_to_room_back':
        assert self.task_params.num_goals == 2, 'num_goals must be 2.'
      self.room_dims = _filter_rooms(self.room_dims,
                                     self.task_params.room_regex)
      xyt = self.to_actual_xyt_vec(self.task.nodes)
      self.task.node_room_ids = _label_nodes_with_room_id(xyt,
                                                          self.room_dims)
      self.task.reset_kwargs = {'node_room_ids': self.task.node_room_ids}
    elif type == 'rng_rejection_sampling_many':
      n_bins = 20
      rejection_sampling_M = self.task_params.rejection_sampling_M
      min_dist = self.task_params.min_dist
      bins = np.arange(n_bins + 1) / (n_bins * 1.0)
      target_d = np.zeros(n_bins)
      target_d[...] = 1.0 / n_bins
      sampling_d = get_hardness_distribution(
          self.task.gtG, self.task_params.max_dist,
          self.task_params.min_dist, np.random.RandomState(0), 4000, bins,
          self.task.nodes, self.task_params.n_ori,
          self.task_params.step_size)
      self.task.reset_kwargs = {
          'distribution_bins': bins,
          'target_distribution': target_d,
          'sampling_distribution': sampling_d,
          'rejection_sampling_M': rejection_sampling_M,
          'n_bins': n_bins,
          'n_ori': self.task_params.n_ori,
          'step_size': self.task_params.step_size,
          'min_dist': self.task_params.min_dist
      }
      self.task.n_bins = n_bins
      self.task.distribution_bins = bins
      self.task.target_distribution = target_d
      self.task.sampling_distribution = sampling_d
      self.task.rejection_sampling_M = rejection_sampling_M
      if self.logdir is not None:
        self._debug_save_hardness(seed)
    elif type[:14] == 'to_nearest_obj':
      self.room_dims = _filter_rooms(self.room_dims,
                                     self.task_params.room_regex)
      xyt = self.to_actual_xyt_vec(self.task.nodes)
      self.class_maps = _select_classes(
          self.class_maps, self.class_map_names,
          self.task_params.semantic_task.class_map_names) * 1
      self.class_map_names = self.task_params.semantic_task.class_map_names
      nodes_xyt = self.to_actual_xyt_vec(np.array(self.task.nodes))

      tt = utils.Timer()
      tt.tic()
      if self.task_params.type == 'to_nearest_obj_acc':
        self.task.class_maps_dilated, self.task.node_class_label = (
            label_nodes_with_class_geodesic(
                nodes_xyt, self.class_maps,
                self.task_params.semantic_task.pix_distance + 8,
                self.map.traversible, ff_cost=1.0, fo_cost=1.0,
                oo_cost=4.0, connectivity=8.0))

      dists = []
      for i in range(len(self.class_map_names)):
        class_nodes_ = np.where(self.task.node_class_label[:, i])[0]
        dists.append(
            get_distance_node_list(gtG, source_nodes=class_nodes_,
                                   direction='to'))
      self.task.dist_to_class = dists
      a_, b_ = np.where(self.task.node_class_label)
      self.task.class_nodes = np.concatenate(
          (a_[:, np.newaxis], b_[:, np.newaxis]), axis=1)

      if self.logdir is not None:
        self._debug_semantic_maps(seed)

      self.task.reset_kwargs = {
          'sampling': self.task_params.semantic_task.sampling,
          'class_nodes': self.task.class_nodes,
          'dist_to_class': self.task.dist_to_class
      }

    if self.logdir is not None:
      self._debug_save_map_nodes(seed)
def take_action(self, current_node_ids, action, step_number):
  """In addition to returning the new nodes, also returns the reward that
  the agent receives."""
  goal_number = step_number / self.task_params.num_steps
  new_node_ids = GridWorld.take_action(self, current_node_ids, action)
  rewards = []
  for i, n in enumerate(new_node_ids):
    reward = 0
    if n == self.episode.goal_node_ids[goal_number][i]:
      reward = self.task_params.reward_at_goal
    reward = reward - self.task_params.reward_time_penalty
    rewards.append(reward)
  return new_node_ids, rewards
def get_optimal_action(self, current_node_ids, step_number):
  """Returns the optimal action from the current node."""
  goal_number = step_number / self.task_params.num_steps
  gtG = self.task.gtG
  a = np.zeros((len(current_node_ids), self.task_params.num_actions),
               dtype=np.int32)
  d_dict = self.episode.dist_to_goal[goal_number]
  for i, c in enumerate(current_node_ids):
    neigh = gtG.vertex(c).out_neighbours()
    neigh_edge = gtG.vertex(c).out_edges()
    ds = np.array([d_dict[i][int(x)] for x in neigh])
    ds_min = np.min(ds)
    for i_, e in enumerate(neigh_edge):
      if ds[i_] == ds_min:
        _ = gtG.ep['action'][e]
        a[i, _] = 1
  return a
def get_targets(self, current_node_ids, step_number):
  """Returns the target actions from the current node."""
  action = self.get_optimal_action(current_node_ids, step_number)
  action = np.expand_dims(action, axis=1)
  return vars(utils.Foo(action=action))
def get_targets_name(self):
  """Returns the list of names of the targets."""
  return ['action']
def __init__(self, image_diff_list, params):
  """Constructor.

  Args:
    image_diff_list: A list of (image, diff) tuples, with shape
      [batch_size, image_size, image_size, 3] and image_sizes as
      [32, 64, 128, 256].
    params: Dict of parameters.
  """
  self.images = [i for i, _ in image_diff_list]
  # Shift diffs from [-scale, scale] into the non-negative range
  # [0, scale].
  self.diffs = [(d + params['scale']) / 2 for _, d in image_diff_list]
  self.params = params
def _CrossConvHelper(self, encoded_image, kernel):
  """Cross convolution.

  The encoded image and kernel are of the same shape, namely
  [batch_size, image_size, image_size, channels].  They are split into
  [image_size, image_size] image squares and [kernel_size, kernel_size]
  kernel squares, and the kernel squares are used to convolve the image
  squares.
  """
  images = tf.expand_dims(encoded_image, 0)
  kernels = tf.expand_dims(kernel, 3)
  return tf.nn.depthwise_conv2d(images, kernels, [1, 1, 1, 1], 'SAME')
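A shape walk-through of the helper may make this clearer; the sizes are illustrative:

# encoded_image: [size, size, channels] -> images:  [1, size, size, channels]
# kernel:        [k, k, channels]       -> kernels: [k, k, channels, 1]
# tf.nn.depthwise_conv2d convolves each channel with its own 2-D kernel, so
# every feature map is filtered by its own predicted motion kernel, giving
# an output of shape [1, size, size, channels].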
def _CrossConv(self, encoded_images):
  """Applies the motion kernel on the encoded_images."""
  cross_conved_images = []
  kernels = tf.split(axis=3, num_or_size_splits=4, value=self.kernel)
  for i, encoded_image in enumerate(encoded_images):
    with tf.variable_scope('cross_conv_%d' % i):
      kernel = kernels[i]

      encoded_image = tf.unstack(encoded_image, axis=0)
      kernel = tf.unstack(kernel, axis=0)
      assert len(encoded_image) == len(kernel)
      assert len(encoded_image) == self.params['batch_size']

      conved_image = []
      for j in xrange(len(encoded_image)):
        conved_image.append(
            self._CrossConvHelper(encoded_image[j], kernel[j]))
      cross_conved_images.append(tf.concat(axis=0, values=conved_image))
      sys.stderr.write('cross_conved shape: %s\n' %
                       cross_conved_images[-1].get_shape())
  return cross_conved_images
def _BuildImageDecoder(self, cross_conved_images):
  """Decodes the cross_conved feature maps into the predicted images."""
  nets = []
  for i, cross_conved_image in enumerate(cross_conved_images):
    with tf.variable_scope('image_decoder_%d' % i):
      # Deconvolve each feature map so all of them reach a common spatial
      # size and can be concatenated.
      stride = 64 / cross_conved_image.get_shape().as_list()[1]
      nets.append(
          self._Deconv(cross_conved_image, 64, kernel_size=3,
                       stride=stride))

  net = tf.concat(axis=3, values=nets)
  net = slim.conv2d(net, 128, [9, 9], padding='SAME', stride=1)
  net = slim.conv2d(net, 128, [1, 1], padding='SAME', stride=1)
  net = slim.conv2d(net, 3, [1, 1], padding='SAME', stride=1)
  self.diff_output = net
  sys.stderr.write('diff_output shape: %s\n' % self.diff_output.get_shape())
def __init__(self,
             num_clones=1,
             clone_on_cpu=False,
             replica_id=0,
             num_replicas=1,
             num_ps_tasks=0,
             worker_job_name='worker',
             ps_job_name='ps'):
  """Create a DeploymentConfig.

  The config describes how to deploy a model across multiple clones and
  replicas.  The model will be replicated `num_clones` times in each
  replica.  If `clone_on_cpu` is True, each clone will be placed on CPU.

  If `num_replicas` is 1, the model is deployed via a single process.  In
  that case `worker_device`, `num_ps_tasks`, and `ps_device` are ignored.

  If `num_replicas` is greater than 1, then `worker_device` and `ps_device`
  must specify TensorFlow devices for the `worker` and `ps` jobs and
  `num_ps_tasks` must be positive.

  Args:
    num_clones: Number of model clones to deploy in each replica.
    clone_on_cpu: If True, clones would be placed on CPU.
    replica_id: Integer.  Index of the replica for which the model is
      deployed.  Usually 0 for the chief replica.
    num_replicas: Number of replicas to use.
    num_ps_tasks: Number of tasks for the `ps` job.  0 to not use replicas.
    worker_job_name: A name for the worker job.
    ps_job_name: A name for the parameter server job.

  Raises:
    ValueError: If the arguments are invalid.
  """
  if num_replicas > 1:
    if num_ps_tasks < 1:
      raise ValueError('When using replicas num_ps_tasks must be positive')
  if num_replicas > 1 or num_ps_tasks > 0:
    if not worker_job_name:
      raise ValueError('Must specify worker_job_name when using replicas')
    if not ps_job_name:
      raise ValueError('Must specify ps_job_name when using parameter '
                       'server')
  if replica_id >= num_replicas:
    raise ValueError('replica_id must be less than num_replicas')
  self._num_clones = num_clones
  self._clone_on_cpu = clone_on_cpu
  self._replica_id = replica_id
  self._num_replicas = num_replicas
  self._num_ps_tasks = num_ps_tasks
  self._ps_device = '/job:' + ps_job_name if num_ps_tasks > 0 else ''
  self._worker_device = '/job:' + worker_job_name if num_ps_tasks > 0 else ''
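A minimal usage sketch for a single machine with two GPUs and no replicas or parameter servers; the device strings follow from clone_device() and clone_scope() below:

config = DeploymentConfig(num_clones=2, clone_on_cpu=False)
for i in range(2):
  # With num_ps_tasks == 0 there is no worker prefix, so this yields
  # '/device:GPU:0' and '/device:GPU:1'.
  with tf.device(config.clone_device(i)):
    with tf.name_scope(config.clone_scope(i)):  # 'clone_0', 'clone_1'
      pass  # build one clone of the model here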
def caching_device(self):
  """Returns the device to use for caching variables.

  Variables are cached on the worker CPU when using replicas.

  Returns:
    A device string or None if the variables do not need to be cached.
  """
  if self._num_ps_tasks > 0:
    return lambda op: op.device
  else:
    return None
def clone_device(self, clone_index):
  """Device used to create the clone and all the ops inside the clone.

  Args:
    clone_index: Int, representing the clone_index.

  Returns:
    A value suitable for `tf.device()`.

  Raises:
    ValueError: if `clone_index` is greater or equal to the number of
      clones.
  """
  if clone_index >= self._num_clones:
    raise ValueError('clone_index must be less than num_clones')
  device = ''
  if self._num_ps_tasks > 0:
    device += self._worker_device
  if self._clone_on_cpu:
    device += '/device:CPU:0'
  else:
    device += '/device:GPU:%d' % clone_index
  return device
def clone_scope(self, clone_index):
  """Name scope to create the clone.

  Args:
    clone_index: Int, representing the clone_index.

  Returns:
    A name_scope suitable for `tf.name_scope()`.

  Raises:
    ValueError: if `clone_index` is greater or equal to the number of
      clones.
  """
  if clone_index >= self._num_clones:
    raise ValueError('clone_index must be less than num_clones')
  scope = ''
  if self._num_clones > 1:
    scope = 'clone_%d' % clone_index
  return scope
def optimizer_device(self):
  """Device to use with the optimizer.

  Returns:
    A value suitable for `tf.device()`.
  """
  if self._num_ps_tasks > 0 or self._num_clones > 0:
    return self._worker_device + '/device:CPU:0'
  else:
    return ''
def inputs_device(self):
  """Device to use to build the inputs.

  Returns:
    A value suitable for `tf.device()`.
  """
  device = ''
  if self._num_ps_tasks > 0:
    device += self._worker_device
  device += '/device:CPU:0'
  return device
'Returns the device to use for variables created inside the clone. Returns: A value suitable for `tf.device()`.'
def variables_device(self):
device = ''
if self._num_ps_tasks > 0:
  device += self._ps_device
device += '/device:CPU:0'

class _PSDeviceChooser(object):
  'Slim device chooser for variables when using PS.'

  def __init__(self, device, tasks):
    self._device = device
    self._tasks = tasks
    self._task = 0

  def choose(self, op):
    if op.device:
      return op.device
    node_def = op if isinstance(op, tf.NodeDef) else op.node_def
    if node_def.op.startswith('Variable'):
      t = self._task
      self._task = (self._task + 1) % self._tasks
      return '%s/task:%d' % (self._device, t)
    return op.device

if not self._num_ps_tasks:
  return device
chooser = _PSDeviceChooser(device, self._num_ps_tasks)
return chooser.choose
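An illustrative check of the round-robin placement, using a hypothetical stand-in for a real op (choose() only consults .device and .node_def.op):
class _FakeOp(object):
  def __init__(self, op_type):
    self.device = ''
    self.node_def = type('NodeDef', (), {'op': op_type})()

chooser = DeploymentConfig(num_replicas=2, num_ps_tasks=2).variables_device()
chooser(_FakeOp('VariableV2'))  # '/job:ps/device:CPU:0/task:0'
chooser(_FakeOp('VariableV2'))  # '/job:ps/device:CPU:0/task:1' (round-robin)
chooser(_FakeOp('Add'))         # '' (non-variable ops keep their own device)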
'A plain ResNet without extra layers before or after the ResNet blocks.'
def _resnet_plain(self, inputs, blocks, output_stride=None, scope=None):
with tf.variable_scope(scope, values=[inputs]):
  with slim.arg_scope([slim.conv2d], outputs_collections='end_points'):
    net = resnet_utils.stack_blocks_dense(inputs, blocks, output_stride)
    end_points = slim.utils.convert_collection_to_dict('end_points')
    return net, end_points
'Test the end points of a tiny v1 bottleneck network.'
def testEndPointsV1(self):
blocks = [
    resnet_v1.resnet_v1_block('block1', base_depth=1, num_units=2, stride=2),
    resnet_v1.resnet_v1_block('block2', base_depth=2, num_units=2, stride=1)
]
inputs = create_test_input(2, 32, 16, 3)
with slim.arg_scope(resnet_utils.resnet_arg_scope()):
  _, end_points = self._resnet_plain(inputs, blocks, scope='tiny')
expected = [
    'tiny/block1/unit_1/bottleneck_v1/shortcut',
    'tiny/block1/unit_1/bottleneck_v1/conv1',
    'tiny/block1/unit_1/bottleneck_v1/conv2',
    'tiny/block1/unit_1/bottleneck_v1/conv3',
    'tiny/block1/unit_2/bottleneck_v1/conv1',
    'tiny/block1/unit_2/bottleneck_v1/conv2',
    'tiny/block1/unit_2/bottleneck_v1/conv3',
    'tiny/block2/unit_1/bottleneck_v1/shortcut',
    'tiny/block2/unit_1/bottleneck_v1/conv1',
    'tiny/block2/unit_1/bottleneck_v1/conv2',
    'tiny/block2/unit_1/bottleneck_v1/conv3',
    'tiny/block2/unit_2/bottleneck_v1/conv1',
    'tiny/block2/unit_2/bottleneck_v1/conv2',
    'tiny/block2/unit_2/bottleneck_v1/conv3'
]
self.assertItemsEqual(expected, end_points)
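The per-unit end point names follow from how resnet_v1_block expands into unit argument dicts; a sketch of slim's helper, paraphrased (confirm against your tf.slim version):
def resnet_v1_block(scope, base_depth, num_units, stride):
  # All units use stride 1 except the last, which applies `stride`;
  # 'depth' is the block's output depth, 4x the bottleneck depth.
  return resnet_utils.Block(
      scope, resnet_v1.bottleneck,
      [{'depth': base_depth * 4, 'depth_bottleneck': base_depth, 'stride': 1}] * (num_units - 1) +
      [{'depth': base_depth * 4, 'depth_bottleneck': base_depth, 'stride': stride}])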
'A simplified ResNet Block stacker without output stride control.'
def _stack_blocks_nondense(self, net, blocks):
for block in blocks:
  with tf.variable_scope(block.scope, 'block', [net]):
    for i, unit in enumerate(block.args):
      with tf.variable_scope('unit_%d' % (i + 1), values=[net]):
        net = block.unit_fn(net, rate=1, **unit)
return net
'Verify the values of dense feature extraction by atrous convolution. Make sure that dense feature extraction by stack_blocks_dense() followed by subsampling gives identical results to feature extraction at the nominal network output stride using the simple self._stack_blocks_nondense() above.'
def testAtrousValuesBottleneck(self):
block = resnet_v1.resnet_v1_block
blocks = [
    block('block1', base_depth=1, num_units=2, stride=2),
    block('block2', base_depth=2, num_units=2, stride=2),
    block('block3', base_depth=4, num_units=2, stride=2),
    block('block4', base_depth=8, num_units=2, stride=1)
]
nominal_stride = 8
height = 30
width = 31
with slim.arg_scope(resnet_utils.resnet_arg_scope()):
  with slim.arg_scope([slim.batch_norm], is_training=False):
    for output_stride in [1, 2, 4, 8, None]:
      with tf.Graph().as_default():
        with self.test_session() as sess:
          tf.set_random_seed(0)
          inputs = create_test_input(1, height, width, 3)
          output = resnet_utils.stack_blocks_dense(inputs, blocks, output_stride)
          if output_stride is None:
            factor = 1
          else:
            factor = nominal_stride // output_stride
          output = resnet_utils.subsample(output, factor)
          tf.get_variable_scope().reuse_variables()
          expected = self._stack_blocks_nondense(inputs, blocks)
          sess.run(tf.global_variables_initializer())
          output, expected = sess.run([output, expected])
          self.assertAllClose(output, expected, atol=0.0001, rtol=0.0001)
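The comparison hinges on resnet_utils.subsample, which reduces resolution by keeping every factor-th activation; a sketch of the slim helper, paraphrased:
def subsample(inputs, factor, scope=None):
  # Identity when factor == 1; otherwise a 1x1 max pool with stride
  # `factor` keeps every factor-th activation in each spatial dimension.
  if factor == 1:
    return inputs
  return slim.max_pool2d(inputs, [1, 1], stride=factor, scope=scope)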
'A shallow and thin ResNet v1 for faster tests.'
def _resnet_small(self, inputs, num_classes=None, is_training=True, global_pool=True, output_stride=None, include_root_block=True, spatial_squeeze=True, reuse=None, scope='resnet_v1_small'):
block = resnet_v1.resnet_v1_block
blocks = [
    block('block1', base_depth=1, num_units=3, stride=2),
    block('block2', base_depth=2, num_units=3, stride=2),
    block('block3', base_depth=4, num_units=3, stride=2),
    block('block4', base_depth=8, num_units=2, stride=1)
]
return resnet_v1.resnet_v1(inputs, blocks, num_classes,
                           is_training=is_training,
                           global_pool=global_pool,
                           output_stride=output_stride,
                           include_root_block=include_root_block,
                           spatial_squeeze=spatial_squeeze,
                           reuse=reuse,
                           scope=scope)
'Verify dense feature extraction with atrous convolution.'
def testAtrousFullyConvolutionalValues(self):
nominal_stride = 32
for output_stride in [4, 8, 16, 32, None]:
  with slim.arg_scope(resnet_utils.resnet_arg_scope()):
    with tf.Graph().as_default():
      with self.test_session() as sess:
        tf.set_random_seed(0)
        inputs = create_test_input(2, 81, 81, 3)
        output, _ = self._resnet_small(inputs, None,
                                       is_training=False,
                                       global_pool=False,
                                       output_stride=output_stride)
        if output_stride is None:
          factor = 1
        else:
          factor = nominal_stride // output_stride
        output = resnet_utils.subsample(output, factor)
        tf.get_variable_scope().reuse_variables()
        expected, _ = self._resnet_small(inputs, None,
                                         is_training=False,
                                         global_pool=False)
        sess.run(tf.global_variables_initializer())
        self.assertAllClose(output.eval(), expected.eval(),
                            atol=0.0001, rtol=0.0001)
'A plain ResNet without extra layers before or after the ResNet blocks.'
def _resnet_plain(self, inputs, blocks, output_stride=None, scope=None):
with tf.variable_scope(scope, values=[inputs]):
  with slim.arg_scope([slim.conv2d], outputs_collections='end_points'):
    net = resnet_utils.stack_blocks_dense(inputs, blocks, output_stride)
    end_points = slim.utils.convert_collection_to_dict('end_points')
    return net, end_points
'Test the end points of a tiny v2 bottleneck network.'
def testEndPointsV2(self):
blocks = [
    resnet_v2.resnet_v2_block('block1', base_depth=1, num_units=2, stride=2),
    resnet_v2.resnet_v2_block('block2', base_depth=2, num_units=2, stride=1)
]
inputs = create_test_input(2, 32, 16, 3)
with slim.arg_scope(resnet_utils.resnet_arg_scope()):
  _, end_points = self._resnet_plain(inputs, blocks, scope='tiny')
expected = [
    'tiny/block1/unit_1/bottleneck_v2/shortcut',
    'tiny/block1/unit_1/bottleneck_v2/conv1',
    'tiny/block1/unit_1/bottleneck_v2/conv2',
    'tiny/block1/unit_1/bottleneck_v2/conv3',
    'tiny/block1/unit_2/bottleneck_v2/conv1',
    'tiny/block1/unit_2/bottleneck_v2/conv2',
    'tiny/block1/unit_2/bottleneck_v2/conv3',
    'tiny/block2/unit_1/bottleneck_v2/shortcut',
    'tiny/block2/unit_1/bottleneck_v2/conv1',
    'tiny/block2/unit_1/bottleneck_v2/conv2',
    'tiny/block2/unit_1/bottleneck_v2/conv3',
    'tiny/block2/unit_2/bottleneck_v2/conv1',
    'tiny/block2/unit_2/bottleneck_v2/conv2',
    'tiny/block2/unit_2/bottleneck_v2/conv3'
]
self.assertItemsEqual(expected, end_points)
'A simplified ResNet Block stacker without output stride control.'
def _stack_blocks_nondense(self, net, blocks):
for block in blocks:
  with tf.variable_scope(block.scope, 'block', [net]):
    for i, unit in enumerate(block.args):
      with tf.variable_scope('unit_%d' % (i + 1), values=[net]):
        net = block.unit_fn(net, rate=1, **unit)
return net
'Verify the values of dense feature extraction by atrous convolution. Make sure that dense feature extraction by stack_blocks_dense() followed by subsampling gives identical results to feature extraction at the nominal network output stride using the simple self._stack_blocks_nondense() above.'
def testAtrousValuesBottleneck(self):
block = resnet_v2.resnet_v2_block
blocks = [
    block('block1', base_depth=1, num_units=2, stride=2),
    block('block2', base_depth=2, num_units=2, stride=2),
    block('block3', base_depth=4, num_units=2, stride=2),
    block('block4', base_depth=8, num_units=2, stride=1)
]
nominal_stride = 8
height = 30
width = 31
with slim.arg_scope(resnet_utils.resnet_arg_scope()):
  with slim.arg_scope([slim.batch_norm], is_training=False):
    for output_stride in [1, 2, 4, 8, None]:
      with tf.Graph().as_default():
        with self.test_session() as sess:
          tf.set_random_seed(0)
          inputs = create_test_input(1, height, width, 3)
          output = resnet_utils.stack_blocks_dense(inputs, blocks, output_stride)
          if output_stride is None:
            factor = 1
          else:
            factor = nominal_stride // output_stride
          output = resnet_utils.subsample(output, factor)
          tf.get_variable_scope().reuse_variables()
          expected = self._stack_blocks_nondense(inputs, blocks)
          sess.run(tf.global_variables_initializer())
          output, expected = sess.run([output, expected])
          self.assertAllClose(output, expected, atol=0.0001, rtol=0.0001)
'A shallow and thin ResNet v2 for faster tests.'
def _resnet_small(self, inputs, num_classes=None, is_training=True, global_pool=True, output_stride=None, include_root_block=True, spatial_squeeze=True, reuse=None, scope='resnet_v2_small'):
block = resnet_v2.resnet_v2_block
blocks = [
    block('block1', base_depth=1, num_units=3, stride=2),
    block('block2', base_depth=2, num_units=3, stride=2),
    block('block3', base_depth=4, num_units=3, stride=2),
    block('block4', base_depth=8, num_units=2, stride=1)
]
return resnet_v2.resnet_v2(inputs, blocks, num_classes,
                           is_training=is_training,
                           global_pool=global_pool,
                           output_stride=output_stride,
                           include_root_block=include_root_block,
                           spatial_squeeze=spatial_squeeze,
                           reuse=reuse,
                           scope=scope)
'Verify dense feature extraction with atrous convolution.'
def testAtrousFullyConvolutionalValues(self):
nominal_stride = 32
for output_stride in [4, 8, 16, 32, None]:
  with slim.arg_scope(resnet_utils.resnet_arg_scope()):
    with tf.Graph().as_default():
      with self.test_session() as sess:
        tf.set_random_seed(0)
        inputs = create_test_input(2, 81, 81, 3)
        output, _ = self._resnet_small(inputs, None,
                                       is_training=False,
                                       global_pool=False,
                                       output_stride=output_stride)
        if output_stride is None:
          factor = 1
        else:
          factor = nominal_stride // output_stride
        output = resnet_utils.subsample(output, factor)
        tf.get_variable_scope().reuse_variables()
        expected, _ = self._resnet_small(inputs, None,
                                         is_training=False,
                                         global_pool=False)
        sess.run(tf.global_variables_initializer())
        self.assertAllClose(output.eval(), expected.eval(),
                            atol=0.0001, rtol=0.0001)
'Constructs an SSDInceptionV2FeatureExtractor. Args: depth_multiplier: float depth multiplier for feature extractor. Returns: an ssd_inception_v2_feature_extractor.SSDInceptionV2FeatureExtractor.'
def _create_feature_extractor(self, depth_multiplier):
min_depth = 32
conv_hyperparams = {}
return ssd_inception_v2_feature_extractor.SSDInceptionV2FeatureExtractor(
    depth_multiplier, min_depth, conv_hyperparams)
'MobileNetV1 Feature Extractor for SSD Models. Args: depth_multiplier: float depth multiplier for feature extractor. min_depth: minimum feature extractor depth. conv_hyperparams: tf slim arg_scope for conv2d and separable_conv2d ops. reuse_weights: Whether to reuse variables. Default is None.'
def __init__(self, depth_multiplier, min_depth, conv_hyperparams, reuse_weights=None):
super(SSDMobileNetV1FeatureExtractor, self).__init__(depth_multiplier, min_depth, conv_hyperparams, reuse_weights)
'SSD preprocessing. Maps pixel values to the range [-1, 1]. Args: resized_inputs: a [batch, height, width, channels] float tensor representing a batch of images. Returns: preprocessed_inputs: a [batch, height, width, channels] float tensor representing a batch of images.'
def preprocess(self, resized_inputs):
return (((2.0 / 255.0) * resized_inputs) - 1.0)
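A quick sanity check of the [0, 255] -> [-1, 1] mapping (pure NumPy, illustrative pixel values):
import numpy as np
pixels = np.array([0.0, 127.5, 255.0])
print((2.0 / 255.0) * pixels - 1.0)  # [-1.  0.  1.]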
'Extract features from preprocessed inputs. Args: preprocessed_inputs: a [batch, height, width, channels] float tensor representing a batch of images. Returns: feature_maps: a list of tensors where the ith tensor has shape [batch, height_i, width_i, depth_i]'
def extract_features(self, preprocessed_inputs):
preprocessed_inputs.get_shape().assert_has_rank(4)
shape_assert = tf.Assert(
    tf.logical_and(tf.greater_equal(tf.shape(preprocessed_inputs)[1], 33),
                   tf.greater_equal(tf.shape(preprocessed_inputs)[2], 33)),
    ['image size must at least be 33 in both height and width.'])
feature_map_layout = {
    'from_layer': ['Conv2d_11_pointwise', 'Conv2d_13_pointwise', '', '', '', ''],
    'layer_depth': [-1, -1, 512, 256, 256, 128]
}
with tf.control_dependencies([shape_assert]):
  with slim.arg_scope(self._conv_hyperparams):
    with tf.variable_scope('MobilenetV1', reuse=self._reuse_weights) as scope:
      _, image_features = mobilenet_v1.mobilenet_v1_base(
          preprocessed_inputs,
          final_endpoint='Conv2d_13_pointwise',
          min_depth=self._min_depth,
          depth_multiplier=self._depth_multiplier,
          scope=scope)
      feature_maps = feature_map_generators.multi_resolution_feature_maps(
          feature_map_layout=feature_map_layout,
          depth_multiplier=self._depth_multiplier,
          min_depth=self._min_depth,
          insert_1x1_conv=True,
          image_features=image_features)
return feature_maps.values()
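A hypothetical end-to-end sketch; conv_hyperparams would normally come from the config's hyperparams builder, and the empty dict here is illustrative only (the unit test below uses the same shortcut):
extractor = SSDMobileNetV1FeatureExtractor(depth_multiplier=1.0, min_depth=32, conv_hyperparams={})
images = tf.placeholder(tf.float32, [1, 300, 300, 3])
feature_maps = extractor.extract_features(extractor.preprocess(images))
# With 300x300 inputs this typically yields six maps, 19x19 down to 1x1.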
'Constructs a new feature extractor. Args: depth_multiplier: float depth multiplier for feature extractor Returns: an ssd_meta_arch.SSDFeatureExtractor object.'
def _create_feature_extractor(self, depth_multiplier):
min_depth = 32
conv_hyperparams = {}
return ssd_mobilenet_v1_feature_extractor.SSDMobileNetV1FeatureExtractor(
    depth_multiplier, min_depth, conv_hyperparams)
'Constructor. Args: architecture: Architecture name of the Resnet V1 model. resnet_model: Definition of the Resnet V1 model. is_training: See base class. first_stage_features_stride: See base class. reuse_weights: See base class. weight_decay: See base class. Raises: ValueError: If `first_stage_features_stride` is not 8 or 16.'
def __init__(self, architecture, resnet_model, is_training, first_stage_features_stride, reuse_weights=None, weight_decay=0.0):
if first_stage_features_stride != 8 and first_stage_features_stride != 16:
  raise ValueError('`first_stage_features_stride` must be 8 or 16.')
self._architecture = architecture
self._resnet_model = resnet_model
super(FasterRCNNResnetV1FeatureExtractor, self).__init__(
    is_training, first_stage_features_stride, reuse_weights, weight_decay)
'Faster R-CNN Resnet V1 preprocessing. VGG style channel mean subtraction as described here: https://gist.github.com/ksimonyan/211839e770f7b538e2d8#file-readme-md Args: resized_inputs: A [batch, height_in, width_in, channels] float32 tensor representing a batch of images with values between 0 and 255.0. Returns: preprocessed_inputs: A [batch, height_out, width_out, channels] float32 tensor representing a batch of images.'
def preprocess(self, resized_inputs):
channel_means = [123.68, 116.779, 103.939]
return resized_inputs - [[channel_means]]
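A broadcast check (pure NumPy): [[channel_means]] has shape (1, 1, 3), so each channel mean is subtracted from every pixel of a [batch, H, W, 3] batch.
import numpy as np
batch = np.full((2, 4, 4, 3), 255.0, dtype=np.float32)
print((batch - [[[123.68, 116.779, 103.939]]])[0, 0, 0])  # [131.32  138.221 151.061]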
'Extracts first stage RPN features. Args: preprocessed_inputs: A [batch, height, width, channels] float32 tensor representing a batch of images. scope: A scope name. Returns: rpn_feature_map: A tensor with shape [batch, height, width, depth] Raises: InvalidArgumentError: If the spatial size of `preprocessed_inputs` (height or width) is less than 33. ValueError: If the created network is missing the required activation.'
def _extract_proposal_features(self, preprocessed_inputs, scope):
if len(preprocessed_inputs.get_shape().as_list()) != 4:
  raise ValueError('`preprocessed_inputs` must be 4 dimensional, got a '
                   'tensor of shape %s' % preprocessed_inputs.get_shape())
shape_assert = tf.Assert(
    tf.logical_and(tf.greater_equal(tf.shape(preprocessed_inputs)[1], 33),
                   tf.greater_equal(tf.shape(preprocessed_inputs)[2], 33)),
    ['image size must at least be 33 in both height and width.'])
with tf.control_dependencies([shape_assert]):
  with slim.arg_scope(resnet_utils.resnet_arg_scope(
      batch_norm_epsilon=1e-05, batch_norm_scale=True,
      weight_decay=self._weight_decay)):
    with tf.variable_scope(self._architecture,
                           reuse=self._reuse_weights) as var_scope:
      _, activations = self._resnet_model(
          preprocessed_inputs,
          num_classes=None,
          is_training=False,
          global_pool=False,
          output_stride=self._first_stage_features_stride,
          spatial_squeeze=False,
          scope=var_scope)
handle = scope + '/%s/block3' % self._architecture
return activations[handle]
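The lookup key concatenates the caller's scope with the architecture; for example (the scope name here is hypothetical):
handle = 'FirstStageFeatureExtractor' + '/%s/block3' % 'resnet_v1_101'
# 'FirstStageFeatureExtractor/resnet_v1_101/block3'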
'Extracts second stage box classifier features. Args: proposal_feature_maps: A 4-D float tensor with shape [batch_size * self.max_num_proposals, crop_height, crop_width, depth] representing the feature map cropped to each proposal. scope: A scope name (unused). Returns: proposal_classifier_features: A 4-D float tensor with shape [batch_size * self.max_num_proposals, height, width, depth] representing box classifier features for each proposal.'
def _extract_box_classifier_features(self, proposal_feature_maps, scope):
with tf.variable_scope(self._architecture, reuse=self._reuse_weights):
  with slim.arg_scope(resnet_utils.resnet_arg_scope(
      batch_norm_epsilon=1e-05, batch_norm_scale=True,
      weight_decay=self._weight_decay)):
    with slim.arg_scope([slim.batch_norm], is_training=False):
      blocks = [
          resnet_utils.Block(
              'block4', resnet_v1.bottleneck,
              [{'depth': 2048, 'depth_bottleneck': 512, 'stride': 1}] * 3)
      ]
      proposal_classifier_features = resnet_utils.stack_blocks_dense(
          proposal_feature_maps, blocks)
return proposal_classifier_features
'Constructor. Args: is_training: See base class. first_stage_features_stride: See base class. reuse_weights: See base class. weight_decay: See base class. Raises: ValueError: If `first_stage_features_stride` is not 8 or 16, or if `architecture` is not supported.'
def __init__(self, is_training, first_stage_features_stride, reuse_weights=None, weight_decay=0.0):
super(FasterRCNNResnet50FeatureExtractor, self).__init__('resnet_v1_50', resnet_v1.resnet_v1_50, is_training, first_stage_features_stride, reuse_weights, weight_decay)
'Constructor. Args: is_training: See base class. first_stage_features_stride: See base class. reuse_weights: See base class. weight_decay: See base class. Raises: ValueError: If `first_stage_features_stride` is not 8 or 16, or if `architecture` is not supported.'
def __init__(self, is_training, first_stage_features_stride, reuse_weights=None, weight_decay=0.0):
super(FasterRCNNResnet101FeatureExtractor, self).__init__('resnet_v1_101', resnet_v1.resnet_v1_101, is_training, first_stage_features_stride, reuse_weights, weight_decay)
'Constructor. Args: is_training: See base class. first_stage_features_stride: See base class. reuse_weights: See base class. weight_decay: See base class. Raises: ValueError: If `first_stage_features_stride` is not 8 or 16, or if `architecture` is not supported.'
def __init__(self, is_training, first_stage_features_stride, reuse_weights=None, weight_decay=0.0):
super(FasterRCNNResnet152FeatureExtractor, self).__init__('resnet_v1_152', resnet_v1.resnet_v1_152, is_training, first_stage_features_stride, reuse_weights, weight_decay)
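The three subclasses differ only in the architecture they bind; a usage sketch (the Faster R-CNN meta-architecture normally invokes the extraction methods itself, and the scope name below is hypothetical):
extractor = FasterRCNNResnet101FeatureExtractor(is_training=False, first_stage_features_stride=16)
images = tf.placeholder(tf.float32, [1, 600, 600, 3])
rpn_features = extractor._extract_proposal_features(extractor.preprocess(images), scope='FirstStageFeatureExtractor')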
'InceptionV2 Feature Extractor for SSD Models. Args: depth_multiplier: float depth multiplier for feature extractor. min_depth: minimum feature extractor depth. conv_hyperparams: tf slim arg_scope for conv2d and separable_conv2d ops. reuse_weights: Whether to reuse variables. Default is None.'
def __init__(self, depth_multiplier, min_depth, conv_hyperparams, reuse_weights=None):
super(SSDInceptionV2FeatureExtractor, self).__init__(depth_multiplier, min_depth, conv_hyperparams, reuse_weights)
'SSD preprocessing. Maps pixel values to the range [-1, 1]. Args: resized_inputs: a [batch, height, width, channels] float tensor representing a batch of images. Returns: preprocessed_inputs: a [batch, height, width, channels] float tensor representing a batch of images.'
def preprocess(self, resized_inputs):
return (((2.0 / 255.0) * resized_inputs) - 1.0)