Dataset columns (string lengths):
  desc:   3 to 26.7k characters
  decl:   11 to 7.89k characters
  bodies: 8 to 553k characters
'Extracts features from preprocessed inputs.

This function is responsible for extracting feature maps from preprocessed images.

Args:
  preprocessed_inputs: a [batch, height, width, channels] float tensor representing a batch of images.

Returns:
  feature_maps: a list of tensors where the ith tensor has shape [batch, height_i, width_i, depth_i]'
@abstractmethod
def extract_features(self, preprocessed_inputs):
pass
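For concreteness, here is a minimal sketch of what a concrete extractor implementing this interface might look like. The class name, layer sizes and use of slim are illustrative assumptions, not taken from any released model:

import tensorflow as tf
import tensorflow.contrib.slim as slim

class ToyFeatureExtractor(object):
    """Illustrative extractor producing two feature maps of decreasing size."""

    def preprocess(self, resized_inputs):
        # Scale pixel values from [0, 255] to [-1, 1].
        return (2.0 / 255.0) * resized_inputs - 1.0

    def extract_features(self, preprocessed_inputs):
        with tf.variable_scope('ToyFeatures'):
            net = slim.conv2d(preprocessed_inputs, 32, [3, 3], stride=2)
            map1 = slim.conv2d(net, 64, [3, 3], stride=2)
            map2 = slim.conv2d(map1, 128, [3, 3], stride=2)
        # Each map halves the spatial resolution of the previous one.
        return [map1, map2]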
'SSDMetaArch Constructor.

TODO: group NMS parameters + score converter into a class and loss parameters into a class and write config protos for postprocessing and losses.

Args:
  is_training: A boolean indicating whether the training version of the computation graph should be constructed.
  anchor_generator: an anchor_generator.AnchorGenerator object.
  box_predictor: a box_predictor.BoxPredictor object.
  box_coder: a box_coder.BoxCoder object.
  feature_extractor: a SSDFeatureExtractor object.
  matcher: a matcher.Matcher object.
  region_similarity_calculator: a region_similarity_calculator.RegionSimilarityCalculator object.
  image_resizer_fn: a callable for image resizing. This callable always takes a rank-3 image tensor (corresponding to a single image) and returns a rank-3 image tensor, possibly with new spatial dimensions. See builders/image_resizer_builder.py.
  non_max_suppression_fn: batch_multiclass_non_max_suppression callable that takes `boxes`, `scores` and optional `clip_window` inputs (with all other inputs already set) and returns a dictionary holding tensors with keys: `detection_boxes`, `detection_scores`, `detection_classes` and `num_detections`. See `post_processing.batch_multiclass_non_max_suppression` for the type and shape of these tensors.
  score_conversion_fn: callable elementwise nonlinearity (that takes tensors as inputs and returns tensors). This is usually used to convert logits to probabilities.
  classification_loss: an object_detection.core.losses.Loss object.
  localization_loss: an object_detection.core.losses.Loss object.
  classification_loss_weight: float
  localization_loss_weight: float
  normalize_loss_by_num_matches: boolean
  hard_example_miner: a losses.HardExampleMiner object (can be None)
  add_summaries: boolean (default: True) controlling whether summary ops should be added to tensorflow graph.'
def __init__(self,
             is_training,
             anchor_generator,
             box_predictor,
             box_coder,
             feature_extractor,
             matcher,
             region_similarity_calculator,
             image_resizer_fn,
             non_max_suppression_fn,
             score_conversion_fn,
             classification_loss,
             localization_loss,
             classification_loss_weight,
             localization_loss_weight,
             normalize_loss_by_num_matches,
             hard_example_miner,
             add_summaries=True):
super(SSDMetaArch, self).__init__(num_classes=box_predictor.num_classes)
self._is_training = is_training
self._extract_features_scope = 'FeatureExtractor'
self._anchor_generator = anchor_generator
self._box_predictor = box_predictor
self._box_coder = box_coder
self._feature_extractor = feature_extractor
self._matcher = matcher
self._region_similarity_calculator = region_similarity_calculator
# Background class lives at index 0 of the classification targets.
unmatched_cls_target = tf.constant([1] + self.num_classes * [0], tf.float32)
self._target_assigner = target_assigner.TargetAssigner(
    self._region_similarity_calculator,
    self._matcher,
    self._box_coder,
    positive_class_weight=1.0,
    negative_class_weight=1.0,
    unmatched_cls_target=unmatched_cls_target)
self._classification_loss = classification_loss
self._localization_loss = localization_loss
self._classification_loss_weight = classification_loss_weight
self._localization_loss_weight = localization_loss_weight
self._normalize_loss_by_num_matches = normalize_loss_by_num_matches
self._hard_example_miner = hard_example_miner
self._image_resizer_fn = image_resizer_fn
self._non_max_suppression_fn = non_max_suppression_fn
self._score_conversion_fn = score_conversion_fn
self._anchors = None
self._add_summaries = add_summaries
'Feature-extractor specific preprocessing. See base class.

Args:
  inputs: a [batch, height_in, width_in, channels] float tensor representing a batch of images with values between 0 and 255.0.

Returns:
  preprocessed_inputs: a [batch, height_out, width_out, channels] float tensor representing a batch of images.

Raises:
  ValueError: if inputs tensor does not have type tf.float32'
def preprocess(self, inputs):
if inputs.dtype is not tf.float32:
    raise ValueError('`preprocess` expects a tf.float32 tensor')
with tf.name_scope('Preprocessor'):
    resized_inputs = tf.map_fn(self._image_resizer_fn,
                               elems=inputs,
                               dtype=tf.float32)
    return self._feature_extractor.preprocess(resized_inputs)
'Predicts unpostprocessed tensors from input tensor.

This function takes an input batch of images and runs it through the forward pass of the network to yield unpostprocessed predictions. A side effect of calling the predict method is that self._anchors is populated with a box_list.BoxList of anchors. These anchors must be constructed before the postprocess or loss functions can be called.

Args:
  preprocessed_inputs: a [batch, height, width, channels] image tensor.

Returns:
  prediction_dict: a dictionary holding "raw" prediction tensors:
    1) box_encodings: 3-D float tensor of shape [batch_size, num_anchors, box_code_dimension] containing predicted boxes.
    2) class_predictions_with_background: 3-D float tensor of shape [batch_size, num_anchors, num_classes+1] containing class predictions (logits) for each of the anchors. Note that this tensor *includes* background class predictions (at class index 0).
    3) feature_maps: a list of tensors where the ith tensor has shape [batch, height_i, width_i, depth_i].'
def predict(self, preprocessed_inputs):
with tf.variable_scope(None, self._extract_features_scope,
                       [preprocessed_inputs]):
    feature_maps = self._feature_extractor.extract_features(preprocessed_inputs)
feature_map_spatial_dims = self._get_feature_map_spatial_dims(feature_maps)
self._anchors = self._anchor_generator.generate(feature_map_spatial_dims)
(box_encodings, class_predictions_with_background
) = self._add_box_predictions_to_feature_maps(feature_maps)
predictions_dict = {
    'box_encodings': box_encodings,
    'class_predictions_with_background': class_predictions_with_background,
    'feature_maps': feature_maps
}
return predictions_dict
'Adds box predictors to each feature map and returns concatenated results.

Args:
  feature_maps: a list of tensors where the ith tensor has shape [batch, height_i, width_i, depth_i]

Returns:
  box_encodings: 3-D float tensor of shape [batch_size, num_anchors, box_code_dimension] containing predicted boxes.
  class_predictions_with_background: 3-D float tensor of shape [batch_size, num_anchors, num_classes+1] containing class predictions (logits) for each of the anchors. Note that this tensor *includes* background class predictions (at class index 0).

Raises:
  RuntimeError: if the number of feature maps extracted via the extract_features method does not match the length of the num_anchors_per_location list that was passed to the constructor.
  RuntimeError: if box_encodings from the box_predictor does not have shape of the form [batch_size, num_anchors, 1, code_size].'
def _add_box_predictions_to_feature_maps(self, feature_maps):
num_anchors_per_location_list = self._anchor_generator.num_anchors_per_location()
if len(feature_maps) != len(num_anchors_per_location_list):
    raise RuntimeError('the number of feature maps must match the length of '
                       'self.anchors.NumAnchorsPerLocation().')
box_encodings_list = []
cls_predictions_with_background_list = []
for idx, (feature_map, num_anchors_per_location) in enumerate(
        zip(feature_maps, num_anchors_per_location_list)):
    box_predictor_scope = 'BoxPredictor_{}'.format(idx)
    box_predictions = self._box_predictor.predict(feature_map,
                                                  num_anchors_per_location,
                                                  box_predictor_scope)
    box_encodings = box_predictions[bpredictor.BOX_ENCODINGS]
    cls_predictions_with_background = box_predictions[
        bpredictor.CLASS_PREDICTIONS_WITH_BACKGROUND]
    box_encodings_shape = box_encodings.get_shape().as_list()
    if len(box_encodings_shape) != 4 or box_encodings_shape[2] != 1:
        raise RuntimeError('box_encodings from the box_predictor must be of '
                           'shape `[batch_size, num_anchors, 1, code_size]`; '
                           'actual shape', box_encodings_shape)
    box_encodings = tf.squeeze(box_encodings, axis=2)
    box_encodings_list.append(box_encodings)
    cls_predictions_with_background_list.append(cls_predictions_with_background)
num_predictions = sum(
    [tf.shape(box_encodings)[1] for box_encodings in box_encodings_list])
num_anchors = self.anchors.num_boxes()
anchors_assert = tf.assert_equal(num_anchors, num_predictions, [
    'Mismatch: number of anchors vs number of predictions', num_anchors,
    num_predictions
])
with tf.control_dependencies([anchors_assert]):
    box_encodings = tf.concat(box_encodings_list, 1)
    class_predictions_with_background = tf.concat(
        cls_predictions_with_background_list, 1)
return (box_encodings, class_predictions_with_background)
'Return list of spatial dimensions for each feature map in a list. Args: feature_maps: a list of tensors where the ith tensor has shape [batch, height_i, width_i, depth_i]. Returns: a list of pairs (height, width) for each feature map in feature_maps'
def _get_feature_map_spatial_dims(self, feature_maps):
feature_map_shapes = [
    shape_utils.combined_static_and_dynamic_shape(feature_map)
    for feature_map in feature_maps
]
return [(shape[1], shape[2]) for shape in feature_map_shapes]
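A small hedged illustration of the expected output (the shapes are made up for the example):

fmap1 = tf.zeros([2, 19, 19, 512])
fmap2 = tf.zeros([2, 10, 10, 256])
# _get_feature_map_spatial_dims([fmap1, fmap2]) would return
# [(19, 19), (10, 10)].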
'Converts prediction tensors to final detections.

This function converts raw prediction tensors to final detection results by slicing off the background class, decoding box predictions and applying non max suppression and clipping to the image window. See base class for output format conventions. Note also that by default, scores are to be interpreted as logits, but if a score_conversion_fn is used, then scores are remapped (and may thus have a different interpretation).

Args:
  prediction_dict: a dictionary holding prediction tensors with
    1) box_encodings: 3-D float tensor of shape [batch_size, num_anchors, box_code_dimension] containing predicted boxes.
    2) class_predictions_with_background: 3-D float tensor of shape [batch_size, num_anchors, num_classes+1] containing class predictions (logits) for each of the anchors. Note that this tensor *includes* background class predictions.

Returns:
  detections: a dictionary containing the following fields
    detection_boxes: [batch, max_detections, 4]
    detection_scores: [batch, max_detections]
    detection_classes: [batch, max_detections]
    num_detections: [batch]

Raises:
  ValueError: if prediction_dict does not contain `box_encodings` or `class_predictions_with_background` fields.'
def postprocess(self, prediction_dict):
if ('box_encodings' not in prediction_dict or
        'class_predictions_with_background' not in prediction_dict):
    raise ValueError('prediction_dict does not contain expected entries.')
with tf.name_scope('Postprocessor'):
    box_encodings = prediction_dict['box_encodings']
    class_predictions = prediction_dict['class_predictions_with_background']
    detection_boxes = self._batch_decode(box_encodings)
    detection_boxes = tf.expand_dims(detection_boxes, axis=2)
    # Slice off the background class (index 0) before score conversion.
    class_predictions_without_background = tf.slice(class_predictions,
                                                    [0, 0, 1], [-1, -1, -1])
    detection_scores = self._score_conversion_fn(
        class_predictions_without_background)
    clip_window = tf.constant([0, 0, 1, 1], tf.float32)
    (nmsed_boxes, nmsed_scores, nmsed_classes, _,
     num_detections) = self._non_max_suppression_fn(detection_boxes,
                                                    detection_scores,
                                                    clip_window=clip_window)
    return {
        'detection_boxes': nmsed_boxes,
        'detection_scores': nmsed_scores,
        'detection_classes': nmsed_classes,
        'num_detections': tf.to_float(num_detections)
    }
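Taken together, preprocess, predict and postprocess form the inference path. A minimal sketch of wiring them up, assuming `model` is an SSDMetaArch constructed as above (placeholder shapes are illustrative):

images = tf.placeholder(tf.float32, shape=[None, None, None, 3])
preprocessed = model.preprocess(images)
prediction_dict = model.predict(preprocessed)  # also populates model._anchors
detections = model.postprocess(prediction_dict)
# detections['detection_boxes']:  [batch, max_detections, 4]
# detections['detection_scores']: [batch, max_detections]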
'Compute scalar loss tensors with respect to provided groundtruth.

Calling this function requires that groundtruth tensors have been provided via the provide_groundtruth function.

Args:
  prediction_dict: a dictionary holding prediction tensors with
    1) box_encodings: 3-D float tensor of shape [batch_size, num_anchors, box_code_dimension] containing predicted boxes.
    2) class_predictions_with_background: 3-D float tensor of shape [batch_size, num_anchors, num_classes+1] containing class predictions (logits) for each of the anchors. Note that this tensor *includes* background class predictions.
  scope: Optional scope name.

Returns:
  a dictionary mapping loss keys (`localization_loss` and `classification_loss`) to scalar tensors representing corresponding loss values.'
def loss(self, prediction_dict, scope=None):
with tf.name_scope(scope, 'Loss', prediction_dict.values()):
    (batch_cls_targets, batch_cls_weights, batch_reg_targets,
     batch_reg_weights, match_list) = self._assign_targets(
         self.groundtruth_lists(fields.BoxListFields.boxes),
         self.groundtruth_lists(fields.BoxListFields.classes))
    if self._add_summaries:
        self._summarize_input(
            self.groundtruth_lists(fields.BoxListFields.boxes), match_list)
    num_matches = tf.stack(
        [match.num_matched_columns() for match in match_list])
    location_losses = self._localization_loss(
        prediction_dict['box_encodings'],
        batch_reg_targets,
        weights=batch_reg_weights)
    cls_losses = self._classification_loss(
        prediction_dict['class_predictions_with_background'],
        batch_cls_targets,
        weights=batch_cls_weights)
    localization_loss = tf.reduce_sum(location_losses)
    classification_loss = tf.reduce_sum(cls_losses)
    if self._hard_example_miner:
        (localization_loss, classification_loss) = self._apply_hard_mining(
            location_losses, cls_losses, prediction_dict, match_list)
        if self._add_summaries:
            self._hard_example_miner.summarize()
    normalizer = tf.constant(1.0, dtype=tf.float32)
    if self._normalize_loss_by_num_matches:
        normalizer = tf.maximum(tf.to_float(tf.reduce_sum(num_matches)), 1.0)
    loss_dict = {
        'localization_loss': (self._localization_loss_weight / normalizer) *
                             localization_loss,
        'classification_loss': (self._classification_loss_weight /
                                normalizer) * classification_loss
    }
    return loss_dict
'Assign groundtruth targets.

Adds a background class to each one-hot encoding of groundtruth classes and uses target assigner to obtain regression and classification targets.

Args:
  groundtruth_boxes_list: a list of 2-D tensors of shape [num_boxes, 4] containing coordinates of the groundtruth boxes. Groundtruth boxes are provided in [y_min, x_min, y_max, x_max] format and assumed to be normalized and clipped relative to the image window with y_min <= y_max and x_min <= x_max.
  groundtruth_classes_list: a list of 2-D one-hot (or k-hot) tensors of shape [num_boxes, num_classes] containing the class targets with the 0th index assumed to map to the first non-background class.

Returns:
  batch_cls_targets: a tensor with shape [batch_size, num_anchors, num_classes],
  batch_cls_weights: a tensor with shape [batch_size, num_anchors],
  batch_reg_targets: a tensor with shape [batch_size, num_anchors, box_code_dimension],
  batch_reg_weights: a tensor with shape [batch_size, num_anchors],
  match_list: a list of matcher.Match objects encoding the match between anchors and groundtruth boxes for each image of the batch, with rows of the Match objects corresponding to groundtruth boxes and columns corresponding to anchors.'
def _assign_targets(self, groundtruth_boxes_list, groundtruth_classes_list):
groundtruth_boxlists = [
    box_list.BoxList(boxes) for boxes in groundtruth_boxes_list
]
# Prepend a background-class column (index 0) to each one-hot encoding.
groundtruth_classes_with_background_list = [
    tf.pad(one_hot_encoding, [[0, 0], [1, 0]], mode='CONSTANT')
    for one_hot_encoding in groundtruth_classes_list
]
return target_assigner.batch_assign_targets(
    self._target_assigner, self.anchors, groundtruth_boxlists,
    groundtruth_classes_with_background_list)
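The background-class padding is worth seeing in isolation. A small sketch of what the tf.pad call does here (values illustrative):

# One box labeled as class 1 of 3 foreground classes:
one_hot = tf.constant([[0., 1., 0.]])            # [num_boxes=1, num_classes=3]
padded = tf.pad(one_hot, [[0, 0], [1, 0]], mode='CONSTANT')
# padded == [[0., 0., 1., 0.]]                   # [1, num_classes + 1]
# Index 0 is now the (implicit) background class.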
'Creates tensorflow summaries for the input boxes and anchors. This function creates four summaries corresponding to the average number (over images in a batch) of (1) groundtruth boxes, (2) anchors marked as positive, (3) anchors marked as negative, and (4) anchors marked as ignored. Args: groundtruth_boxes_list: a list of 2-D tensors of shape [num_boxes, 4] containing corners of the groundtruth boxes. match_list: a list of matcher.Match objects encoding the match between anchors and groundtruth boxes for each image of the batch, with rows of the Match objects corresponding to groundtruth boxes and columns corresponding to anchors.'
def _summarize_input(self, groundtruth_boxes_list, match_list):
num_boxes_per_image = tf.stack(
    [tf.shape(x)[0] for x in groundtruth_boxes_list])
pos_anchors_per_image = tf.stack(
    [match.num_matched_columns() for match in match_list])
neg_anchors_per_image = tf.stack(
    [match.num_unmatched_columns() for match in match_list])
ignored_anchors_per_image = tf.stack(
    [match.num_ignored_columns() for match in match_list])
tf.summary.scalar('Input/AvgNumGroundtruthBoxesPerImage',
                  tf.reduce_mean(tf.to_float(num_boxes_per_image)))
tf.summary.scalar('Input/AvgNumPositiveAnchorsPerImage',
                  tf.reduce_mean(tf.to_float(pos_anchors_per_image)))
tf.summary.scalar('Input/AvgNumNegativeAnchorsPerImage',
                  tf.reduce_mean(tf.to_float(neg_anchors_per_image)))
tf.summary.scalar('Input/AvgNumIgnoredAnchorsPerImage',
                  tf.reduce_mean(tf.to_float(ignored_anchors_per_image)))
'Applies hard mining to anchorwise losses.

Args:
  location_losses: Float tensor of shape [batch_size, num_anchors] representing anchorwise location losses.
  cls_losses: Float tensor of shape [batch_size, num_anchors] representing anchorwise classification losses.
  prediction_dict: a dictionary holding prediction tensors with
    1) box_encodings: 3-D float tensor of shape [batch_size, num_anchors, box_code_dimension] containing predicted boxes.
    2) class_predictions_with_background: 3-D float tensor of shape [batch_size, num_anchors, num_classes+1] containing class predictions (logits) for each of the anchors. Note that this tensor *includes* background class predictions.
  match_list: a list of matcher.Match objects encoding the match between anchors and groundtruth boxes for each image of the batch, with rows of the Match objects corresponding to groundtruth boxes and columns corresponding to anchors.

Returns:
  mined_location_loss: a float scalar with sum of localization losses from selected hard examples.
  mined_cls_loss: a float scalar with sum of classification losses from selected hard examples.'
def _apply_hard_mining(self, location_losses, cls_losses, prediction_dict, match_list):
class_pred_shape = [-1, self.anchors.num_boxes_static(), self.num_classes]
class_predictions = tf.reshape(
    tf.slice(prediction_dict['class_predictions_with_background'],
             [0, 0, 1], class_pred_shape), class_pred_shape)
decoded_boxes = self._batch_decode(prediction_dict['box_encodings'])
decoded_box_tensors_list = tf.unstack(decoded_boxes)
class_prediction_list = tf.unstack(class_predictions)
decoded_boxlist_list = []
for box_location, box_score in zip(decoded_box_tensors_list,
                                   class_prediction_list):
    decoded_boxlist = box_list.BoxList(box_location)
    decoded_boxlist.add_field('scores', box_score)
    decoded_boxlist_list.append(decoded_boxlist)
return self._hard_example_miner(
    location_losses=location_losses,
    cls_losses=cls_losses,
    decoded_boxlist_list=decoded_boxlist_list,
    match_list=match_list)
'Decodes a batch of box encodings with respect to the anchors. Args: box_encodings: A float32 tensor of shape [batch_size, num_anchors, box_code_size] containing box encodings. Returns: decoded_boxes: A float32 tensor of shape [batch_size, num_anchors, 4] containing the decoded boxes.'
def _batch_decode(self, box_encodings):
combined_shape = shape_utils.combined_static_and_dynamic_shape(box_encodings)
batch_size = combined_shape[0]
tiled_anchor_boxes = tf.tile(
    tf.expand_dims(self.anchors.get(), 0), [batch_size, 1, 1])
tiled_anchors_boxlist = box_list.BoxList(
    tf.reshape(tiled_anchor_boxes, [-1, self._box_coder.code_size]))
decoded_boxes = self._box_coder.decode(
    tf.reshape(box_encodings, [-1, self._box_coder.code_size]),
    tiled_anchors_boxlist)
return tf.reshape(decoded_boxes.get(),
                  tf.stack([combined_shape[0], combined_shape[1], 4]))
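A shape walk-through may help, since the anchor/encoding bookkeeping is the whole trick here (the numbers are illustrative):

# Suppose batch_size=8, num_anchors=1917, code_size=4. Then:
#   box_encodings:  [8, 1917, 4] -> reshaped to [8 * 1917, 4]
#   anchors:        [1917, 4]    -> tiled to [8, 1917, 4] -> [8 * 1917, 4]
# box_coder.decode pairs the two row-for-row, and the decoded result is
# reshaped back to [8, 1917, 4].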
'Returns a map of variables to load from a foreign checkpoint. See parent class for details. Args: from_detection_checkpoint: whether to restore from a full detection checkpoint (with compatible variable names) or to restore from a classification checkpoint for initialization prior to training. Returns: A dict mapping variable names (to load from a checkpoint) to variables in the model graph.'
def restore_map(self, from_detection_checkpoint=True):
variables_to_restore = {}
for variable in tf.all_variables():
    if variable.op.name.startswith(self._extract_features_scope):
        var_name = variable.op.name
        if not from_detection_checkpoint:
            var_name = re.split('^' + self._extract_features_scope + '/',
                                var_name)[-1]
        variables_to_restore[var_name] = variable
return variables_to_restore
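A sketch of how the returned map is typically consumed when initializing the feature extractor from a classification checkpoint (the checkpoint path is hypothetical):

variables_to_restore = model.restore_map(from_detection_checkpoint=False)
init_saver = tf.train.Saver(variables_to_restore)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    init_saver.restore(sess, '/path/to/classification/model.ckpt')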
'RFCNMetaArch Constructor.

Args:
  is_training: A boolean indicating whether the training version of the computation graph should be constructed.
  num_classes: Number of classes. Note that num_classes *does not* include the background category, so if groundtruth labels take values in {0, 1, ..., K-1}, num_classes=K (and not K+1, even though the assigned classification targets can range from {0, ..., K}).
  image_resizer_fn: A callable for image resizing. This callable always takes a rank-3 image tensor (corresponding to a single image) and returns a rank-3 image tensor, possibly with new spatial dimensions. See builders/image_resizer_builder.py.
  feature_extractor: A FasterRCNNFeatureExtractor object.
  first_stage_only: Whether to construct only the Region Proposal Network (RPN) part of the model.
  first_stage_anchor_generator: An anchor_generator.AnchorGenerator object (note that currently we only support grid_anchor_generator.GridAnchorGenerator objects).
  first_stage_atrous_rate: A single integer indicating the atrous rate for the single convolution op which is applied to the `rpn_features_to_crop` tensor to obtain a tensor to be used for box prediction. Some feature extractors optionally allow for producing feature maps computed at denser resolutions. The atrous rate is used to compensate for the denser feature maps by using an effectively larger receptive field. (This should typically be set to 1.)
  first_stage_box_predictor_arg_scope: Slim arg_scope for conv2d, separable_conv2d and fully_connected ops for the RPN box predictor.
  first_stage_box_predictor_kernel_size: Kernel size to use for the convolution op just prior to RPN box predictions.
  first_stage_box_predictor_depth: Output depth for the convolution op just prior to RPN box predictions.
  first_stage_minibatch_size: The "batch size" to use for computing the objectness and location loss of the region proposal network. This "batch size" refers to the number of anchors selected as contributing to the loss function for any given image within the image batch and is only called "batch_size" due to terminology from the Faster R-CNN paper.
  first_stage_positive_balance_fraction: Fraction of positive examples per image for the RPN. The recommended value for Faster R-CNN is 0.5.
  first_stage_nms_score_threshold: Score threshold for non max suppression for the Region Proposal Network (RPN). This value is expected to be in [0, 1] as it is applied directly after a softmax transformation. The recommended value for Faster R-CNN is 0.
  first_stage_nms_iou_threshold: The Intersection Over Union (IOU) threshold for performing Non-Max Suppression (NMS) on the boxes predicted by the Region Proposal Network (RPN).
  first_stage_max_proposals: Maximum number of boxes to retain after performing Non-Max Suppression (NMS) on the boxes predicted by the Region Proposal Network (RPN).
  first_stage_localization_loss_weight: A float.
  first_stage_objectness_loss_weight: A float.
  second_stage_rfcn_box_predictor: RFCN box predictor to use for second stage.
  second_stage_batch_size: The batch size used for computing the classification and refined location loss of the box classifier. This "batch size" refers to the number of proposals selected as contributing to the loss function for any given image within the image batch and is only called "batch_size" due to terminology from the Faster R-CNN paper.
  second_stage_balance_fraction: Fraction of positive examples to use per image for the box classifier. The recommended value for Faster R-CNN is 0.25.
  second_stage_non_max_suppression_fn: batch_multiclass_non_max_suppression callable that takes `boxes`, `scores`, optional `clip_window` and optional (kwarg) `mask` inputs (with all other inputs already set) and returns a dictionary containing tensors with keys: `detection_boxes`, `detection_scores`, `detection_classes`, `num_detections`, and (optionally) `detection_masks`. See `post_processing.batch_multiclass_non_max_suppression` for the type and shape of these tensors.
  second_stage_score_conversion_fn: Callable elementwise nonlinearity (that takes tensors as inputs and returns tensors). This is usually used to convert logits to probabilities.
  second_stage_localization_loss_weight: A float.
  second_stage_classification_loss_weight: A float.
  hard_example_miner: A losses.HardExampleMiner object (can be None).
  parallel_iterations: (Optional) The number of iterations allowed to run in parallel for calls to tf.map_fn.

Raises:
  ValueError: If `second_stage_batch_size` > `first_stage_max_proposals`.
  ValueError: If first_stage_anchor_generator is not of type grid_anchor_generator.GridAnchorGenerator.'
def __init__(self,
             is_training,
             num_classes,
             image_resizer_fn,
             feature_extractor,
             first_stage_only,
             first_stage_anchor_generator,
             first_stage_atrous_rate,
             first_stage_box_predictor_arg_scope,
             first_stage_box_predictor_kernel_size,
             first_stage_box_predictor_depth,
             first_stage_minibatch_size,
             first_stage_positive_balance_fraction,
             first_stage_nms_score_threshold,
             first_stage_nms_iou_threshold,
             first_stage_max_proposals,
             first_stage_localization_loss_weight,
             first_stage_objectness_loss_weight,
             second_stage_rfcn_box_predictor,
             second_stage_batch_size,
             second_stage_balance_fraction,
             second_stage_non_max_suppression_fn,
             second_stage_score_conversion_fn,
             second_stage_localization_loss_weight,
             second_stage_classification_loss_weight,
             hard_example_miner,
             parallel_iterations=16):
super(RFCNMetaArch, self).__init__(
    is_training,
    num_classes,
    image_resizer_fn,
    feature_extractor,
    first_stage_only,
    first_stage_anchor_generator,
    first_stage_atrous_rate,
    first_stage_box_predictor_arg_scope,
    first_stage_box_predictor_kernel_size,
    first_stage_box_predictor_depth,
    first_stage_minibatch_size,
    first_stage_positive_balance_fraction,
    first_stage_nms_score_threshold,
    first_stage_nms_iou_threshold,
    first_stage_max_proposals,
    first_stage_localization_loss_weight,
    first_stage_objectness_loss_weight,
    # The four None arguments correspond to second-stage parameters of the
    # parent constructor that R-FCN does not use.
    None,
    None,
    None,
    None,
    second_stage_batch_size,
    second_stage_balance_fraction,
    second_stage_non_max_suppression_fn,
    second_stage_score_conversion_fn,
    second_stage_localization_loss_weight,
    second_stage_classification_loss_weight,
    hard_example_miner,
    parallel_iterations)
self._rfcn_box_predictor = second_stage_rfcn_box_predictor
'Predicts the output tensors from 2nd stage of FasterRCNN.

Args:
  rpn_box_encodings: 3-D float tensor of shape [batch_size, num_valid_anchors, self._box_coder.code_size] containing predicted boxes.
  rpn_objectness_predictions_with_background: 3-D float tensor of shape [batch_size, num_valid_anchors, 2] containing class predictions (logits) for each of the anchors. Note that this tensor *includes* background class predictions (at class index 0).
  rpn_features: A 4-D float32 tensor with shape [batch_size, height, width, depth] representing image features from the RPN.
  anchors: 2-D float tensor of shape [num_anchors, self._box_coder.code_size].
  image_shape: A 1-D int32 tensor of size [4] containing the image shape.

Returns:
  prediction_dict: a dictionary holding "raw" prediction tensors:
    1) refined_box_encodings: a 3-D tensor with shape [total_num_proposals, num_classes, 4] representing predicted (final) refined box encodings, where total_num_proposals=batch_size*self._max_num_proposals.
    2) class_predictions_with_background: a 2-D tensor with shape [total_num_proposals, num_classes + 1] containing class predictions (logits) for each of the anchors, where total_num_proposals=batch_size*self._max_num_proposals. Note that this tensor *includes* background class predictions (at class index 0).
    3) num_proposals: An int32 tensor of shape [batch_size] representing the number of proposals generated by the RPN. `num_proposals` allows us to keep track of which entries are to be treated as zero paddings and which are not since we always pad the number of proposals to be `self.max_num_proposals` for each image.
    4) proposal_boxes: A float32 tensor of shape [batch_size, self.max_num_proposals, 4] representing decoded proposal bounding boxes (in absolute coordinates).'
def _predict_second_stage(self, rpn_box_encodings, rpn_objectness_predictions_with_background, rpn_features, anchors, image_shape):
(proposal_boxes_normalized, _, num_proposals) = self._postprocess_rpn(
    rpn_box_encodings, rpn_objectness_predictions_with_background, anchors,
    image_shape)
box_classifier_features = self._feature_extractor.extract_box_classifier_features(
    rpn_features, scope=self.second_stage_feature_extractor_scope)
box_predictions = self._rfcn_box_predictor.predict(
    box_classifier_features,
    num_predictions_per_location=1,
    scope=self.second_stage_box_predictor_scope,
    proposal_boxes=proposal_boxes_normalized)
refined_box_encodings = tf.squeeze(
    box_predictions[box_predictor.BOX_ENCODINGS], axis=1)
class_predictions_with_background = tf.squeeze(
    box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND], axis=1)
absolute_proposal_boxes = ops.normalized_to_image_coordinates(
    proposal_boxes_normalized, image_shape,
    parallel_iterations=self._parallel_iterations)
prediction_dict = {
    'refined_box_encodings': refined_box_encodings,
    'class_predictions_with_background': class_predictions_with_background,
    'num_proposals': num_proposals,
    'proposal_boxes': absolute_proposal_boxes
}
return prediction_dict
'Set up mock SSD model. Here we set up a simple mock SSD model that will always predict 4 detections that happen to always be exactly the anchors that are set up in the above MockAnchorGenerator. Because we let max_detections=5, we will also always end up with an extra padded row in the detection results.'
def setUp(self):
is_training = False
self._num_classes = 1
mock_anchor_generator = MockAnchorGenerator2x2()
mock_box_predictor = test_utils.MockBoxPredictor(is_training,
                                                 self._num_classes)
mock_box_coder = test_utils.MockBoxCoder()
fake_feature_extractor = FakeSSDFeatureExtractor()
mock_matcher = test_utils.MockMatcher()
region_similarity_calculator = sim_calc.IouSimilarity()

def image_resizer_fn(image):
    return tf.identity(image)

classification_loss = losses.WeightedSigmoidClassificationLoss(
    anchorwise_output=True)
localization_loss = losses.WeightedSmoothL1LocalizationLoss(
    anchorwise_output=True)
non_max_suppression_fn = functools.partial(
    post_processing.batch_multiclass_non_max_suppression,
    score_thresh=-20.0,
    iou_thresh=1.0,
    max_size_per_class=5,
    max_total_size=5)
classification_loss_weight = 1.0
localization_loss_weight = 1.0
normalize_loss_by_num_matches = False
hard_example_miner = losses.HardExampleMiner(
    num_hard_examples=None, iou_threshold=1.0)
self._num_anchors = 4
self._code_size = 4
self._model = ssd_meta_arch.SSDMetaArch(
    is_training, mock_anchor_generator, mock_box_predictor, mock_box_coder,
    fake_feature_extractor, mock_matcher, region_similarity_calculator,
    image_resizer_fn, non_max_suppression_fn, tf.identity,
    classification_loss, localization_loss, classification_loss_weight,
    localization_loss_weight, normalize_loss_by_num_matches,
    hard_example_miner)
'Create a GRU object.

Args:
  num_units: Number of units in the GRU.
  forget_bias (optional): Hack to help learning.
  weight_scale (optional): Weights are scaled by ws/sqrt(#inputs), with ws being the weight scale.
  clip_value (optional): If the recurrent values grow above this value, clip them.
  collections (optional): List of additional collections variables should belong to.'
def __init__(self, num_units, forget_bias=1.0, weight_scale=1.0, clip_value=np.inf, collections=None):
self._num_units = num_units
self._forget_bias = forget_bias
self._weight_scale = weight_scale
self._clip_value = clip_value
self._collections = collections
'Return the output portion of the state.'
def output_from_state(self, state):
return state
'Gated recurrent unit (GRU) function. Args: inputs: A 2D batch x input_dim tensor of inputs. state: The previous state from the last time step. scope (optional): TF variable scope for defined GRU variables. Returns: A tuple (state, state), where state is the newly computed state at time t. It is returned twice to respect an interface that works for LSTMs.'
def __call__(self, inputs, state, scope=None):
x = inputs
h = state
if inputs is not None:
    xh = tf.concat(axis=1, values=[x, h])
else:
    xh = h
with tf.variable_scope(scope or type(self).__name__):
    with tf.variable_scope('Gates'):
        r, u = tf.split(
            axis=1,
            num_or_size_splits=2,
            value=linear(
                xh,
                2 * self._num_units,
                alpha=self._weight_scale,
                name='xh_2_ru',
                collections=self._collections))
        r, u = tf.sigmoid(r), tf.sigmoid(u + self._forget_bias)
    with tf.variable_scope('Candidate'):
        xrh = tf.concat(axis=1, values=[x, r * h])
        c = tf.tanh(
            linear(xrh, self._num_units, name='xrh_2_c',
                   collections=self._collections))
    new_h = u * h + (1 - u) * c
    new_h = tf.clip_by_value(new_h, -self._clip_value, self._clip_value)
return (new_h, new_h)
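Written out, one step of this cell computes (with [a, b] denoting concatenation, and W_r, W_u the two halves of the single 'xh_2_ru' weight matrix created by `linear`):

    r = sigmoid(W_r · [x, h])
    u = sigmoid(W_u · [x, h] + forget_bias)
    c = tanh(W_c · [x, r * h])
    h_new = clip(u * h + (1 - u) * c)

And a minimal sketch of unrolling the cell over a sequence. The shapes are illustrative, and it assumes the GRU class above and its `linear` helper are in scope:

num_steps, batch_size, input_dim, num_units = 50, 16, 8, 32
cell = GRU(num_units, clip_value=5.0)
inputs_bxtxd = tf.placeholder(tf.float32, [batch_size, num_steps, input_dim])
state = tf.zeros([batch_size, num_units])
outputs = []
with tf.variable_scope('gru_rnn'):
    for t in range(num_steps):
        if t > 0:
            # Share the gate/candidate weights across time steps.
            tf.get_variable_scope().reuse_variables()
        output, state = cell(inputs_bxtxd[:, t, :], state)
        outputs.append(output)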
'Create a GRU object.

Args:
  num_units: Number of units in the GRU.
  forget_bias (optional): Hack to help learning.
  input_weight_scale (optional): Input weights are scaled by ws/sqrt(#inputs), with ws being the weight scale.
  rec_weight_scale (optional): Recurrent weights are scaled by ws/sqrt(#inputs), with ws being the weight scale.
  clip_value (optional): If the recurrent values grow above this value, clip them.
  input_collections (optional): List of additional collections that input->rec weight variables should belong to.
  recurrent_collections (optional): List of additional collections that rec->rec weight variables should belong to.'
def __init__(self, num_units, forget_bias=1.0, input_weight_scale=1.0, rec_weight_scale=1.0, clip_value=np.inf, input_collections=None, recurrent_collections=None):
self._num_units = num_units
self._forget_bias = forget_bias
self._input_weight_scale = input_weight_scale
self._rec_weight_scale = rec_weight_scale
self._clip_value = clip_value
self._input_collections = input_collections
self._rec_collections = recurrent_collections
'Return the output portion of the state.'
def output_from_state(self, state):
return state
'Gated recurrent unit (GRU) function. Args: inputs: A 2D batch x input_dim tensor of inputs. state: The previous state from the last time step. scope (optional): TF variable scope for defined GRU variables. Returns: A tuple (state, state), where state is the newly computed state at time t. It is returned twice to respect an interface that works for LSTMs.'
def __call__(self, inputs, state, scope=None):
x = inputs
h = state
with tf.variable_scope(scope or type(self).__name__):
    with tf.variable_scope('Gates'):
        r_x = u_x = 0.0
        if x is not None:
            r_x, u_x = tf.split(
                axis=1,
                num_or_size_splits=2,
                value=linear(
                    x,
                    2 * self._num_units,
                    alpha=self._input_weight_scale,
                    do_bias=False,
                    name='x_2_ru',
                    normalized=False,
                    collections=self._input_collections))
        r_h, u_h = tf.split(
            axis=1,
            num_or_size_splits=2,
            value=linear(
                h,
                2 * self._num_units,
                do_bias=True,
                alpha=self._rec_weight_scale,
                name='h_2_ru',
                collections=self._rec_collections))
        r = r_x + r_h
        u = u_x + u_h
        r, u = tf.sigmoid(r), tf.sigmoid(u + self._forget_bias)
    with tf.variable_scope('Candidate'):
        c_x = 0.0
        if x is not None:
            c_x = linear(
                x,
                self._num_units,
                name='x_2_c',
                do_bias=False,
                alpha=self._input_weight_scale,
                normalized=False,
                collections=self._input_collections)
        c_rh = linear(
            r * h,
            self._num_units,
            name='rh_2_c',
            do_bias=True,
            alpha=self._rec_weight_scale,
            collections=self._rec_collections)
        c = tf.tanh(c_x + c_rh)
    new_h = u * h + (1 - u) * c
    new_h = tf.clip_by_value(new_h, -self._clip_value, self._clip_value)
return (new_h, new_h)
'Create an LFADS model.

  train - a model for training; sampling of posteriors is used.
  posterior_sample_and_average - sample from the posterior; this is used for evaluating the expected value of the outputs of LFADS, given a specific input, by averaging over multiple samples from the approximate posterior. Also used for the lower bound on the negative log-likelihood using IWAE error (Importance Weighted Auto-encoder). This is the denoising operation.
  prior_sample - a model for generation; sampling from priors is used.

Args:
  hps: The dictionary of hyperparameters.
  kind: the type of model to build (see above).
  datasets: a dictionary of named data_dictionaries, see top of lfads.py'
def __init__(self, hps, kind='train', datasets=None):
print('Building graph...')
all_kinds = ['train', 'posterior_sample_and_average', 'prior_sample']
assert kind in all_kinds, 'Wrong kind'
if hps.feedback_factors_or_rates == 'rates':
    assert len(hps.dataset_names) == 1, \
        'Multiple datasets not supported for rate feedback.'
num_steps = hps.num_steps
ic_dim = hps.ic_dim
co_dim = hps.co_dim
ext_input_dim = hps.ext_input_dim
cell_class = GRU
gen_cell_class = GenGRU

def makelambda(v):  # Binds the current value of v for use in tf.case below.
    return lambda: v

self.dataName = tf.placeholder(tf.string, shape=())
if hps.output_dist == 'poisson':
    assert np.issubdtype(
        datasets[hps.dataset_names[0]]['train_data'].dtype, int), \
        'Data dtype must be int for poisson output distribution'
    data_dtype = tf.int32
elif hps.output_dist == 'gaussian':
    assert np.issubdtype(
        datasets[hps.dataset_names[0]]['train_data'].dtype, float), \
        'Data dtype must be float for gaussian output distribution'
    data_dtype = tf.float32
else:
    assert False, 'NIY'
self.dataset_ph = dataset_ph = tf.placeholder(
    data_dtype, [None, num_steps, None], name='data')
self.train_step = tf.get_variable(
    'global_step', [], tf.int64, tf.zeros_initializer(), trainable=False)
self.hps = hps
ndatasets = hps.ndatasets
factors_dim = hps.factors_dim
self.preds = preds = [None] * ndatasets
self.fns_in_fac_Ws = fns_in_fac_Ws = [None] * ndatasets
self.fns_in_fac_bs = fns_in_fac_bs = [None] * ndatasets
self.fns_out_fac_Ws = fns_out_fac_Ws = [None] * ndatasets
self.fns_out_fac_bs = fns_out_fac_bs = [None] * ndatasets
self.datasetNames = dataset_names = hps.dataset_names
self.ext_inputs = ext_inputs = None

if len(dataset_names) == 1:
    if 'alignment_matrix_cxf' in datasets[dataset_names[0]].keys():
        used_in_factors_dim = factors_dim
        in_identity_if_poss = False
    else:
        used_in_factors_dim = hps.dataset_dims[dataset_names[0]]
        in_identity_if_poss = True
else:
    used_in_factors_dim = factors_dim
    in_identity_if_poss = False

for d, name in enumerate(dataset_names):
    data_dim = hps.dataset_dims[name]
    in_mat_cxf = None
    in_bias_1xf = None
    align_bias_1xc = None
    if datasets and 'alignment_matrix_cxf' in datasets[name].keys():
        dataset = datasets[name]
        print('Using alignment matrix provided for dataset:', name)
        in_mat_cxf = dataset['alignment_matrix_cxf'].astype(np.float32)
        if in_mat_cxf.shape != (data_dim, factors_dim):
            raise ValueError(
                'Alignment matrix must have dimensions %d x %d (data_dim x '
                'factors_dim), but currently has %d x %d.' %
                (data_dim, factors_dim, in_mat_cxf.shape[0],
                 in_mat_cxf.shape[1]))
    if datasets and 'alignment_bias_c' in datasets[name].keys():
        dataset = datasets[name]
        print('Using alignment bias provided for dataset:', name)
        align_bias_c = dataset['alignment_bias_c'].astype(np.float32)
        align_bias_1xc = np.expand_dims(align_bias_c, axis=0)
        if align_bias_1xc.shape[1] != data_dim:
            raise ValueError(
                'Alignment bias must have dimensions %d (data_dim), but '
                'currently has %d.' % (data_dim, in_mat_cxf.shape[0]))
    if in_mat_cxf is not None and align_bias_1xc is not None:
        in_bias_1xf = -np.dot(align_bias_1xc, in_mat_cxf)
    in_fac_lin = init_linear(
        data_dim,
        used_in_factors_dim,
        do_bias=True,
        mat_init_value=in_mat_cxf,
        bias_init_value=in_bias_1xf,
        identity_if_possible=in_identity_if_poss,
        normalized=False,
        name='x_2_infac_' + name,
        collections=['IO_transformations'])
    in_fac_W, in_fac_b = in_fac_lin
    fns_in_fac_Ws[d] = makelambda(in_fac_W)
    fns_in_fac_bs[d] = makelambda(in_fac_b)

with tf.variable_scope('glm'):
    out_identity_if_poss = False
    if (len(dataset_names) == 1 and
            factors_dim == hps.dataset_dims[dataset_names[0]]):
        out_identity_if_poss = True
    for d, name in enumerate(dataset_names):
        data_dim = hps.dataset_dims[name]
        in_mat_cxf = None
        if datasets and 'alignment_matrix_cxf' in datasets[name].keys():
            dataset = datasets[name]
            in_mat_cxf = dataset['alignment_matrix_cxf'].astype(np.float32)
        if datasets and 'alignment_bias_c' in datasets[name].keys():
            dataset = datasets[name]
            align_bias_c = dataset['alignment_bias_c'].astype(np.float32)
            align_bias_1xc = np.expand_dims(align_bias_c, axis=0)
        out_mat_fxc = None
        out_bias_1xc = None
        if in_mat_cxf is not None:
            out_mat_fxc = np.linalg.pinv(in_mat_cxf)
        if align_bias_1xc is not None:
            out_bias_1xc = align_bias_1xc
        if hps.output_dist == 'poisson':
            out_fac_lin = init_linear(
                factors_dim,
                data_dim,
                do_bias=True,
                mat_init_value=out_mat_fxc,
                bias_init_value=out_bias_1xc,
                identity_if_possible=out_identity_if_poss,
                normalized=False,
                name='fac_2_logrates_' + name,
                collections=['IO_transformations'])
            out_fac_W, out_fac_b = out_fac_lin
        elif hps.output_dist == 'gaussian':
            out_fac_lin_mean = init_linear(
                factors_dim,
                data_dim,
                do_bias=True,
                mat_init_value=out_mat_fxc,
                bias_init_value=out_bias_1xc,
                normalized=False,
                name='fac_2_means_' + name,
                collections=['IO_transformations'])
            out_fac_W_mean, out_fac_b_mean = out_fac_lin_mean
            mat_init_value = np.zeros([factors_dim, data_dim]).astype(np.float32)
            bias_init_value = np.ones([1, data_dim]).astype(np.float32)
            out_fac_lin_logvar = init_linear(
                factors_dim,
                data_dim,
                do_bias=True,
                mat_init_value=mat_init_value,
                bias_init_value=bias_init_value,
                normalized=False,
                name='fac_2_logvars_' + name,
                collections=['IO_transformations'])
            out_fac_W_logvar, out_fac_b_logvar = out_fac_lin_logvar
            out_fac_W = tf.concat(
                axis=1, values=[out_fac_W_mean, out_fac_W_logvar])
            out_fac_b = tf.concat(
                axis=1, values=[out_fac_b_mean, out_fac_b_logvar])
        else:
            assert False, 'NIY'
        preds[d] = tf.equal(tf.constant(name), self.dataName)
        data_dim = hps.dataset_dims[name]
        fns_out_fac_Ws[d] = makelambda(out_fac_W)
        fns_out_fac_bs[d] = makelambda(out_fac_b)

pf_pairs_in_fac_Ws = zip(preds, fns_in_fac_Ws)
pf_pairs_in_fac_bs = zip(preds, fns_in_fac_bs)
pf_pairs_out_fac_Ws = zip(preds, fns_out_fac_Ws)
pf_pairs_out_fac_bs = zip(preds, fns_out_fac_bs)

def _case_with_no_default(pairs):
    def _default_value_fn():
        with tf.control_dependencies([tf.Assert(False, ['Reached default'])]):
            return tf.identity(pairs[0][1]())
    return tf.case(pairs, _default_value_fn, exclusive=True)

this_in_fac_W = _case_with_no_default(pf_pairs_in_fac_Ws)
this_in_fac_b = _case_with_no_default(pf_pairs_in_fac_bs)
this_out_fac_W = _case_with_no_default(pf_pairs_out_fac_Ws)
this_out_fac_b = _case_with_no_default(pf_pairs_out_fac_bs)

if hps.ext_input_dim > 0:
    self.ext_input = tf.placeholder(
        tf.float32, [None, num_steps, ext_input_dim], name='ext_input')
else:
    self.ext_input = None
ext_input_bxtxi = self.ext_input

self.keep_prob = keep_prob = tf.placeholder(tf.float32, [], 'keep_prob')
self.batch_size = batch_size = int(hps.batch_size)
self.learning_rate = tf.Variable(
    float(hps.learning_rate_init), trainable=False, name='learning_rate')
self.learning_rate_decay_op = self.learning_rate.assign(
    self.learning_rate * hps.learning_rate_decay_factor)

dataset_do_bxtxd = tf.nn.dropout(tf.to_float(dataset_ph), keep_prob)
if hps.ext_input_dim > 0:
    ext_input_do_bxtxi = tf.nn.dropout(ext_input_bxtxi, keep_prob)
else:
    ext_input_do_bxtxi = None

def encode_data(dataset_bxtxd, enc_cell, name, forward_or_reverse,
                num_steps_to_encode):
    """Encode data for LFADS.
    Args:
      dataset_bxtxd: the data to encode, as a 3-tensor, with dims
        batch x time x data dims.
      enc_cell: encoder cell
      name: name of encoder
      forward_or_reverse: string, encode in forward or reverse direction
      num_steps_to_encode: number of steps to encode, 0:num_steps_to_encode
    Returns:
      encoded data as a list with num_steps_to_encode items, in order
    """
    if forward_or_reverse == 'forward':
        dstr = '_fwd'
        time_fwd_or_rev = range(num_steps_to_encode)
    else:
        dstr = '_rev'
        time_fwd_or_rev = reversed(range(num_steps_to_encode))
    with tf.variable_scope(name + '_enc' + dstr, reuse=False):
        enc_state = tf.tile(
            tf.Variable(tf.zeros([1, enc_cell.state_size]),
                        name=name + '_enc_t0' + dstr),
            tf.stack([batch_size, 1]))
        enc_state.set_shape([None, enc_cell.state_size])
    enc_outs = [None] * num_steps_to_encode
    for i, t in enumerate(time_fwd_or_rev):
        with tf.variable_scope(name + '_enc' + dstr,
                               reuse=True if i > 0 else None):
            dataset_t_bxd = dataset_bxtxd[:, t, :]
            in_fac_t_bxf = tf.matmul(dataset_t_bxd, this_in_fac_W) + this_in_fac_b
            in_fac_t_bxf.set_shape([None, used_in_factors_dim])
            if ext_input_dim > 0 and not hps.inject_ext_input_to_gen:
                ext_input_t_bxi = ext_input_do_bxtxi[:, t, :]
                enc_input_t_bxfpe = tf.concat(
                    axis=1, values=[in_fac_t_bxf, ext_input_t_bxi])
            else:
                enc_input_t_bxfpe = in_fac_t_bxf
            enc_out, enc_state = enc_cell(enc_input_t_bxfpe, enc_state)
            enc_outs[t] = enc_out
    return enc_outs

self.ic_enc_fwd = [None] * num_steps
self.ic_enc_rev = [None] * num_steps
if ic_dim > 0:
    enc_ic_cell = cell_class(
        hps.ic_enc_dim,
        weight_scale=hps.cell_weight_scale,
        clip_value=hps.cell_clip_value)
    ic_enc_fwd = encode_data(dataset_do_bxtxd, enc_ic_cell, 'ic', 'forward',
                             hps.num_steps_for_gen_ic)
    ic_enc_rev = encode_data(dataset_do_bxtxd, enc_ic_cell, 'ic', 'reverse',
                             hps.num_steps_for_gen_ic)
    self.ic_enc_fwd = ic_enc_fwd
    self.ic_enc_rev = ic_enc_rev

self.ci_enc_fwd = [None] * num_steps
self.ci_enc_rev = [None] * num_steps
if co_dim > 0:
    enc_ci_cell = cell_class(
        hps.ci_enc_dim,
        weight_scale=hps.cell_weight_scale,
        clip_value=hps.cell_clip_value)
    ci_enc_fwd = encode_data(dataset_do_bxtxd, enc_ci_cell, 'ci', 'forward',
                             hps.num_steps)
    if hps.do_causal_controller:
        ci_enc_rev = None
    else:
        ci_enc_rev = encode_data(dataset_do_bxtxd, enc_ci_cell, 'ci',
                                 'reverse', hps.num_steps)
    self.ci_enc_fwd = ci_enc_fwd
    self.ci_enc_rev = ci_enc_rev

with tf.variable_scope('z', reuse=False):
    self.prior_zs_g0 = None
    self.posterior_zs_g0 = None
    self.g0s_val = None
    if ic_dim > 0:
        self.prior_zs_g0 = LearnableDiagonalGaussian(
            batch_size,
            ic_dim,
            name='prior_g0',
            mean_init=0.0,
            var_min=hps.ic_prior_var_min,
            var_init=hps.ic_prior_var_scale,
            var_max=hps.ic_prior_var_max)
        ic_enc = tf.concat(axis=1, values=[ic_enc_fwd[-1], ic_enc_rev[0]])
        ic_enc = tf.nn.dropout(ic_enc, keep_prob)
        self.posterior_zs_g0 = DiagonalGaussianFromInput(
            ic_enc, ic_dim, 'ic_enc_2_post_g0', var_min=hps.ic_post_var_min)
        if kind in ['train', 'posterior_sample_and_average']:
            zs_g0 = self.posterior_zs_g0
        else:
            zs_g0 = self.prior_zs_g0
        if kind in ['train', 'posterior_sample_and_average', 'prior_sample']:
            self.g0s_val = zs_g0.sample
        else:
            self.g0s_val = zs_g0.mean

self.prior_zs_co = prior_zs_co = [None] * num_steps
self.posterior_zs_co = posterior_zs_co = [None] * num_steps
self.zs_co = zs_co = [None] * num_steps
self.prior_zs_ar_con = None
if co_dim > 0:
    autocorrelation_taus = [hps.prior_ar_atau for x in range(hps.co_dim)]
    noise_variances = [hps.prior_ar_nvar for x in range(hps.co_dim)]
    self.prior_zs_ar_con = prior_zs_ar_con = LearnableAutoRegressive1Prior(
        batch_size, hps.co_dim, autocorrelation_taus, noise_variances,
        hps.do_train_prior_ar_atau, hps.do_train_prior_ar_nvar, num_steps,
        'u_prior_ar1')

self.controller_outputs = u_t = [None] * num_steps
self.con_ics = con_state = None
self.con_states = con_states = [None] * num_steps
self.con_outs = con_outs = [None] * num_steps
self.gen_inputs = gen_inputs = [None] * num_steps
if co_dim > 0:
    con_cell = gen_cell_class(
        hps.con_dim,
        input_weight_scale=hps.cell_weight_scale,
        rec_weight_scale=hps.cell_weight_scale,
        clip_value=hps.cell_clip_value,
        recurrent_collections=['l2_con_reg'])
    with tf.variable_scope('con', reuse=False):
        self.con_ics = tf.tile(
            tf.Variable(
                tf.zeros([1, hps.con_dim * con_cell.state_multiplier]),
                name='c0'),
            tf.stack([batch_size, 1]))
        self.con_ics.set_shape([None, con_cell.state_size])
    con_states[-1] = self.con_ics

gen_cell = gen_cell_class(
    hps.gen_dim,
    input_weight_scale=hps.gen_cell_input_weight_scale,
    rec_weight_scale=hps.gen_cell_rec_weight_scale,
    clip_value=hps.cell_clip_value,
    recurrent_collections=['l2_gen_reg'])
with tf.variable_scope('gen', reuse=False):
    if ic_dim == 0:
        self.gen_ics = tf.tile(
            tf.Variable(tf.zeros([1, gen_cell.state_size]), name='g0'),
            tf.stack([batch_size, 1]))
    else:
        self.gen_ics = linear(
            self.g0s_val,
            gen_cell.state_size,
            identity_if_possible=True,
            name='g0_2_gen_ic')
    self.gen_states = gen_states = [None] * num_steps
    self.gen_outs = gen_outs = [None] * num_steps
    gen_states[-1] = self.gen_ics
    gen_outs[-1] = gen_cell.output_from_state(gen_states[-1])
    self.factors = factors = [None] * num_steps
    factors[-1] = linear(
        gen_outs[-1], factors_dim, do_bias=False, normalized=True,
        name='gen_2_fac')
self.rates = rates = [None] * num_steps
with tf.variable_scope('glm', reuse=False):
    if hps.output_dist == 'poisson':
        log_rates_t0 = tf.matmul(factors[-1], this_out_fac_W) + this_out_fac_b
        log_rates_t0.set_shape([None, None])
        rates[-1] = tf.exp(log_rates_t0)
        rates[-1].set_shape([None, hps.dataset_dims[hps.dataset_names[0]]])
    elif hps.output_dist == 'gaussian':
        mean_n_logvars = tf.matmul(factors[-1], this_out_fac_W) + this_out_fac_b
        mean_n_logvars.set_shape([None, None])
        means_t_bxd, logvars_t_bxd = tf.split(
            axis=1, num_or_size_splits=2, value=mean_n_logvars)
        rates[-1] = means_t_bxd
    else:
        assert False, 'NIY'

self.output_dist_params = dist_params = [None] * num_steps
self.log_p_xgz_b = log_p_xgz_b = 0.0
for t in range(num_steps):
    if co_dim > 0:
        tlag = t - hps.controller_input_lag
        if tlag < 0:
            con_in_f_t = tf.zeros_like(ci_enc_fwd[0])
        else:
            con_in_f_t = ci_enc_fwd[tlag]
        if hps.do_causal_controller:
            con_in_list_t = [con_in_f_t]
        else:
            tlag_rev = t + hps.controller_input_lag
            if tlag_rev >= num_steps:
                con_in_r_t = tf.zeros_like(ci_enc_rev[0])
            else:
                con_in_r_t = ci_enc_rev[tlag_rev]
            con_in_list_t = [con_in_f_t, con_in_r_t]
        if hps.do_feed_factors_to_controller:
            if hps.feedback_factors_or_rates == 'factors':
                con_in_list_t.append(factors[t - 1])
            elif hps.feedback_factors_or_rates == 'rates':
                con_in_list_t.append(rates[t - 1])
            else:
                assert False, 'NIY'
        con_in_t = tf.concat(axis=1, values=con_in_list_t)
        con_in_t = tf.nn.dropout(con_in_t, keep_prob)
        with tf.variable_scope('con', reuse=True if t > 0 else None):
            con_outs[t], con_states[t] = con_cell(con_in_t, con_states[t - 1])
            posterior_zs_co[t] = DiagonalGaussianFromInput(
                con_outs[t], co_dim, name='con_to_post_co')
        if kind == 'train':
            u_t[t] = posterior_zs_co[t].sample
        elif kind == 'posterior_sample_and_average':
            u_t[t] = posterior_zs_co[t].sample
        else:
            u_t[t] = prior_zs_ar_con.samples_t[t]
    if ext_input_dim > 0 and hps.inject_ext_input_to_gen:
        ext_input_t_bxi = ext_input_do_bxtxi[:, t, :]
        if co_dim > 0:
            gen_inputs[t] = tf.concat(
                axis=1, values=[u_t[t], ext_input_t_bxi])
        else:
            gen_inputs[t] = ext_input_t_bxi
    else:
        gen_inputs[t] = u_t[t]
    data_t_bxd = dataset_ph[:, t, :]
    with tf.variable_scope('gen', reuse=True if t > 0 else None):
        gen_outs[t], gen_states[t] = gen_cell(gen_inputs[t], gen_states[t - 1])
        gen_outs[t] = tf.nn.dropout(gen_outs[t], keep_prob)
    with tf.variable_scope('gen', reuse=True):
        factors[t] = linear(
            gen_outs[t], factors_dim, do_bias=False, normalized=True,
            name='gen_2_fac')
    with tf.variable_scope('glm', reuse=True if t > 0 else None):
        if hps.output_dist == 'poisson':
            log_rates_t = tf.matmul(factors[t], this_out_fac_W) + this_out_fac_b
            log_rates_t.set_shape([None, None])
            rates[t] = dist_params[t] = tf.exp(log_rates_t)
            rates[t].set_shape([None, hps.dataset_dims[hps.dataset_names[0]]])
            loglikelihood_t = Poisson(log_rates_t).logp(data_t_bxd)
        elif hps.output_dist == 'gaussian':
            mean_n_logvars = tf.matmul(factors[t], this_out_fac_W) + this_out_fac_b
            mean_n_logvars.set_shape([None, None])
            means_t_bxd, logvars_t_bxd = tf.split(
                axis=1, num_or_size_splits=2, value=mean_n_logvars)
            rates[t] = means_t_bxd
            dist_params[t] = tf.concat(
                axis=1, values=[means_t_bxd, tf.exp(logvars_t_bxd)])
            loglikelihood_t = diag_gaussian_log_likelihood(
                data_t_bxd, means_t_bxd, logvars_t_bxd)
        else:
            assert False, 'NIY'
    log_p_xgz_b += tf.reduce_sum(loglikelihood_t, [1])

self.corr_cost = tf.constant(0.0)
if hps.co_mean_corr_scale > 0.0:
    all_sum_corr = []
    for i in range(hps.co_dim):
        for j in range(i + 1, hps.co_dim):
            sum_corr_ij = tf.constant(0.0)
            for t in range(num_steps):
                u_mean_t = posterior_zs_co[t].mean
                sum_corr_ij += u_mean_t[:, i] * u_mean_t[:, j]
            all_sum_corr.append(0.5 * tf.square(sum_corr_ij))
    self.corr_cost = tf.reduce_mean(all_sum_corr)

kl_cost_g0_b = tf.zeros_like(batch_size, dtype=tf.float32)
kl_cost_co_b = tf.zeros_like(batch_size, dtype=tf.float32)
self.kl_cost = tf.constant(0.0)
self.recon_cost = tf.constant(0.0)
self.nll_bound_vae = tf.constant(0.0)
self.nll_bound_iwae = tf.constant(0.0)
if kind in ['train', 'posterior_sample_and_average']:
    kl_cost_g0_b = 0.0
    kl_cost_co_b = 0.0
    if ic_dim > 0:
        g0_priors = [self.prior_zs_g0]
        g0_posts = [self.posterior_zs_g0]
        kl_cost_g0_b = KLCost_GaussianGaussian(g0_posts, g0_priors).kl_cost_b
        kl_cost_g0_b = hps.kl_ic_weight * kl_cost_g0_b
    if co_dim > 0:
        kl_cost_co_b = KLCost_GaussianGaussianProcessSampled(
            posterior_zs_co, prior_zs_ar_con).kl_cost_b
        kl_cost_co_b = hps.kl_co_weight * kl_cost_co_b
    self.recon_cost = -tf.reduce_mean(log_p_xgz_b)
    self.kl_cost = tf.reduce_mean(kl_cost_g0_b + kl_cost_co_b)
    lb_on_ll_b = log_p_xgz_b - kl_cost_g0_b - kl_cost_co_b
    self.nll_bound_vae = -tf.reduce_mean(lb_on_ll_b)
    k = tf.cast(tf.shape(log_p_xgz_b)[0], tf.float32)
    iwae_lb_on_ll = -tf.log(k) + log_sum_exp(lb_on_ll_b)
    self.nll_bound_iwae = -iwae_lb_on_ll

self.l2_cost = tf.constant(0.0)
if self.hps.l2_gen_scale > 0.0 or self.hps.l2_con_scale > 0.0:
    l2_costs = []
    l2_numels = []
    l2_reg_var_lists = [
        tf.get_collection('l2_gen_reg'),
        tf.get_collection('l2_con_reg')
    ]
    l2_reg_scales = [self.hps.l2_gen_scale, self.hps.l2_con_scale]
    for l2_reg_vars, l2_scale in zip(l2_reg_var_lists, l2_reg_scales):
        for v in l2_reg_vars:
            numel = tf.reduce_prod(tf.concat(axis=0, values=tf.shape(v)))
            numel_f = tf.cast(numel, tf.float32)
            l2_numels.append(numel_f)
            v_l2 = tf.reduce_sum(v * v)
            l2_costs.append(0.5 * l2_scale * v_l2)
    self.l2_cost = tf.add_n(l2_costs) / tf.add_n(l2_numels)

self.kl_decay_step = tf.maximum(self.train_step - hps.kl_start_step, 0)
self.l2_decay_step = tf.maximum(self.train_step - hps.l2_start_step, 0)
kl_decay_step_f = tf.cast(self.kl_decay_step, tf.float32)
l2_decay_step_f = tf.cast(self.l2_decay_step, tf.float32)
kl_increase_steps_f = tf.cast(hps.kl_increase_steps, tf.float32)
l2_increase_steps_f = tf.cast(hps.l2_increase_steps, tf.float32)
self.kl_weight = kl_weight = tf.minimum(
    kl_decay_step_f / kl_increase_steps_f, 1.0)
self.l2_weight = l2_weight = tf.minimum(
    l2_decay_step_f / l2_increase_steps_f, 1.0)
self.timed_kl_cost = kl_weight * self.kl_cost
self.timed_l2_cost = l2_weight * self.l2_cost
self.weight_corr_cost = hps.co_mean_corr_scale * self.corr_cost
self.cost = (self.recon_cost + self.timed_kl_cost + self.timed_l2_cost +
             self.weight_corr_cost)

if kind != 'train':
    self.seso_saver = tf.train.Saver(
        tf.global_variables(), max_to_keep=hps.max_ckpt_to_keep)
    self.lve_saver = tf.train.Saver(
        tf.global_variables(), max_to_keep=hps.max_ckpt_to_keep_lve)
    return

if not self.hps.do_train_io_only:
    self.train_vars = tvars = tf.get_collection(
        tf.GraphKeys.TRAINABLE_VARIABLES, scope=tf.get_variable_scope().name)
else:
    self.train_vars = tvars = tf.get_collection(
        'IO_transformations', scope=tf.get_variable_scope().name)
print('done.')
print('Model Variables (to be optimized): ')
total_params = 0
for i in range(len(tvars)):
    shape = tvars[i].get_shape().as_list()
    print(' ', i, tvars[i].name, shape)
    total_params += np.prod(shape)
print('Total model parameters: ', total_params)

grads = tf.gradients(self.cost, tvars)
grads, grad_global_norm = tf.clip_by_global_norm(grads, hps.max_grad_norm)
opt = tf.train.AdamOptimizer(
    self.learning_rate, beta1=0.9, beta2=0.999, epsilon=0.1)
self.grads = grads
self.grad_global_norm = grad_global_norm
self.train_op = opt.apply_gradients(
    zip(grads, tvars), global_step=self.train_step)

self.seso_saver = tf.train.Saver(
    tf.global_variables(), max_to_keep=hps.max_ckpt_to_keep)
self.lve_saver = tf.train.Saver(
    tf.global_variables(), max_to_keep=hps.max_ckpt_to_keep)

self.example_image = tf.placeholder(
    tf.float32, shape=[1, None, None, 3], name='image_tensor')
self.example_summ = tf.summary.image(
    'LFADS example', self.example_image, collections=['example_summaries'])

self.lr_summ = tf.summary.scalar('Learning rate', self.learning_rate)
self.kl_weight_summ = tf.summary.scalar('KL weight', self.kl_weight)
self.l2_weight_summ = tf.summary.scalar('L2 weight', self.l2_weight)
self.corr_cost_summ = tf.summary.scalar('Corr cost', self.weight_corr_cost)
self.grad_global_norm_summ = tf.summary.scalar('Gradient global norm',
                                               self.grad_global_norm)
if hps.co_dim > 0:
    self.atau_summ = [None] * hps.co_dim
    self.pvar_summ = [None] * hps.co_dim
    for c in range(hps.co_dim):
        self.atau_summ[c] = tf.summary.scalar(
            'AR Autocorrelation taus ' + str(c),
            tf.exp(self.prior_zs_ar_con.logataus_1xu[0, c]))
        self.pvar_summ[c] = tf.summary.scalar(
            'AR Variances ' + str(c),
            tf.exp(self.prior_zs_ar_con.logpvars_1xu[0, c]))

kl_cost_ph = tf.placeholder(tf.float32, shape=[], name='kl_cost_ph')
self.kl_t_cost_summ = tf.summary.scalar(
    'KL cost (train)', kl_cost_ph, collections=['train_summaries'])
self.kl_v_cost_summ = tf.summary.scalar(
    'KL cost (valid)', kl_cost_ph, collections=['valid_summaries'])
l2_cost_ph = tf.placeholder(tf.float32, shape=[], name='l2_cost_ph')
self.l2_cost_summ = tf.summary.scalar(
    'L2 cost', l2_cost_ph, collections=['train_summaries'])
recon_cost_ph = tf.placeholder(tf.float32, shape=[], name='recon_cost_ph')
self.recon_t_cost_summ = tf.summary.scalar(
    'Reconstruction cost (train)', recon_cost_ph,
    collections=['train_summaries'])
self.recon_v_cost_summ = tf.summary.scalar(
    'Reconstruction cost (valid)', recon_cost_ph,
    collections=['valid_summaries'])
total_cost_ph = tf.placeholder(tf.float32, shape=[], name='total_cost_ph')
self.cost_t_summ = tf.summary.scalar(
    'Total cost (train)', total_cost_ph, collections=['train_summaries'])
self.cost_v_summ = tf.summary.scalar(
    'Total cost (valid)', total_cost_ph, collections=['valid_summaries'])
self.kl_cost_ph = kl_cost_ph
self.l2_cost_ph = l2_cost_ph
self.recon_cost_ph = recon_cost_ph
self.total_cost_ph = total_cost_ph

self.merged_examples = tf.summary.merge_all(key='example_summaries')
self.merged_generic = tf.summary.merge_all()
self.merged_train = tf.summary.merge_all(key='train_summaries')
self.merged_valid = tf.summary.merge_all(key='valid_summaries')

session = tf.get_default_session()
self.logfile = os.path.join(hps.lfads_save_dir, 'lfads_log')
self.writer = tf.summary.FileWriter(self.logfile)
'Build the feed dictionary, handling cases where no value is defined. Args: train_name: The key into the datasets, to set the tf.case statement for the proper readin / readout matrices. data_bxtxd: The data tensor. ext_input_bxtxi (optional): The external input tensor. keep_prob: The dropout keep probability. Returns: The feed dictionary with TF tensors as keys and data as values, for use with tf.Session.run()'
def build_feed_dict(self, train_name, data_bxtxd, ext_input_bxtxi=None, keep_prob=None):
feed_dict = {} (B, T, _) = data_bxtxd.shape feed_dict[self.dataName] = train_name feed_dict[self.dataset_ph] = data_bxtxd if ((self.ext_input is not None) and (ext_input_bxtxi is not None)): feed_dict[self.ext_input] = ext_input_bxtxi if (keep_prob is None): feed_dict[self.keep_prob] = self.hps.keep_prob else: feed_dict[self.keep_prob] = keep_prob return feed_dict
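A minimal usage sketch (hypothetical names: `model` stands for a constructed LFADS instance and 'dataset_one' for a dataset key; shapes are illustrative only):

import numpy as np

# 8 trials, 100 time steps, 50 neurons.
data_bxtxd = np.random.poisson(1.0, size=(8, 100, 50)).astype(np.float32)
feed_dict = model.build_feed_dict('dataset_one', data_bxtxd, keep_prob=1.0)
# feed_dict maps model.dataName, model.dataset_ph and model.keep_prob to
# concrete values and can be passed straight to session.run(ops, feed_dict).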
'Get a batch of data, either randomly chosen, or specified directly. Args: data_extxd: The data to model, numpy tensors with shape: # examples x # time steps x # dimensions ext_input_extxi (optional): The external inputs, numpy tensor with shape: # examples x # time steps x # external input dimensions batch_size: The size of the batch to return example_idxs (optional): The example indices used to select examples. Returns: A tuple with two parts: 1. Batched data numpy tensor with shape: batch_size x # time steps x # dimensions 2. Batched external input numpy tensor with shape: batch_size x # time steps x # external input dims'
@staticmethod def get_batch(data_extxd, ext_input_extxi=None, batch_size=None, example_idxs=None):
assert ((batch_size is not None) or (example_idxs is not None)), 'Problems' (E, T, D) = data_extxd.shape if (example_idxs is None): example_idxs = np.random.choice(E, batch_size) ext_input_bxtxi = None if (ext_input_extxi is not None): ext_input_bxtxi = ext_input_extxi[example_idxs, :, :] return (data_extxd[example_idxs, :, :], ext_input_bxtxi)
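A sketch of the two calling modes, random batch versus explicit indices (assumes the enclosing model class is named LFADS, per lfads.py; array shapes are hypothetical):

import numpy as np

data_extxd = np.random.randn(100, 50, 20)             # 100 examples
batch, _ = LFADS.get_batch(data_extxd, batch_size=8)  # random choice of 8
fixed, _ = LFADS.get_batch(data_extxd, example_idxs=np.arange(8))
print(batch.shape, fixed.shape)                       # (8, 50, 20) twice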
'Given a number of examples, E, and a batch_size, B, generate indices [0, 1, 2, ... B-1; [B, B+1, ... 2*B-1; returning those indices as a 2-dim tensor shaped like E/B x B. Note that the shape is only correct if E % B == 0. If not, an extra row is generated so that the remainder of examples is included; the extra examples are drawn randomly from the full set (see randomize_example_idxs_mod_batch_size for fully randomized behavior). Args: nexamples: The number of examples to batch up. batch_size: The size of the batch. Returns: 2-dim tensor as described above.'
@staticmethod def example_idxs_mod_batch_size(nexamples, batch_size):
bmrem = (batch_size - (nexamples % batch_size)) bmrem_examples = [] if (bmrem < batch_size): ridxs = np.random.permutation(nexamples)[0:bmrem].astype(np.int32) bmrem_examples = np.sort(ridxs) example_idxs = (range(nexamples) + list(bmrem_examples)) example_idxs_e_x_edivb = np.reshape(example_idxs, [(-1), batch_size]) return (example_idxs_e_x_edivb, bmrem)
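A worked example of the remainder handling (pure numpy, mirroring the body above): with E = 10 and B = 4, bmrem = 4 - (10 % 4) = 2, so two randomly chosen indices pad the list to 12 entries, which reshape to 3 rows of 4:

import numpy as np

nexamples, batch_size = 10, 4
bmrem = batch_size - (nexamples % batch_size)            # 2 slots to fill
ridxs = np.sort(np.random.permutation(nexamples)[0:bmrem].astype(np.int32))
example_idxs = list(range(nexamples)) + list(ridxs)
print(np.reshape(example_idxs, [-1, batch_size]).shape)  # (3, 4)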
'Indices 1:nexamples, randomized, in 2D form of shape = (nexamples / batch_size) x batch_size. The remainder is managed by drawing randomly from 1:nexamples. Args: nexamples: number of examples to randomize batch_size: number of elements in batch Returns: The randomized, properly shaped indices.'
@staticmethod def randomize_example_idxs_mod_batch_size(nexamples, batch_size):
assert (nexamples > batch_size), 'Problems' bmrem = (batch_size - (nexamples % batch_size)) bmrem_examples = [] if (bmrem < batch_size): bmrem_examples = np.random.choice(range(nexamples), size=bmrem, replace=False) example_idxs = (range(nexamples) + list(bmrem_examples)) mixed_example_idxs = np.random.permutation(example_idxs) example_idxs_e_x_edivb = np.reshape(mixed_example_idxs, [(-1), batch_size]) return (example_idxs_e_x_edivb, bmrem)
'Shuffle the spikes in the temporal dimension. This is useful to help the LFADS system avoid overfitting to individual spikes or fast oscillations found in the data that are irrelevant to behavior. A pure \'tabula rasa\' approach would avoid this, but LFADS is sensitive enough to pick up dynamics that you may not want. Args: data_bxtxd: numpy array of spike count data to be shuffled. Returns: S_bxtxd, a numpy array with the same dimensions and contents as data_bxtxd, but shuffled appropriately.'
def shuffle_spikes_in_time(self, data_bxtxd):
(B, T, N) = data_bxtxd.shape w = self.hps.temporal_spike_jitter_width if (w == 0): return data_bxtxd max_counts = np.max(data_bxtxd) S_bxtxd = np.zeros([B, T, N]) for mc in range(1, (max_counts + 1)): idxs = np.nonzero((data_bxtxd >= mc)) data_ones = np.zeros_like(data_bxtxd) data_ones[(data_bxtxd >= mc)] = 1 nfound = len(idxs[0]) shuffles_incrs_in_time = np.random.randint((- w), w, size=nfound) shuffle_tidxs = idxs[1].copy() shuffle_tidxs += shuffles_incrs_in_time shuffle_tidxs[(shuffle_tidxs < 0)] = (- shuffle_tidxs[(shuffle_tidxs < 0)]) shuffle_tidxs[(shuffle_tidxs > (T - 1))] = ((T - 1) - (shuffle_tidxs[(shuffle_tidxs > (T - 1))] - (T - 1))) for iii in zip(idxs[0], shuffle_tidxs, idxs[2]): S_bxtxd[iii] += 1 return S_bxtxd
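One property worth noting: the shuffle only moves spikes along the time axis, so counts per trial and per neuron are preserved. A quick check (a sketch assuming a constructed model instance `model` with hps.temporal_spike_jitter_width > 0 and a modest jitter width relative to T):

import numpy as np

data_bxtxd = np.random.poisson(0.5, size=(2, 50, 10))
shuffled = model.shuffle_spikes_in_time(data_bxtxd)
assert shuffled.sum() == data_bxtxd.sum()  # total counts conserved
# per (trial, neuron) counts are also conserved, only spike times move:
assert np.array_equal(shuffled.sum(axis=1), data_bxtxd.sum(axis=1))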
'Since LFADS supports multiple datasets in the same dynamical model, we have to be careful to use all the data in a single training epoch. But since the datasets may have different data dimensionality, we cannot batch examples from different data dictionaries together. Instead, we generate random batches within each data dictionary, and then randomize these batches while holding onto the dataname, so that when it\'s time to feed the graph, the correct in/out matrices can be selected, per batch. Args: datasets: A dict of data dicts. The dataset dict is simply a name(string)-> data dictionary mapping (See top of lfads.py). kind: \'train\' or \'valid\' Returns: A flat list, in which each element is a pair (\'name\', indices).'
def shuffle_and_flatten_datasets(self, datasets, kind='train'):
batch_size = self.hps.batch_size ndatasets = len(datasets) random_example_idxs = {} epoch_idxs = {} all_name_example_idx_pairs = [] kind_data = (kind + '_data') for (name, data_dict) in datasets.items(): (nexamples, ntime, data_dim) = data_dict[kind_data].shape epoch_idxs[name] = 0 (random_example_idxs, _) = self.randomize_example_idxs_mod_batch_size(nexamples, batch_size) epoch_size = random_example_idxs.shape[0] names = ([name] * epoch_size) all_name_example_idx_pairs += zip(names, random_example_idxs) np.random.shuffle(all_name_example_idx_pairs) return all_name_example_idx_pairs
'Train the model through the entire dataset once. Args: datasets: A dict of data dicts. The dataset dict is simply a name(string)-> data dictionary mapping (See top of lfads.py). batch_size (optional): The batch_size to use do_save_ckpt (optional): Should the routine save a checkpoint on this training epoch? Returns: A tuple with 6 float values: (total cost of the epoch, epoch reconstruction cost, epoch kl cost, KL weight used this training epoch, total l2 cost on generator, and the corresponding weight).'
def train_epoch(self, datasets, batch_size=None, do_save_ckpt=True):
ops_to_eval = [self.cost, self.recon_cost, self.kl_cost, self.kl_weight, self.l2_cost, self.l2_weight, self.train_op] collected_op_values = self.run_epoch(datasets, ops_to_eval, kind='train') total_cost = total_recon_cost = total_kl_cost = 0.0 epoch_size = len(collected_op_values) for op_values in collected_op_values: total_cost += op_values[0] total_recon_cost += op_values[1] total_kl_cost += op_values[2] kl_weight = collected_op_values[(-1)][3] l2_cost = collected_op_values[(-1)][4] l2_weight = collected_op_values[(-1)][5] epoch_total_cost = (total_cost / epoch_size) epoch_recon_cost = (total_recon_cost / epoch_size) epoch_kl_cost = (total_kl_cost / epoch_size) if do_save_ckpt: session = tf.get_default_session() checkpoint_path = os.path.join(self.hps.lfads_save_dir, (self.hps.checkpoint_name + '.ckpt')) self.seso_saver.save(session, checkpoint_path, global_step=self.train_step) return (epoch_total_cost, epoch_recon_cost, epoch_kl_cost, kl_weight, l2_cost, l2_weight)
'Run the model through the entire dataset once. Args: datasets: A dict of data dicts. The dataset dict is simply a name(string)-> data dictionary mapping (See top of lfads.py). ops_to_eval: A list of tensorflow operations that will be evaluated in the tf.session.run() call. batch_size (optional): The batch_size to use do_collect (optional): Should the routine collect all session.run output as a list, and return it? keep_prob (optional): The dropout keep probability. Returns: A list of lists, the internal list is the return for the ops for each session.run() call. The outer list collects over the epoch.'
def run_epoch(self, datasets, ops_to_eval, kind='train', batch_size=None, do_collect=True, keep_prob=None):
hps = self.hps all_name_example_idx_pairs = self.shuffle_and_flatten_datasets(datasets, kind) kind_data = (kind + '_data') kind_ext_input = (kind + '_ext_input') total_cost = total_recon_cost = total_kl_cost = 0.0 session = tf.get_default_session() epoch_size = len(all_name_example_idx_pairs) evaled_ops_list = [] for (name, example_idxs) in all_name_example_idx_pairs: data_dict = datasets[name] data_extxd = data_dict[kind_data] if ((hps.output_dist == 'poisson') and (hps.temporal_spike_jitter_width > 0)): data_extxd = self.shuffle_spikes_in_time(data_extxd) ext_input_extxi = data_dict[kind_ext_input] (data_bxtxd, ext_input_bxtxi) = self.get_batch(data_extxd, ext_input_extxi, example_idxs=example_idxs) feed_dict = self.build_feed_dict(name, data_bxtxd, ext_input_bxtxi, keep_prob=keep_prob) evaled_ops_np = session.run(ops_to_eval, feed_dict=feed_dict) if do_collect: evaled_ops_list.append(evaled_ops_np) return evaled_ops_list
'Plot and summarize stuff in tensorboard. Note that everything done in the current function is otherwise done on a single, randomly selected dataset (except for summary_values, which are passed in). Args: datasets: the dictionary of datasets used in the study. summary_values: These summary values are created from the training loop, and so summarize the entire set of datasets.'
def summarize_all(self, datasets, summary_values):
hps = self.hps tr_kl_cost = summary_values['tr_kl_cost'] tr_recon_cost = summary_values['tr_recon_cost'] tr_total_cost = summary_values['tr_total_cost'] kl_weight = summary_values['kl_weight'] l2_weight = summary_values['l2_weight'] l2_cost = summary_values['l2_cost'] has_any_valid_set = summary_values['has_any_valid_set'] i = summary_values['nepochs'] session = tf.get_default_session() (train_summ, train_step) = session.run([self.merged_train, self.train_step], feed_dict={self.l2_cost_ph: l2_cost, self.kl_cost_ph: tr_kl_cost, self.recon_cost_ph: tr_recon_cost, self.total_cost_ph: tr_total_cost}) self.writer.add_summary(train_summ, train_step) if has_any_valid_set: ev_kl_cost = summary_values['ev_kl_cost'] ev_recon_cost = summary_values['ev_recon_cost'] ev_total_cost = summary_values['ev_total_cost'] eval_summ = session.run(self.merged_valid, feed_dict={self.kl_cost_ph: ev_kl_cost, self.recon_cost_ph: ev_recon_cost, self.total_cost_ph: ev_total_cost}) self.writer.add_summary(eval_summ, train_step) print(('Epoch:%d, step:%d (TRAIN, VALID): total: %.2f, %.2f recon: %.2f, %.2f, kl: %.2f, %.2f, l2: %.5f, kl weight: %.2f, l2 weight: %.2f' % (i, train_step, tr_total_cost, ev_total_cost, tr_recon_cost, ev_recon_cost, tr_kl_cost, ev_kl_cost, l2_cost, kl_weight, l2_weight))) csv_outstr = ('epoch,%d, step,%d, total,%.2f,%.2f, recon,%.2f,%.2f, kl,%.2f,%.2f, l2,%.5f, klweight,%.2f, l2weight,%.2f\n' % (i, train_step, tr_total_cost, ev_total_cost, tr_recon_cost, ev_recon_cost, tr_kl_cost, ev_kl_cost, l2_cost, kl_weight, l2_weight)) else: print(('Epoch:%d, step:%d TRAIN: total: %.2f recon: %.2f, kl: %.2f, l2: %.5f, kl weight: %.2f, l2 weight: %.2f' % (i, train_step, tr_total_cost, tr_recon_cost, tr_kl_cost, l2_cost, kl_weight, l2_weight))) csv_outstr = ('epoch,%d, step,%d, total,%.2f, recon,%.2f, kl,%.2f, l2,%.5f, klweight,%.2f, l2weight,%.2f\n' % (i, train_step, tr_total_cost, tr_recon_cost, tr_kl_cost, l2_cost, kl_weight, l2_weight)) if self.hps.csv_log: csv_file = os.path.join(self.hps.lfads_save_dir, (self.hps.csv_log + '.csv')) with open(csv_file, 'a') as myfile: myfile.write(csv_outstr)
'Plot an image relating to a randomly chosen, specific example. We use posterior sample and average: we take one example, fill a whole batch with that example, sample from the posterior, and then average the quantities.'
def plot_single_example(self, datasets):
hps = self.hps all_data_names = datasets.keys() data_name = np.random.permutation(all_data_names)[0] data_dict = datasets[data_name] has_valid_set = (True if (data_dict['valid_data'] is not None) else False) cf = 1.0 (E, _, _) = data_dict['train_data'].shape eidx = np.random.choice(E) example_idxs = (eidx * np.ones(hps.batch_size, dtype=np.int32)) (train_data_bxtxd, train_ext_input_bxtxi) = self.get_batch(data_dict['train_data'], data_dict['train_ext_input'], example_idxs=example_idxs) truth_train_data_bxtxd = None if (('train_truth' in data_dict) and (data_dict['train_truth'] is not None)): (truth_train_data_bxtxd, _) = self.get_batch(data_dict['train_truth'], example_idxs=example_idxs) cf = data_dict['conversion_factor'] train_model_values = self.eval_model_runs_batch(data_name, train_data_bxtxd, train_ext_input_bxtxi, do_average_batch=False) train_step = train_model_values['train_steps'] feed_dict = self.build_feed_dict(data_name, train_data_bxtxd, train_ext_input_bxtxi, keep_prob=1.0) session = tf.get_default_session() generic_summ = session.run(self.merged_generic, feed_dict=feed_dict) self.writer.add_summary(generic_summ, train_step) valid_data_bxtxd = valid_model_values = valid_ext_input_bxtxi = None truth_valid_data_bxtxd = None if has_valid_set: (E, _, _) = data_dict['valid_data'].shape eidx = np.random.choice(E) example_idxs = (eidx * np.ones(hps.batch_size, dtype=np.int32)) (valid_data_bxtxd, valid_ext_input_bxtxi) = self.get_batch(data_dict['valid_data'], data_dict['valid_ext_input'], example_idxs=example_idxs) if (('valid_truth' in data_dict) and (data_dict['valid_truth'] is not None)): (truth_valid_data_bxtxd, _) = self.get_batch(data_dict['valid_truth'], example_idxs=example_idxs) else: truth_valid_data_bxtxd = None valid_model_values = self.eval_model_runs_batch(data_name, valid_data_bxtxd, valid_ext_input_bxtxi, do_average_batch=False) example_image = plot_lfads(train_bxtxd=train_data_bxtxd, train_model_vals=train_model_values, train_ext_input_bxtxi=train_ext_input_bxtxi, train_truth_bxtxd=truth_train_data_bxtxd, valid_bxtxd=valid_data_bxtxd, valid_model_vals=valid_model_values, valid_ext_input_bxtxi=valid_ext_input_bxtxi, valid_truth_bxtxd=truth_valid_data_bxtxd, bidx=None, cf=cf, output_dist=hps.output_dist) example_image = np.expand_dims(example_image, axis=0) example_summ = session.run(self.merged_examples, feed_dict={self.example_image: example_image}) self.writer.add_summary(example_summ)
'Train the model, print per-epoch information, and save checkpoints. Loop over training epochs. The function that actually does the training is train_epoch. This function iterates over the training data, one epoch at a time. The learning rate schedule is such that it will stay the same until the cost goes up in comparison to the last few values, then it will drop. Args: datasets: A dict of data dicts. The dataset dict is simply a name(string)-> data dictionary mapping (See top of lfads.py).'
def train_model(self, datasets):
hps = self.hps has_any_valid_set = False for data_dict in datasets.values(): if (data_dict['valid_data'] is not None): has_any_valid_set = True break session = tf.get_default_session() lr = session.run(self.learning_rate) lr_stop = hps.learning_rate_stop i = (-1) train_costs = [] valid_costs = [] ev_total_cost = ev_recon_cost = ev_kl_cost = 0.0 lowest_ev_cost = np.Inf while True: i += 1 do_save_ckpt = (True if ((i % 10) == 0) else False) (tr_total_cost, tr_recon_cost, tr_kl_cost, kl_weight, l2_cost, l2_weight) = self.train_epoch(datasets, do_save_ckpt=do_save_ckpt) if has_any_valid_set: (ev_total_cost, ev_recon_cost, ev_kl_cost) = self.eval_cost_epoch(datasets, kind='valid') valid_costs.append(ev_total_cost) n_lve = 1 run_avg_lve = np.mean(valid_costs[(- n_lve):]) if ((kl_weight >= 1.0) and ((l2_weight >= 1.0) or ((self.hps.l2_gen_scale == 0.0) and (self.hps.l2_con_scale == 0.0))) and ((len(valid_costs) > n_lve) and (run_avg_lve < lowest_ev_cost))): lowest_ev_cost = run_avg_lve checkpoint_path = os.path.join(self.hps.lfads_save_dir, (self.hps.checkpoint_name + '_lve.ckpt')) self.lve_saver.save(session, checkpoint_path, global_step=self.train_step, latest_filename='checkpoint_lve') values = {'nepochs': i, 'has_any_valid_set': has_any_valid_set, 'tr_total_cost': tr_total_cost, 'ev_total_cost': ev_total_cost, 'tr_recon_cost': tr_recon_cost, 'ev_recon_cost': ev_recon_cost, 'tr_kl_cost': tr_kl_cost, 'ev_kl_cost': ev_kl_cost, 'l2_weight': l2_weight, 'kl_weight': kl_weight, 'l2_cost': l2_cost} self.summarize_all(datasets, values) self.plot_single_example(datasets) train_res = tr_total_cost n_lr = hps.learning_rate_n_to_compare if ((len(train_costs) > n_lr) and (train_res > np.max(train_costs[(- n_lr):]))): _ = session.run(self.learning_rate_decay_op) lr = session.run(self.learning_rate) print((' Decreasing learning rate to %f.' % lr)) train_costs.append(np.inf) else: train_costs.append(train_res) if (lr < lr_stop): print('Stopping optimization based on learning rate criteria.') break
'Evaluate the cost of the epoch. Args: datasets: The dictionary of data (training and validation) used for training and evaluation of the model, respectively. Returns: a 3 tuple of costs: (epoch total cost, epoch reconstruction cost, epoch KL cost)'
def eval_cost_epoch(self, datasets, kind='train', ext_input_extxi=None, batch_size=None):
ops_to_eval = [self.cost, self.recon_cost, self.kl_cost] collected_op_values = self.run_epoch(datasets, ops_to_eval, kind=kind, keep_prob=1.0) total_cost = total_recon_cost = total_kl_cost = 0.0 epoch_size = len(collected_op_values) for op_values in collected_op_values: total_cost += op_values[0] total_recon_cost += op_values[1] total_kl_cost += op_values[2] epoch_total_cost = (total_cost / epoch_size) epoch_recon_cost = (total_recon_cost / epoch_size) epoch_kl_cost = (total_kl_cost / epoch_size) return (epoch_total_cost, epoch_recon_cost, epoch_kl_cost)
'Returns all the goodies for the entire model, per batch. Args: data_name: The name of the data dict, to select which in/out matrices to use. data_bxtxd: Numpy array training data with shape: batch_size x # time steps x # dimensions ext_input_bxtxi: Numpy array training external input with shape: batch_size x # time steps x # external input dims do_eval_cost (optional): If true, compute the IWAE (Importance Weighted Autoencoder) log likelihood bound, instead of the VAE version. do_average_batch (optional): average over the batch, useful for getting good IWAE costs, and model outputs for a single data point. Returns: A dictionary with the outputs of the model decoder, namely: prior g0 mean, prior g0 variance, approx. posterior mean, approx. posterior variance, the generator initial conditions, the control inputs (if enabled), the state of the generator, the factors, and the rates.'
def eval_model_runs_batch(self, data_name, data_bxtxd, ext_input_bxtxi=None, do_eval_cost=False, do_average_batch=False):
session = tf.get_default_session() feed_dict = self.build_feed_dict(data_name, data_bxtxd, ext_input_bxtxi, keep_prob=1.0) tf_vals = [self.gen_ics, self.gen_states, self.factors, self.output_dist_params] tf_vals.append(self.cost) tf_vals.append(self.nll_bound_vae) tf_vals.append(self.nll_bound_iwae) tf_vals.append(self.train_step) if (self.hps.ic_dim > 0): tf_vals += [self.prior_zs_g0.mean, self.prior_zs_g0.logvar, self.posterior_zs_g0.mean, self.posterior_zs_g0.logvar] if (self.hps.co_dim > 0): tf_vals.append(self.controller_outputs) (tf_vals_flat, fidxs) = flatten(tf_vals) np_vals_flat = session.run(tf_vals_flat, feed_dict=feed_dict) ff = 0 gen_ics = [np_vals_flat[f] for f in fidxs[ff]] ff += 1 gen_states = [np_vals_flat[f] for f in fidxs[ff]] ff += 1 factors = [np_vals_flat[f] for f in fidxs[ff]] ff += 1 out_dist_params = [np_vals_flat[f] for f in fidxs[ff]] ff += 1 costs = [np_vals_flat[f] for f in fidxs[ff]] ff += 1 nll_bound_vaes = [np_vals_flat[f] for f in fidxs[ff]] ff += 1 nll_bound_iwaes = [np_vals_flat[f] for f in fidxs[ff]] ff += 1 train_steps = [np_vals_flat[f] for f in fidxs[ff]] ff += 1 if (self.hps.ic_dim > 0): prior_g0_mean = [np_vals_flat[f] for f in fidxs[ff]] ff += 1 prior_g0_logvar = [np_vals_flat[f] for f in fidxs[ff]] ff += 1 post_g0_mean = [np_vals_flat[f] for f in fidxs[ff]] ff += 1 post_g0_logvar = [np_vals_flat[f] for f in fidxs[ff]] ff += 1 if (self.hps.co_dim > 0): controller_outputs = [np_vals_flat[f] for f in fidxs[ff]] ff += 1 gen_ics = gen_ics[0] costs = costs[0] nll_bound_vaes = nll_bound_vaes[0] nll_bound_iwaes = nll_bound_iwaes[0] train_steps = train_steps[0] gen_states = list_t_bxn_to_tensor_bxtxn(gen_states) factors = list_t_bxn_to_tensor_bxtxn(factors) out_dist_params = list_t_bxn_to_tensor_bxtxn(out_dist_params) if (self.hps.ic_dim > 0): prior_g0_mean = prior_g0_mean[0] prior_g0_logvar = prior_g0_logvar[0] post_g0_mean = post_g0_mean[0] post_g0_logvar = post_g0_logvar[0] if (self.hps.co_dim > 0): controller_outputs = list_t_bxn_to_tensor_bxtxn(controller_outputs) if do_average_batch: gen_ics = np.mean(gen_ics, axis=0) gen_states = np.mean(gen_states, axis=0) factors = np.mean(factors, axis=0) out_dist_params = np.mean(out_dist_params, axis=0) if (self.hps.ic_dim > 0): prior_g0_mean = np.mean(prior_g0_mean, axis=0) prior_g0_logvar = np.mean(prior_g0_logvar, axis=0) post_g0_mean = np.mean(post_g0_mean, axis=0) post_g0_logvar = np.mean(post_g0_logvar, axis=0) if (self.hps.co_dim > 0): controller_outputs = np.mean(controller_outputs, axis=0) model_vals = {} model_vals['gen_ics'] = gen_ics model_vals['gen_states'] = gen_states model_vals['factors'] = factors model_vals['output_dist_params'] = out_dist_params model_vals['costs'] = costs model_vals['nll_bound_vaes'] = nll_bound_vaes model_vals['nll_bound_iwaes'] = nll_bound_iwaes model_vals['train_steps'] = train_steps if (self.hps.ic_dim > 0): model_vals['prior_g0_mean'] = prior_g0_mean model_vals['prior_g0_logvar'] = prior_g0_logvar model_vals['post_g0_mean'] = post_g0_mean model_vals['post_g0_logvar'] = post_g0_logvar if (self.hps.co_dim > 0): model_vals['controller_outputs'] = controller_outputs return model_vals
'Returns the expected values of all the goodies for the entire model. The expected value is taken over hidden (z) variables, namely the initial conditions and the control inputs. The expected value is approximate, and accomplished via sampling (batch_size) samples for every example. Args: data_name: The name of the data dict, to select which in/out matrices to use. data_extxd: Numpy array training data with shape: # examples x # time steps x # dimensions ext_input_extxi (optional): Numpy array training external input with shape: # examples x # time steps x # external input dims Returns: A dictionary with the averaged outputs of the model decoder, namely: prior g0 mean, prior g0 variance, approx. posterior mean, approx. posterior variance, the generator initial conditions, the control inputs (if enabled), the state of the generator, the factors, and the output distribution parameters, e.g. (rates or mean and variances).'
def eval_model_runs_avg_epoch(self, data_name, data_extxd, ext_input_extxi=None):
hps = self.hps batch_size = hps.batch_size (E, T, D) = data_extxd.shape E_to_process = hps.ps_nexamples_to_process if (E_to_process > E): print('Setting number of posterior samples to process to : ', E) E_to_process = E if (hps.ic_dim > 0): prior_g0_mean = np.zeros([E_to_process, hps.ic_dim]) prior_g0_logvar = np.zeros([E_to_process, hps.ic_dim]) post_g0_mean = np.zeros([E_to_process, hps.ic_dim]) post_g0_logvar = np.zeros([E_to_process, hps.ic_dim]) if (hps.co_dim > 0): controller_outputs = np.zeros([E_to_process, T, hps.co_dim]) gen_ics = np.zeros([E_to_process, hps.gen_dim]) gen_states = np.zeros([E_to_process, T, hps.gen_dim]) factors = np.zeros([E_to_process, T, hps.factors_dim]) if (hps.output_dist == 'poisson'): out_dist_params = np.zeros([E_to_process, T, D]) elif (hps.output_dist == 'gaussian'): out_dist_params = np.zeros([E_to_process, T, (D + D)]) else: assert False, 'NIY' costs = np.zeros(E_to_process) nll_bound_vaes = np.zeros(E_to_process) nll_bound_iwaes = np.zeros(E_to_process) train_steps = np.zeros(E_to_process) for es_idx in range(E_to_process): print(('Running %d of %d.' % ((es_idx + 1), E_to_process))) example_idxs = (es_idx * np.ones(batch_size, dtype=np.int32)) (data_bxtxd, ext_input_bxtxi) = self.get_batch(data_extxd, ext_input_extxi, batch_size=batch_size, example_idxs=example_idxs) model_values = self.eval_model_runs_batch(data_name, data_bxtxd, ext_input_bxtxi, do_eval_cost=True, do_average_batch=True) if (self.hps.ic_dim > 0): prior_g0_mean[es_idx, :] = model_values['prior_g0_mean'] prior_g0_logvar[es_idx, :] = model_values['prior_g0_logvar'] post_g0_mean[es_idx, :] = model_values['post_g0_mean'] post_g0_logvar[es_idx, :] = model_values['post_g0_logvar'] gen_ics[es_idx, :] = model_values['gen_ics'] if (self.hps.co_dim > 0): controller_outputs[es_idx, :, :] = model_values['controller_outputs'] gen_states[es_idx, :, :] = model_values['gen_states'] factors[es_idx, :, :] = model_values['factors'] out_dist_params[es_idx, :, :] = model_values['output_dist_params'] costs[es_idx] = model_values['costs'] nll_bound_vaes[es_idx] = model_values['nll_bound_vaes'] nll_bound_iwaes[es_idx] = model_values['nll_bound_iwaes'] train_steps[es_idx] = model_values['train_steps'] print(('bound nll(vae): %.3f, bound nll(iwae): %.3f' % (nll_bound_vaes[es_idx], nll_bound_iwaes[es_idx]))) model_runs = {} if (self.hps.ic_dim > 0): model_runs['prior_g0_mean'] = prior_g0_mean model_runs['prior_g0_logvar'] = prior_g0_logvar model_runs['post_g0_mean'] = post_g0_mean model_runs['post_g0_logvar'] = post_g0_logvar model_runs['gen_ics'] = gen_ics if (self.hps.co_dim > 0): model_runs['controller_outputs'] = controller_outputs model_runs['gen_states'] = gen_states model_runs['factors'] = factors model_runs['output_dist_params'] = out_dist_params model_runs['costs'] = costs model_runs['nll_bound_vaes'] = nll_bound_vaes model_runs['nll_bound_iwaes'] = nll_bound_iwaes model_runs['train_steps'] = train_steps return model_runs
'Run the model on the data in data_dict, and save the computed values. LFADS generates a number of outputs for each example, and these are all saved. They are: The mean and variance of the prior of g0. The mean and variance of the approximate posterior of g0. The control inputs (if enabled) The initial conditions, g0, for all examples. The generator states for all time. The factors for all time. The output distribution parameters (e.g. rates) for all time. Args: datasets: a dictionary of named data_dictionaries, see top of lfads.py output_fname: a file name stem for the output files.'
def write_model_runs(self, datasets, output_fname=None):
hps = self.hps kind = hps.kind for (data_name, data_dict) in datasets.items(): data_tuple = [('train', data_dict['train_data'], data_dict['train_ext_input']), ('valid', data_dict['valid_data'], data_dict['valid_ext_input'])] for (data_kind, data_extxd, ext_input_extxi) in data_tuple: if (not output_fname): fname = ((((('model_runs_' + data_name) + '_') + data_kind) + '_') + kind) else: fname = (((((output_fname + data_name) + '_') + data_kind) + '_') + kind) print(('Writing data for %s data and kind %s.' % (data_name, data_kind))) model_runs = self.eval_model_runs_avg_epoch(data_name, data_extxd, ext_input_extxi) full_fname = os.path.join(hps.lfads_save_dir, fname) write_data(full_fname, model_runs, compression='gzip') print('Done.')
'Use the prior distribution to generate batch_size samples from the model. LFADS generates a number of outputs for each sample, and these are all saved. They are: The mean and variance of the prior of g0. The control inputs (if enabled) The initial conditions, g0, for all examples. The generator states for all time. The factors for all time. The output distribution parameters (e.g. rates) for all time. Args: dataset_name: The name of the dataset to grab the factors -> rates alignment matrices from. output_fname: The name of the file in which to save the generated samples.'
def write_model_samples(self, dataset_name, output_fname=None):
hps = self.hps batch_size = hps.batch_size print(('Generating %d samples' % batch_size)) tf_vals = [self.factors, self.gen_states, self.gen_ics, self.cost, self.output_dist_params] if (hps.ic_dim > 0): tf_vals += [self.prior_zs_g0.mean, self.prior_zs_g0.logvar] if (hps.co_dim > 0): tf_vals += [self.prior_zs_ar_con.samples_t] (tf_vals_flat, fidxs) = flatten(tf_vals) session = tf.get_default_session() feed_dict = {} feed_dict[self.dataName] = dataset_name feed_dict[self.keep_prob] = 1.0 np_vals_flat = session.run(tf_vals_flat, feed_dict=feed_dict) ff = 0 factors = [np_vals_flat[f] for f in fidxs[ff]] ff += 1 gen_states = [np_vals_flat[f] for f in fidxs[ff]] ff += 1 gen_ics = [np_vals_flat[f] for f in fidxs[ff]] ff += 1 costs = [np_vals_flat[f] for f in fidxs[ff]] ff += 1 output_dist_params = [np_vals_flat[f] for f in fidxs[ff]] ff += 1 if (hps.ic_dim > 0): prior_g0_mean = [np_vals_flat[f] for f in fidxs[ff]] ff += 1 prior_g0_logvar = [np_vals_flat[f] for f in fidxs[ff]] ff += 1 if (hps.co_dim > 0): prior_zs_ar_con = [np_vals_flat[f] for f in fidxs[ff]] ff += 1 gen_ics = gen_ics[0] costs = costs[0] gen_states = list_t_bxn_to_tensor_bxtxn(gen_states) factors = list_t_bxn_to_tensor_bxtxn(factors) output_dist_params = list_t_bxn_to_tensor_bxtxn(output_dist_params) if (hps.ic_dim > 0): prior_g0_mean = prior_g0_mean[0] prior_g0_logvar = prior_g0_logvar[0] if (hps.co_dim > 0): prior_zs_ar_con = list_t_bxn_to_tensor_bxtxn(prior_zs_ar_con) model_vals = {} model_vals['gen_ics'] = gen_ics model_vals['gen_states'] = gen_states model_vals['factors'] = factors model_vals['output_dist_params'] = output_dist_params model_vals['costs'] = costs.reshape(1) if (hps.ic_dim > 0): model_vals['prior_g0_mean'] = prior_g0_mean model_vals['prior_g0_logvar'] = prior_g0_logvar if (hps.co_dim > 0): model_vals['prior_zs_ar_con'] = prior_zs_ar_con full_fname = os.path.join(hps.lfads_save_dir, output_fname) write_data(full_fname, model_vals, compression='gzip') print('Done.')
'Evaluate and return all of the TF variables in the model. Args: use_nested (optional): For returning values, use a nested dictionary, based on variable scoping, or return all variables in a flat dictionary. include_strs (optional): A list of strings to use as a filter, to reduce the number of variables returned. A variable name must contain at least one string in include_strs as a sub-string in order to be returned. Returns: The parameters of the model. This can be in a flat dictionary, or a nested dictionary, where the nesting is by variable scope.'
@staticmethod def eval_model_parameters(use_nested=True, include_strs=None):
all_tf_vars = tf.global_variables() session = tf.get_default_session() all_tf_vars_eval = session.run(all_tf_vars) vars_dict = {} strs = ['LFADS'] if include_strs: strs += include_strs for (i, (var, var_eval)) in enumerate(zip(all_tf_vars, all_tf_vars_eval)): if any(((s in var.name) for s in strs)): if (not isinstance(var_eval, np.ndarray)): print(var.name, ' is not numpy array, saving as numpy array\n with value: ', var_eval, type(var_eval)) e = np.array(var_eval) print(e, type(e)) else: e = var_eval vars_dict[var.name] = e if (not use_nested): return vars_dict var_names = vars_dict.keys() nested_vars_dict = {} current_dict = nested_vars_dict for (v, var_name) in enumerate(var_names): var_split_name_list = var_name.split('/') split_name_list_len = len(var_split_name_list) current_dict = nested_vars_dict for (p, part) in enumerate(var_split_name_list): if (p < (split_name_list_len - 1)): if (part in current_dict): current_dict = current_dict[part] else: current_dict[part] = {} current_dict = current_dict[part] else: current_dict[part] = vars_dict[var_name] return nested_vars_dict
'Randomly spikify underlying rates according to a Poisson distribution. Args: rates_bxtxd: a numpy tensor of rates with shape B x T x N. Returns: A numpy array with the same shape as rates_bxtxd, but with the event counts.'
@staticmethod def spikify_rates(rates_bxtxd):
(B, T, N) = rates_bxtxd.shape assert all([(B > 0), (N > 0)]), 'problems' spikes_bxtxd = np.zeros([B, T, N], dtype=np.int32) for b in range(B): for t in range(T): for n in range(N): rate = rates_bxtxd[(b, t, n)] count = np.random.poisson(rate) spikes_bxtxd[(b, t, n)] = count return spikes_bxtxd
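The triple loop is easy to follow but slow for large tensors; np.random.poisson broadcasts elementwise, so an equivalent vectorized sketch is:

import numpy as np

def spikify_rates_vectorized(rates_bxtxd):
    # np.random.poisson draws one count per rate entry, matching the loop.
    return np.random.poisson(rates_bxtxd).astype(np.int32)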
'Create Poisson distributions with log_rates parameters. Args: log_rates: a tensor-like list of log rates underlying the Poisson dist.'
def __init__(self, log_rates):
self.logr = log_rates
'Compute the log probability for the counts in the bin, under the model. Args: bin_counts: array-like integer counts Returns: The log-probability under the Poisson models for each element of bin_counts.'
def logp(self, bin_counts):
k = tf.to_float(bin_counts) return (((k * self.logr) - tf.exp(self.logr)) - tf.lgamma((k + 1)))
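The body computes log P(k) = k*log(r) - r - log(k!) with r = exp(log_rates); tf.lgamma(k + 1) supplies log(k!). A numpy cross-check against scipy (assuming scipy is available):

import numpy as np
from scipy.special import gammaln
from scipy.stats import poisson

k, rate = 3.0, 2.0
logp = k * np.log(rate) - rate - gammaln(k + 1.0)  # same three terms as above
assert np.allclose(logp, poisson.logpmf(3, 2.0))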
'Create a diagonal gaussian distribution. Args: batch_size: The size of the batch, i.e. 0th dim in 2D tensor of samples. z_size: The dimension of the distribution, i.e. 1st dim in 2D tensor. mean: The N-D mean of the distribution. logvar: The N-D log variance of the diagonal distribution.'
def __init__(self, batch_size, z_size, mean, logvar):
size__xz = [None, z_size] self.mean = mean self.logvar = logvar self.noise = noise = tf.random_normal(tf.shape(logvar)) self.sample = (mean + (tf.exp((0.5 * logvar)) * noise)) mean.set_shape(size__xz) logvar.set_shape(size__xz) self.sample.set_shape(size__xz)
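The sample is built with the reparameterization trick, z = mean + exp(0.5 * logvar) * eps with eps ~ N(0, I), so gradients flow through mean and logvar rather than through the random draw. A numpy illustration:

import numpy as np

mean = np.zeros(4)
logvar = np.log(0.25) * np.ones(4)  # sigma = 0.5 per dimension
eps = np.random.randn(4)            # the only source of randomness
z = mean + np.exp(0.5 * logvar) * eps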
'Compute the log-likelihood under the distribution. Args: z (optional): value to compute likelihood for, if None, use sample. Returns: The likelihood of z under the model.'
def logp(self, z=None):
if (z is None): z = self.sample if (z == self.sample): return gaussian_pos_log_likelihood(self.mean, self.logvar, self.noise) return diag_gaussian_log_likelihood(z, self.mean, self.logvar)
'Create a learnable diagonal gaussian distribution. Args: batch_size: The size of the batch, i.e. 0th dim in 2D tensor of samples. z_size: The dimension of the distribution, i.e. 1st dim in 2D tensor. name: prefix name for the mean and log TF variables. mean_init (optional): The N-D mean initialization of the distribution. var_init (optional): The N-D variance initialization of the diagonal distribution. var_min (optional): The minimum value the learned variance can take in any dimension. var_max (optional): The maximum value the learned variance can take in any dimension.'
def __init__(self, batch_size, z_size, name, mean_init=0.0, var_init=1.0, var_min=0.0, var_max=1000000.0):
size_1xn = [1, z_size] size__xn = [None, z_size] size_bx1 = tf.stack([batch_size, 1]) assert (var_init > 0.0), 'Problems' assert (var_max >= var_min), 'Problems' assert (var_init >= var_min), 'Problems' assert (var_max >= var_init), 'Problems' z_mean_1xn = tf.get_variable(name=(name + '/mean'), shape=size_1xn, initializer=tf.constant_initializer(mean_init)) self.mean_bxn = mean_bxn = tf.tile(z_mean_1xn, size_bx1) mean_bxn.set_shape(size__xn) log_var_init = np.log(var_init) if (var_max > var_min): var_is_trainable = True else: var_is_trainable = False z_logvar_1xn = tf.get_variable(name=(name + '/logvar'), shape=size_1xn, initializer=tf.constant_initializer(log_var_init), trainable=var_is_trainable) if var_is_trainable: z_logit_var_1xn = tf.exp(z_logvar_1xn) z_var_1xn = ((tf.nn.sigmoid(z_logit_var_1xn) * (var_max - var_min)) + var_min) z_logvar_1xn = tf.log(z_var_1xn) logvar_bxn = tf.tile(z_logvar_1xn, size_bx1) self.logvar_bxn = logvar_bxn self.noise_bxn = noise_bxn = tf.random_normal(tf.shape(logvar_bxn)) self.sample_bxn = (mean_bxn + (tf.exp((0.5 * logvar_bxn)) * noise_bxn))
'Compute the log-likelihood under the distribution. Args: z (optional): value to compute likelihood for, if None, use sample. Returns: The likelihood of z under the model.'
def logp(self, z=None):
if (z is None): z = self.sample if (z == self.sample_bxn): return gaussian_pos_log_likelihood(self.mean_bxn, self.logvar_bxn, self.noise_bxn) return diag_gaussian_log_likelihood(z, self.mean_bxn, self.logvar_bxn)
'Create an input dependent diagonal Gaussian distribution. Args: x: The input tensor from which the mean and variance are computed, via a linear transformation of x. I.e. mu = Wx + b, log(var) = Mx + c z_size: The size of the distribution. name: The name to prefix to learned variables. var_min (optional): Minimal variance allowed. This is an additional way to control the amount of information getting through the stochastic layer.'
def __init__(self, x_bxu, z_size, name, var_min=0.0):
size_bxn = tf.stack([tf.shape(x_bxu)[0], z_size]) self.mean_bxn = mean_bxn = linear(x_bxu, z_size, name=(name + '/mean')) logvar_bxn = linear(x_bxu, z_size, name=(name + '/logvar')) if (var_min > 0.0): logvar_bxn = tf.log((tf.exp(logvar_bxn) + var_min)) self.logvar_bxn = logvar_bxn self.noise_bxn = noise_bxn = tf.random_normal(size_bxn) self.noise_bxn.set_shape([None, z_size]) self.sample_bxn = (mean_bxn + (tf.exp((0.5 * logvar_bxn)) * noise_bxn))
'Compute the log-likelihood under the distribution. Args: z (optional): value to compute likelihood for, if None, use sample. Returns: The likelihood of z under the model.'
def logp(self, z=None):
if (z is None): z = self.sample if (z == self.sample_bxn): return gaussian_pos_log_likelihood(self.mean_bxn, self.logvar_bxn, self.noise_bxn) return diag_gaussian_log_likelihood(z, self.mean_bxn, self.logvar_bxn)
'Create a learnable autoregressive (1) process. Args: batch_size: The size of the batch, i.e. 0th dim in 2D tensor of samples. z_size: The dimension of the distribution, i.e. 1st dim in 2D tensor. autocorrelation_taus: The autocorrelation time constant of the AR(1) process. A value of 0 is uncorrelated gaussian noise. noise_variances: The variance of the additive noise, *not* the process variance. do_train_prior_ar_atau: Train or leave as constant, the autocorrelation? do_train_prior_ar_nvar: Train or leave as constant, the noise variance? num_steps: Number of steps to run the process. name: The name to prefix to learned TF variables.'
def __init__(self, batch_size, z_size, autocorrelation_taus, noise_variances, do_train_prior_ar_atau, do_train_prior_ar_nvar, num_steps, name):
size_bx1 = tf.stack([batch_size, 1]) size__xu = [None, z_size] log_evar_inits_1xu = tf.expand_dims(tf.log(noise_variances), 0) self.logevars_1xu = logevars_1xu = tf.Variable(log_evar_inits_1xu, name=(name + '/logevars'), dtype=tf.float32, trainable=do_train_prior_ar_nvar) self.logevars_bxu = logevars_bxu = tf.tile(logevars_1xu, size_bx1) logevars_bxu.set_shape(size__xu) log_atau_inits_1xu = tf.expand_dims(tf.log(autocorrelation_taus), 0) self.logataus_1xu = logataus_1xu = tf.Variable(log_atau_inits_1xu, name=(name + '/logatau'), dtype=tf.float32, trainable=do_train_prior_ar_atau) phis_1xu = tf.exp((- tf.exp((- logataus_1xu)))) self.phis_bxu = phis_bxu = tf.tile(phis_1xu, size_bx1) phis_bxu.set_shape(size__xu) self.logpvars_1xu = ((logevars_1xu - tf.log((1.0 - phis_1xu))) - tf.log((1.0 + phis_1xu))) self.logpvars_bxu = logpvars_bxu = tf.tile(self.logpvars_1xu, size_bx1) logpvars_bxu.set_shape(size__xu) self.pmeans_bxu = pmeans_bxu = tf.zeros_like(phis_bxu) self.means_t = means_t = ([None] * num_steps) self.logvars_t = logvars_t = ([None] * num_steps) self.samples_t = samples_t = ([None] * num_steps) self.gaussians_t = gaussians_t = ([None] * num_steps) sample_bxu = tf.zeros_like(phis_bxu) for t in range(num_steps): if (t == 0): logvar_pt_bxu = self.logpvars_bxu else: logvar_pt_bxu = self.logevars_bxu z_mean_pt_bxu = (pmeans_bxu + (phis_bxu * sample_bxu)) gaussians_t[t] = DiagonalGaussian(batch_size, z_size, mean=z_mean_pt_bxu, logvar=logvar_pt_bxu) sample_bxu = gaussians_t[t].sample samples_t[t] = sample_bxu logvars_t[t] = logvar_pt_bxu means_t[t] = z_mean_pt_bxu
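Two identities are baked into the body: phi = exp(-1/tau), so logataus parameterizes the autocorrelation time constant, and the stationary process variance sigma_p^2 = sigma_e^2 / (1 - phi^2), whose log is computed as log(sigma_e^2) - log(1 - phi) - log(1 + phi) since 1 - phi^2 = (1 - phi)(1 + phi). A numpy check of the variance identity:

import numpy as np

phi, evar = 0.9, 0.1
pvar = evar / (1.0 - phi ** 2)
assert np.allclose(np.log(pvar),
                   np.log(evar) - np.log(1.0 - phi) - np.log(1.0 + phi))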
'Compute the log-likelihood under the distribution for a given time t, not the whole sequence. Args: z_t_bxu: sample to compute likelihood for at time t. z_tm1_bxu (optional): sample condition probability of z_t upon. Returns: The likelihood of p_t under the model at time t. i.e. p(z_t|z_tm1) = N(z_tm1 * phis, eps^2)'
def logp_t(self, z_t_bxu, z_tm1_bxu=None):
if (z_tm1_bxu is None): return diag_gaussian_log_likelihood(z_t_bxu, self.pmeans_bxu, self.logpvars_bxu) else: means_t_bxu = (self.pmeans_bxu + (self.phis_bxu * z_tm1_bxu)) logp_tgtm1_bxu = diag_gaussian_log_likelihood(z_t_bxu, means_t_bxu, self.logevars_bxu) return logp_tgtm1_bxu
'Create a lower bound in three parts, normalized reconstruction cost, normalized KL divergence cost, and their sum. E_q[ln p(z_i | z_{i+1}) / q(z_i | x)]: \int q(z) ln p(z) dz = -0.5 ln(2pi) - 0.5 \sum (ln(sigma_p^2) + sigma_q^2 / sigma_p^2 + (mean_p - mean_q)^2 / sigma_p^2); \int q(z) ln q(z) dz = -0.5 ln(2pi) - 0.5 \sum (ln(sigma_q^2) + 1). Args: zs: posterior z ~ q(z|x) prior_zs: prior zs'
def __init__(self, zs, prior_zs):
kl_b = 0.0 for (z, prior_z) in zip(zs, prior_zs): assert isinstance(z, Gaussian) assert isinstance(prior_z, Gaussian) kl_b += (0.5 * tf.reduce_sum(((((prior_z.logvar - z.logvar) + tf.exp((z.logvar - prior_z.logvar))) + tf.square(((z.mean - prior_z.mean) / tf.exp((0.5 * prior_z.logvar))))) - 1.0), [1])) self.kl_cost_b = kl_b self.kl_cost = tf.reduce_mean(kl_b)
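Per dimension, the summand is the standard closed-form KL between diagonal Gaussians, KL(q||p) = 0.5 * (log(vp/vq) + vq/vp + (mq - mp)^2 / vp - 1). A scalar numpy check matching the expression in the body:

import numpy as np

mq, lvq = 0.3, np.log(0.5)  # posterior mean / log variance
mp, lvp = 0.0, np.log(1.0)  # prior mean / log variance
kl = 0.5 * ((lvp - lvq) + np.exp(lvq - lvp)
            + ((mq - mp) / np.exp(0.5 * lvp)) ** 2 - 1.0)
direct = 0.5 * (np.log(np.exp(lvp) / np.exp(lvq))
                + np.exp(lvq) / np.exp(lvp)
                + (mq - mp) ** 2 / np.exp(lvp) - 1.0)
assert np.allclose(kl, direct)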
'Create a lower bound in three parts, normalized reconstruction cost, normalized KL divergence cost, and their sum. Args: post_zs: posterior z ~ q(z|x) prior_z_process: prior AR(1) process'
def __init__(self, post_zs, prior_z_process):
assert (len(post_zs) > 1), 'GP is for time, need more than 1 time step.' assert isinstance(prior_z_process, GaussianProcess), 'Must use GP.' z0_bxu = post_zs[0].sample logq_bxu = post_zs[0].logp(z0_bxu) logp_bxu = prior_z_process.logp_t(z0_bxu) z_tm1_bxu = z0_bxu for z_t in post_zs[1:]: z_t_bxu = z_t.sample logq_bxu += z_t.logp(z_t_bxu) logp_bxu += prior_z_process.logp_t(z_t_bxu, z_tm1_bxu) z_tm1_bxu = z_t_bxu kl_bxu = (logq_bxu - logp_bxu) kl_b = tf.reduce_sum(kl_bxu, [1]) self.kl_cost_b = kl_b self.kl_cost = tf.reduce_mean(kl_b)
'ResNet constructor. Args: hps: Hyperparameters. images: Batches of images. [batch_size, image_size, image_size, 3] labels: Batches of labels. [batch_size, num_classes] mode: One of \'train\' or \'eval\'.'
def __init__(self, hps, images, labels, mode):
self.hps = hps self._images = images self.labels = labels self.mode = mode self._extra_train_ops = []
'Build a whole graph for the model.'
def build_graph(self):
self.global_step = tf.contrib.framework.get_or_create_global_step() self._build_model() if (self.mode == 'train'): self._build_train_op() self.summaries = tf.summary.merge_all()
'Map a stride scalar to the stride array for tf.nn.conv2d.'
def _stride_arr(self, stride):
return [1, stride, stride, 1]
'Build the core model within the graph.'
def _build_model(self):
with tf.variable_scope('init'): x = self._images x = self._conv('init_conv', x, 3, 3, 16, self._stride_arr(1)) strides = [1, 2, 2] activate_before_residual = [True, False, False] if self.hps.use_bottleneck: res_func = self._bottleneck_residual filters = [16, 64, 128, 256] else: res_func = self._residual filters = [16, 16, 32, 64] with tf.variable_scope('unit_1_0'): x = res_func(x, filters[0], filters[1], self._stride_arr(strides[0]), activate_before_residual[0]) for i in six.moves.range(1, self.hps.num_residual_units): with tf.variable_scope(('unit_1_%d' % i)): x = res_func(x, filters[1], filters[1], self._stride_arr(1), False) with tf.variable_scope('unit_2_0'): x = res_func(x, filters[1], filters[2], self._stride_arr(strides[1]), activate_before_residual[1]) for i in six.moves.range(1, self.hps.num_residual_units): with tf.variable_scope(('unit_2_%d' % i)): x = res_func(x, filters[2], filters[2], self._stride_arr(1), False) with tf.variable_scope('unit_3_0'): x = res_func(x, filters[2], filters[3], self._stride_arr(strides[2]), activate_before_residual[2]) for i in six.moves.range(1, self.hps.num_residual_units): with tf.variable_scope(('unit_3_%d' % i)): x = res_func(x, filters[3], filters[3], self._stride_arr(1), False) with tf.variable_scope('unit_last'): x = self._batch_norm('final_bn', x) x = self._relu(x, self.hps.relu_leakiness) x = self._global_avg_pool(x) with tf.variable_scope('logit'): logits = self._fully_connected(x, self.hps.num_classes) self.predictions = tf.nn.softmax(logits) with tf.variable_scope('costs'): xent = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=self.labels) self.cost = tf.reduce_mean(xent, name='xent') self.cost += self._decay() tf.summary.scalar('cost', self.cost)
'Build training specific ops for the graph.'
def _build_train_op(self):
self.lrn_rate = tf.constant(self.hps.lrn_rate, tf.float32) tf.summary.scalar('learning_rate', self.lrn_rate) trainable_variables = tf.trainable_variables() grads = tf.gradients(self.cost, trainable_variables) if (self.hps.optimizer == 'sgd'): optimizer = tf.train.GradientDescentOptimizer(self.lrn_rate) elif (self.hps.optimizer == 'mom'): optimizer = tf.train.MomentumOptimizer(self.lrn_rate, 0.9) apply_op = optimizer.apply_gradients(zip(grads, trainable_variables), global_step=self.global_step, name='train_step') train_ops = ([apply_op] + self._extra_train_ops) self.train_op = tf.group(*train_ops)
'Batch normalization.'
def _batch_norm(self, name, x):
with tf.variable_scope(name): params_shape = [x.get_shape()[(-1)]] beta = tf.get_variable('beta', params_shape, tf.float32, initializer=tf.constant_initializer(0.0, tf.float32)) gamma = tf.get_variable('gamma', params_shape, tf.float32, initializer=tf.constant_initializer(1.0, tf.float32)) if (self.mode == 'train'): (mean, variance) = tf.nn.moments(x, [0, 1, 2], name='moments') moving_mean = tf.get_variable('moving_mean', params_shape, tf.float32, initializer=tf.constant_initializer(0.0, tf.float32), trainable=False) moving_variance = tf.get_variable('moving_variance', params_shape, tf.float32, initializer=tf.constant_initializer(1.0, tf.float32), trainable=False) self._extra_train_ops.append(moving_averages.assign_moving_average(moving_mean, mean, 0.9)) self._extra_train_ops.append(moving_averages.assign_moving_average(moving_variance, variance, 0.9)) else: mean = tf.get_variable('moving_mean', params_shape, tf.float32, initializer=tf.constant_initializer(0.0, tf.float32), trainable=False) variance = tf.get_variable('moving_variance', params_shape, tf.float32, initializer=tf.constant_initializer(1.0, tf.float32), trainable=False) tf.summary.histogram(mean.op.name, mean) tf.summary.histogram(variance.op.name, variance) y = tf.nn.batch_normalization(x, mean, variance, beta, gamma, 0.001) y.set_shape(x.get_shape()) return y
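assign_moving_average implements new = decay * old + (1 - decay) * value, with decay 0.9 here; a scalar sketch of how the tracked statistic converges toward the batch statistic:

moving_mean = 0.0
for batch_mean in [1.0, 1.0, 1.0, 1.0]:
    moving_mean = 0.9 * moving_mean + 0.1 * batch_mean
print(moving_mean)  # 0.3439, creeping toward 1.0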
'Residual unit with 2 sub layers.'
def _residual(self, x, in_filter, out_filter, stride, activate_before_residual=False):
if activate_before_residual: with tf.variable_scope('shared_activation'): x = self._batch_norm('init_bn', x) x = self._relu(x, self.hps.relu_leakiness) orig_x = x else: with tf.variable_scope('residual_only_activation'): orig_x = x x = self._batch_norm('init_bn', x) x = self._relu(x, self.hps.relu_leakiness) with tf.variable_scope('sub1'): x = self._conv('conv1', x, 3, in_filter, out_filter, stride) with tf.variable_scope('sub2'): x = self._batch_norm('bn2', x) x = self._relu(x, self.hps.relu_leakiness) x = self._conv('conv2', x, 3, out_filter, out_filter, [1, 1, 1, 1]) with tf.variable_scope('sub_add'): if (in_filter != out_filter): orig_x = tf.nn.avg_pool(orig_x, stride, stride, 'VALID') orig_x = tf.pad(orig_x, [[0, 0], [0, 0], [0, 0], [((out_filter - in_filter) // 2), ((out_filter - in_filter) // 2)]]) x += orig_x tf.logging.debug('image after unit %s', x.get_shape()) return x
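When the filter count increases, this unit's shortcut is average-pooled and zero-padded on the channel axis rather than using a learned projection; a numpy sketch of the padding step (NHWC layout, hypothetical sizes):

import numpy as np

orig_x = np.zeros((8, 16, 16, 16))  # 16 input channels
in_f, out_f = 16, 32
pad = (out_f - in_f) // 2
padded = np.pad(orig_x, [(0, 0), (0, 0), (0, 0), (pad, pad)])  # zeros
print(padded.shape)                 # (8, 16, 16, 32)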
'Bottleneck residual unit with 3 sub layers.'
def _bottleneck_residual(self, x, in_filter, out_filter, stride, activate_before_residual=False):
if activate_before_residual: with tf.variable_scope('common_bn_relu'): x = self._batch_norm('init_bn', x) x = self._relu(x, self.hps.relu_leakiness) orig_x = x else: with tf.variable_scope('residual_bn_relu'): orig_x = x x = self._batch_norm('init_bn', x) x = self._relu(x, self.hps.relu_leakiness) with tf.variable_scope('sub1'): x = self._conv('conv1', x, 1, in_filter, (out_filter / 4), stride) with tf.variable_scope('sub2'): x = self._batch_norm('bn2', x) x = self._relu(x, self.hps.relu_leakiness) x = self._conv('conv2', x, 3, (out_filter / 4), (out_filter / 4), [1, 1, 1, 1]) with tf.variable_scope('sub3'): x = self._batch_norm('bn3', x) x = self._relu(x, self.hps.relu_leakiness) x = self._conv('conv3', x, 1, (out_filter / 4), out_filter, [1, 1, 1, 1]) with tf.variable_scope('sub_add'): if (in_filter != out_filter): orig_x = self._conv('project', orig_x, 1, in_filter, out_filter, stride) x += orig_x tf.logging.info('image after unit %s', x.get_shape()) return x
'L2 weight decay loss.'
def _decay(self):
costs = [] for var in tf.trainable_variables(): if (var.op.name.find('DW') > 0): costs.append(tf.nn.l2_loss(var)) return tf.multiply(self.hps.weight_decay_rate, tf.add_n(costs))
'Convolution.'
def _conv(self, name, x, filter_size, in_filters, out_filters, strides):
with tf.variable_scope(name): n = ((filter_size * filter_size) * out_filters) kernel = tf.get_variable('DW', [filter_size, filter_size, in_filters, out_filters], tf.float32, initializer=tf.random_normal_initializer(stddev=np.sqrt((2.0 / n)))) return tf.nn.conv2d(x, kernel, strides, padding='SAME')
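The kernel uses a fan-out normal initializer, stddev = sqrt(2 / n) with n = filter_size^2 * out_filters, the variant of He et al.'s ReLU initialization that normalizes over the output fan; the same computation in numpy:

import numpy as np

filter_size, in_filters, out_filters = 3, 16, 64
n = filter_size * filter_size * out_filters
kernel = np.random.normal(0.0, np.sqrt(2.0 / n),
                          size=(filter_size, filter_size,
                                in_filters, out_filters))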
'Relu, with optional leaky support.'
def _relu(self, x, leakiness=0.0):
return tf.where(tf.less(x, 0.0), (leakiness * x), x, name='leaky_relu')
'FullyConnected layer for final output.'
def _fully_connected(self, x, out_dim):
x = tf.reshape(x, [self.hps.batch_size, (-1)]) w = tf.get_variable('DW', [x.get_shape()[1], out_dim], initializer=tf.uniform_unit_scaling_initializer(factor=1.0)) b = tf.get_variable('biases', [out_dim], initializer=tf.constant_initializer()) return tf.nn.xw_plus_b(x, w, b)
'Create the deterministic transformation between stochastic layers. If self.hparams.nonlinear: 2 x tanh layers Else: 1 x linear layer'
def _create_transformation(self, input, n_output, reuse, scope_prefix):
if self.hparams.nonlinear: h = slim.fully_connected(input, self.hparams.n_hidden, reuse=reuse, activation_fn=tf.nn.tanh, scope=('%s_nonlinear_1' % scope_prefix)) h = slim.fully_connected(h, self.hparams.n_hidden, reuse=reuse, activation_fn=tf.nn.tanh, scope=('%s_nonlinear_2' % scope_prefix)) h = slim.fully_connected(h, n_output, reuse=reuse, activation_fn=None, scope=('%s' % scope_prefix)) else: h = slim.fully_connected(input, n_output, reuse=reuse, activation_fn=None, scope=('%s' % scope_prefix)) return h
'x values -> samples from Q and return log Q(h|x).'
def _recognition_network(self, sampler=None, log_likelihood_func=None):
samples = {} reuse = (None if (not self.run_recognition_network) else True) if (sampler is None): sampler = self._random_sample if (log_likelihood_func is None): log_likelihood_func = (lambda sample, log_params: U.binary_log_likelihood(sample['activation'], log_params)) logQ = [] if (self.hparams.task in ['sbn', 'omni']): samples[(-1)] = {'activation': self._x} if (self.mean_xs is not None): samples[(-1)]['activation'] -= self.mean_xs samples[(-1)]['activation'] = ((samples[(-1)]['activation'] + 1) / 2.0) with slim.arg_scope([slim.fully_connected], weights_initializer=slim.variance_scaling_initializer(), variables_collections=[Q_COLLECTION]): for i in xrange(self.hparams.n_layer): input = ((2.0 * samples[(i - 1)]['activation']) - 1.0) h = self._create_transformation(input, n_output=self.hparams.n_hidden, reuse=reuse, scope_prefix=('q_%d' % i)) samples[i] = sampler(h, self.uniform_samples[i], i) logQ.append(log_likelihood_func(samples[i], h)) self.run_recognition_network = True return (logQ, samples) elif (self.hparams.task == 'sp'): samples[(-1)] = {'activation': tf.split(self._x, num_or_size_splits=2, axis=1)[0]} if (self.mean_xs is not None): samples[(-1)]['activation'] -= np.split(self.mean_xs, 2, 0)[0] samples[(-1)]['activation'] = ((samples[(-1)]['activation'] + 1) / 2.0) with slim.arg_scope([slim.fully_connected], weights_initializer=slim.variance_scaling_initializer(), variables_collections=[Q_COLLECTION]): for i in xrange(self.hparams.n_layer): input = ((2.0 * samples[(i - 1)]['activation']) - 1.0) h = self._create_transformation(input, n_output=self.hparams.n_hidden, reuse=reuse, scope_prefix=('q_%d' % i)) samples[i] = sampler(h, self.uniform_samples[i], i) logQ.append(log_likelihood_func(samples[i], h)) self.run_recognition_network = True return (logQ, samples)
'Returns learning signal and function. This is the implementation for SBNs for the ELBO. Args: samples: dictionary of sampled latent variables logQ: list of log q(h_i) terms log_likelihood_func: function used to compute log probs for the latent variables Returns: learning_signal: the "reward" function function_term: part of the function that depends on the parameters and needs to have the gradient taken through'
def _generator_network(self, samples, logQ, log_likelihood_func=None):
reuse = (None if (not self.run_generator_network) else True) if (self.hparams.task in ['sbn', 'omni']): if (log_likelihood_func is None): log_likelihood_func = (lambda sample, log_params: U.binary_log_likelihood(sample['activation'], log_params)) logPPrior = log_likelihood_func(samples[(self.hparams.n_layer - 1)], tf.expand_dims(self.prior, 0)) with slim.arg_scope([slim.fully_connected], weights_initializer=slim.variance_scaling_initializer(), variables_collections=[P_COLLECTION]): for i in reversed(xrange(self.hparams.n_layer)): if (i == 0): n_output = self.hparams.n_input else: n_output = self.hparams.n_hidden input = ((2.0 * samples[i]['activation']) - 1.0) h = self._create_transformation(input, n_output, reuse=reuse, scope_prefix=('p_%d' % i)) if (i == 0): logP = U.binary_log_likelihood(self._x, (h + self.train_bias)) else: logPPrior += log_likelihood_func(samples[(i - 1)], h) self.run_generator_network = True return (((logP + logPPrior) - tf.add_n(logQ)), (logP + logPPrior)) elif (self.hparams.task == 'sp'): with slim.arg_scope([slim.fully_connected], weights_initializer=slim.variance_scaling_initializer(), variables_collections=[P_COLLECTION]): n_output = int((self.hparams.n_input / 2)) i = (self.hparams.n_layer - 1) input = ((2.0 * samples[i]['activation']) - 1.0) h = self._create_transformation(input, n_output, reuse=reuse, scope_prefix=('p_%d' % i)) logP = U.binary_log_likelihood(tf.split(self._x, num_or_size_splits=2, axis=1)[1], (h + np.split(self.train_bias, 2, 0)[1])) self.run_generator_network = True return (logP, logP)
'Compute the mean per-component variance. Use a moving average to estimate the required moments.'
def compute_tensor_variance(self, t):
t_sq = tf.reduce_mean(tf.square(t))
self.maintain_ema_ops.append(self.ema.apply([t, t_sq]))
# Var(t) ~= E[t^2] - E[t]^2, with both moments tracked by moving averages.
variance_estimator = (self.ema.average(t_sq) -
                      tf.reduce_mean(tf.square(self.ema.average(t))))
return variance_estimator
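Schematically, with $d$ the number of components of $t$, the estimator above tracks

\[
\widehat{\operatorname{Var}}(t) \;=\; \mathrm{ema}\!\Big[\tfrac{1}{d}\textstyle\sum_j t_j^2\Big] \;-\; \tfrac{1}{d}\textstyle\sum_j \mathrm{ema}[t_j]^2 \;\approx\; \tfrac{1}{d}\textstyle\sum_j \operatorname{Var}(t_j),
\]

i.e. the per-component variance averaged over components, using the identity $\operatorname{Var}(x) = \mathbb{E}[x^2] - \mathbb{E}[x]^2$.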
'Creates the training op. Args: grads_and_vars: gradients to apply; also used to compute the running average gradient variance extra_grads_and_vars: gradients to apply (not used to compute the average variance)'
def _create_train_op(self, grads_and_vars, extra_grads_and_vars=None):
# Avoid a mutable default argument: the list is extended below.
if extra_grads_and_vars is None:
    extra_grads_and_vars = []
first_moment = U.vectorize(grads_and_vars, skip_none=True)
second_moment = tf.square(first_moment)
self.maintain_ema_ops.append(self.ema.apply([first_moment, second_moment]))
if self.baseline_loss:
    mean_baseline_loss = tf.reduce_mean(tf.add_n(self.baseline_loss))
    extra_grads_and_vars += self.optimizer_class.compute_gradients(
        mean_baseline_loss, var_list=tf.get_collection('BASELINE'))
# Baseline parameters get their own optimizer with a larger learning rate.
extra_optimizer = tf.train.AdamOptimizer(
    learning_rate=10 * self.hparams.learning_rate, beta2=self.hparams.beta2)
with tf.control_dependencies(
    [tf.group(*[g for (g, _) in grads_and_vars + extra_grads_and_vars
                if g is not None])]):
    if self.eval_mode:
        # In eval mode, do not update the generator network.
        grads_and_vars = [(g, v) for (g, v) in grads_and_vars
                          if v not in tf.get_collection(P_COLLECTION)]
    train_op = self.optimizer_class.apply_gradients(
        grads_and_vars, global_step=self.global_step)
    if extra_grads_and_vars:
        extra_train_op = extra_optimizer.apply_gradients(extra_grads_and_vars)
    else:
        extra_train_op = tf.no_op()
    self.optimizer = tf.group(train_op, extra_train_op, *self.maintain_ema_ops)
# Running estimate of the gradient variance from the EMA moments.
variance_estimator = (self.ema.average(second_moment) -
                      tf.square(self.ema.average(first_moment)))
self.grad_variance = tf.reduce_mean(variance_estimator)
'Returns mean of random variables parameterized by log_alpha.'
def _mean_sample(self, log_alpha, _, layer):
mu = tf.nn.sigmoid(log_alpha)
return {'preactivation': mu, 'activation': mu, 'log_param': log_alpha}
'Convert u to tied randomness in v.'
def _u_to_v(self, log_alpha, u, eps=1e-08):
u_prime = tf.nn.sigmoid(-log_alpha)  # P(b = 0)
# Rescale u within the interval that produces the same hard sample.
v_1 = (u - u_prime) / tf.clip_by_value(1 - u_prime, eps, 1)
v_1 = tf.clip_by_value(v_1, 0, 1)
v_1 = tf.stop_gradient(v_1)
v_1 = v_1 * (1 - u_prime) + u_prime
v_0 = u / tf.clip_by_value(u_prime, eps, 1)
v_0 = tf.clip_by_value(v_0, 0, 1)
v_0 = tf.stop_gradient(v_0)
v_0 = v_0 * u_prime
v = tf.where(u > u_prime, v_1, v_0)
v = tf.check_numerics(v, 'v sampling is not numerically stable.')
# Forward value equals u exactly; gradients flow through u_prime.
v = v + tf.stop_gradient(-v + u)
return v
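A sketch of the coupling the code implements: with $u' = \sigma(-\log\alpha) = P(b = 0)$ and $\mathrm{sg}[\cdot]$ denoting stop_gradient,

\[
\tilde v \;=\;
\begin{cases}
u' + (1-u')\,\mathrm{sg}\!\left[\dfrac{u-u'}{1-u'}\right], & u > u' \;\;(b=1),\\[6pt]
u'\,\mathrm{sg}\!\left[\dfrac{u}{u'}\right], & u \le u' \;\;(b=0),
\end{cases}
\]

so $\tilde v$ yields the same hard sample as $u$; the final correction $v + \mathrm{sg}(u - v)$ makes the forward value exactly $u$ while gradients with respect to $\log\alpha$ flow through $u'$.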
'Returns sampled random variables parameterized by log_alpha.'
def _random_sample(self, log_alpha, u, layer):
if layer not in self.uniform_samples_v:
    self.uniform_samples_v[layer] = self._u_to_v(log_alpha, u)
# Logistic reparameterization: thresholding at 0 gives a Bernoulli sample.
x = log_alpha + U.safe_log_prob(u) - U.safe_log_prob(1 - u)
samples = tf.stop_gradient(tf.to_float(x > 0))
return {'preactivation': x, 'activation': samples, 'log_param': log_alpha}
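This is the standard logistic reparameterization of a Bernoulli variable:

\[
z = \log\alpha + \log u - \log(1-u), \qquad b = \mathbb{1}[z > 0] \sim \operatorname{Bernoulli}\big(\sigma(\log\alpha)\big), \qquad u \sim \mathcal{U}(0,1).
\]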
'Returns sampled random variables parameterized by log_alpha.'
def _random_sample_soft(self, log_alpha, u, layer, temperature=None):
if temperature is None:
    temperature = self.hparams.temperature
x = log_alpha + U.safe_log_prob(u) - U.safe_log_prob(1 - u)
x /= tf.expand_dims(temperature, -1)
if self.hparams.muprop_relaxation:
    y = tf.nn.sigmoid(x + log_alpha * tf.expand_dims(temperature / (temperature + 1), -1))
else:
    y = tf.nn.sigmoid(x)
return {'preactivation': x, 'activation': y, 'log_param': log_alpha}
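This is the binary Concrete (Gumbel-Softmax) relaxation of the hard sampler above; with the muprop_relaxation flag the sigmoid's argument is shifted:

\[
y = \sigma\!\left(\frac{z}{\tau}\right) \quad\text{or}\quad y = \sigma\!\left(\frac{z}{\tau} + \frac{\tau}{\tau+1}\,\log\alpha\right), \qquad z = \log\alpha + \log u - \log(1-u).
\]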
'Returns sampled random variables parameterized by log_alpha.'
def _random_sample_soft_v(self, log_alpha, _, layer, temperature=None):
v = self.uniform_samples_v[layer]
return self._random_sample_soft(log_alpha, v, layer, temperature)
'Runs the discrete path up to switch_layer, then the continuous (relaxed) path. Args: switch_layer: this layer and beyond are relaxed'
def _random_sample_switch(self, log_alpha, u, layer, switch_layer, temperature=None):
if layer < switch_layer:
    return self._random_sample(log_alpha, u, layer)
else:
    return self._random_sample_soft(log_alpha, u, layer, temperature)
'Runs the discrete path up to switch_layer, then the continuous (relaxed) path with tied noise. Args: switch_layer: this layer and beyond are relaxed'
def _random_sample_switch_v(self, log_alpha, u, layer, switch_layer, temperature=None):
if layer < switch_layer:
    return self._random_sample(log_alpha, u, layer)
else:
    return self._random_sample_soft_v(log_alpha, u, layer, temperature)
'Compute the NVIL gradient.'
def get_nvil_gradient(self):
(logQHard, samples) = self._recognition_network()
(ELBO, reinforce_model_grad) = self._generator_network(samples, logQHard)
logQHard = tf.add_n(logQHard)
# Centered learning signal: ELBO minus an input-dependent baseline.
learning_signal = tf.stop_gradient(ELBO) - self._create_baseline()
self.baseline_loss.append(tf.square(learning_signal))
optimizerLoss = -(tf.stop_gradient(learning_signal) * logQHard +
                  reinforce_model_grad)
optimizerLoss = tf.reduce_mean(optimizerLoss)
nvil_gradient = self.optimizer_class.compute_gradients(optimizerLoss)
debug = {'ELBO': ELBO,
         'RMS of centered learning signal': U.rms(learning_signal)}
return nvil_gradient, debug
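Schematically, the surrogate loss implements the standard NVIL estimator, with $f$ the ELBO and $c_\psi$ the learned baseline:

\[
\hat g \;=\; \big(f(b) - c_\psi(x)\big)\,\nabla_\theta \log q_\theta(b \mid x) \;+\; \nabla_\theta f(b),
\]

and the baseline is trained separately by minimizing the squared centered learning signal.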
'Computes the simple MuProp gradient. This MuProp control variate does not include the linear term.'
def get_simple_muprop_gradient(self):
(logQHard, hardSamples) = self._recognition_network()
(hardELBO, reinforce_model_grad) = self._generator_network(hardSamples, logQHard)
(logQ, muSamples) = self._recognition_network(sampler=self._mean_sample)
(muELBO, _) = self._generator_network(muSamples, logQ)
scaling_baseline = self._create_eta(collection='BASELINE')
# The mean-field ELBO serves as a control variate (without the linear term).
learning_signal = (hardELBO - scaling_baseline * muELBO -
                   self._create_baseline())
self.baseline_loss.append(tf.square(learning_signal))
optimizerLoss = -(tf.stop_gradient(learning_signal) * tf.add_n(logQHard) +
                  reinforce_model_grad)
optimizerLoss = tf.reduce_mean(optimizerLoss)
simple_muprop_gradient = self.optimizer_class.compute_gradients(optimizerLoss)
debug = {'ELBO': hardELBO, 'muELBO': muELBO, 'RMS': U.rms(learning_signal)}
return simple_muprop_gradient, debug
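A sketch of the centered signal assembled above, with $\eta$ the learned scaling and $c_\psi$ the input-dependent baseline:

\[
\ell \;=\; f(b) \;-\; \eta\, f(\mu) \;-\; c_\psi(x),
\]

i.e. the mean-field ELBO $f(\mu)$ acts as a multiplicative control variate, without the first-order Taylor correction used by full MuProp.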
'Computes the MuProp gradient. Uses a sampler that returns means for the mean-field forward pass; the recognition network returns logQ as a list, so the per-layer samples x_i are available from samples.'
def get_muprop_gradient(self):
(logQHard, hardSamples) = self._recognition_network()
(hardELBO, reinforce_model_grad) = self._generator_network(hardSamples, logQHard)
(logQ, muSamples) = self._recognition_network(sampler=self._mean_sample)
(muELBO, _) = self._generator_network(muSamples, logQ)
# Gradients of the mean-field ELBO w.r.t. the mean activations.
muELBOGrads = tf.gradients(tf.reduce_sum(muELBO),
                           [muSamples[i]['activation']
                            for i in xrange(self.hparams.n_layer)])
learning_signal = hardELBO
optimizerLoss = 0.0
learning_signals = []
for i in xrange(self.hparams.n_layer):
    # First-order Taylor correction around the mean-field activations.
    dfDiff = tf.reduce_sum(
        muELBOGrads[i] * (hardSamples[i]['activation'] -
                          muSamples[i]['activation']), axis=1)
    dfMu = tf.reduce_sum(
        tf.stop_gradient(muELBOGrads[i]) *
        tf.nn.sigmoid(hardSamples[i]['log_param']), axis=1)
    scaling_baseline_0 = self._create_eta(collection='BASELINE')
    scaling_baseline_1 = self._create_eta(collection='BASELINE')
    learning_signals.append(learning_signal -
                            scaling_baseline_0 * muELBO -
                            scaling_baseline_1 * dfDiff -
                            self._create_baseline())
    self.baseline_loss.append(tf.square(learning_signals[i]))
    optimizerLoss += (logQHard[i] * tf.stop_gradient(learning_signals[i]) +
                      tf.stop_gradient(scaling_baseline_1) * dfMu)
optimizerLoss += reinforce_model_grad
optimizerLoss *= -1
optimizerLoss = tf.reduce_mean(optimizerLoss)
muprop_gradient = self.optimizer_class.compute_gradients(optimizerLoss)
debug = {'ELBO': hardELBO, 'muELBO': muELBO}
debug.update(dict([('RMS learning signal layer %d' % i, U.rms(ls))
                   for (i, ls) in enumerate(learning_signals)]))
return muprop_gradient, debug
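Full MuProp adds the linear Taylor term; per layer $i$ the centered signal is roughly

\[
\ell_i \;=\; f(b) \;-\; \eta_0\, f(\mu) \;-\; \eta_1\, \nabla f(\mu)_i^{\top}\big(b_i - \mu_i\big) \;-\; c_\psi(x),
\]

and the analytically known expectation of the linear term is added back deterministically through the dfMu term, so the estimator stays unbiased.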
'Calculate gumbel control variate.'
def _create_gumbel_control_variate(self, logQHard, temperature=None):
if temperature is None:
    temperature = self.hparams.temperature
# Relaxed (Concrete) forward pass with fresh noise u.
(logQ, softSamples) = self._recognition_network(
    sampler=functools.partial(self._random_sample_soft, temperature=temperature))
(softELBO, _) = self._generator_network(softSamples, logQ)
logQ = tf.add_n(logQ)
# Relaxed forward pass with noise v tied to the hard sample.
(logQ_v, softSamples_v) = self._recognition_network(
    sampler=functools.partial(self._random_sample_soft_v, temperature=temperature))
(softELBO_v, _) = self._generator_network(softSamples_v, logQ_v)
logQ_v = tf.add_n(logQ_v)
learning_signal = tf.stop_gradient(softELBO_v)
h = (tf.stop_gradient(learning_signal) * tf.add_n(logQHard)
     - softELBO + softELBO_v)
extra = (softELBO_v, -softELBO + softELBO_v)
return h, extra
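This assembles the REBAR control variate (the quadratic variant below applies the same construction per layer). Writing $\sigma_\tau$ for the relaxed sampler, $z$ for fresh noise, and $\tilde z$ for noise tied to the hard sample $b$, the resulting estimator is schematically

\[
\hat g \;=\; \big(f(b) - \eta\, f(\sigma_\tau(\tilde z))\big)\,\nabla_\theta \log q_\theta(b) \;+\; \eta\,\nabla_\theta f(\sigma_\tau(z)) \;-\; \eta\,\nabla_\theta f(\sigma_\tau(\tilde z)),
\]

which remains unbiased for the discrete objective while its variance can be tuned through $\tau$ and $\eta$.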
'Calculate gumbel control variate.'
def _create_gumbel_control_variate_quadratic(self, logQHard, temperature=None):
if temperature is None:
    temperature = self.hparams.temperature
h = 0
extra = []
for layer in xrange(self.hparams.n_layer):
    # Discrete up to `layer`, relaxed from `layer` onwards.
    (logQ, softSamples) = self._recognition_network(
        sampler=functools.partial(self._random_sample_switch,
                                  switch_layer=layer, temperature=temperature))
    (softELBO, _) = self._generator_network(softSamples, logQ)
    (logQ_v, softSamples_v) = self._recognition_network(
        sampler=functools.partial(self._random_sample_switch_v,
                                  switch_layer=layer, temperature=temperature))
    (softELBO_v, _) = self._generator_network(softSamples_v, logQ_v)
    learning_signal = tf.stop_gradient(softELBO_v)
    h += (tf.stop_gradient(learning_signal) * logQHard[layer]
          - softELBO + softELBO_v)
    extra.append((softELBO_v, -softELBO + softELBO_v))
return h, extra
'Get the dynamic rebar gradient (t, eta optimized).'
def get_dynamic_rebar_gradient(self):
tiled_pre_temperature = tf.tile([self.pre_temperature_variable],
                                [self.batch_size])
temperature = tf.exp(tiled_pre_temperature)
(hardELBO, nvil_gradient, logQHard) = self._create_hard_elbo()
if self.hparams.quadratic:
    (gumbel_cv, extra) = self._create_gumbel_control_variate_quadratic(
        logQHard, temperature=temperature)
else:
    (gumbel_cv, extra) = self._create_gumbel_control_variate(
        logQHard, temperature=temperature)
f_grads = self.optimizer_class.compute_gradients(tf.reduce_mean(-nvil_gradient))
eta = {}
(h_grads, eta_statistics) = self.multiply_by_eta_per_layer(
    self.optimizer_class.compute_gradients(tf.reduce_mean(gumbel_cv)), eta)
model_grads = U.add_grads_and_vars(f_grads, h_grads)
total_grads = model_grads
# Objective for eta and temperature: the variance of the gradient estimator.
g = U.vectorize(model_grads, set_none_to_zero=True)
self.maintain_ema_ops.append(self.ema.apply([g]))
gbar = 0
variance_objective = tf.reduce_mean(tf.square(g - gbar))
reinf_g_t = 0
if self.hparams.quadratic:
    for layer in xrange(self.hparams.n_layer):
        (gumbel_learning_signal, _) = extra[layer]
        df_dt = tf.gradients(gumbel_learning_signal, tiled_pre_temperature)[0]
        (reinf_g_t_i, _) = self.multiply_by_eta_per_layer(
            self.optimizer_class.compute_gradients(
                tf.reduce_mean(tf.stop_gradient(df_dt) * logQHard[layer])),
            eta)
        reinf_g_t += U.vectorize(reinf_g_t_i, set_none_to_zero=True)
    reparam = tf.add_n([reparam_i for (_, reparam_i) in extra])
else:
    (gumbel_learning_signal, reparam) = extra
    df_dt = tf.gradients(gumbel_learning_signal, tiled_pre_temperature)[0]
    (reinf_g_t, _) = self.multiply_by_eta_per_layer(
        self.optimizer_class.compute_gradients(
            tf.reduce_mean(tf.stop_gradient(df_dt) * tf.add_n(logQHard))),
        eta)
    reinf_g_t = U.vectorize(reinf_g_t, set_none_to_zero=True)
(reparam_g, _) = self.multiply_by_eta_per_layer(
    self.optimizer_class.compute_gradients(tf.reduce_mean(reparam)), eta)
reparam_g = U.vectorize(reparam_g, set_none_to_zero=True)
reparam_g_t = tf.gradients(
    tf.reduce_mean(2 * tf.stop_gradient(g - gbar) * reparam_g),
    self.pre_temperature_variable)[0]
variance_objective_grad = (tf.reduce_mean(2 * (g - gbar) * reinf_g_t) +
                           reparam_g_t)
debug = {'ELBO': hardELBO,
         'etas': eta_statistics,
         'variance_objective': variance_objective}
return total_grads, debug, variance_objective, variance_objective_grad
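The temperature and $\eta$ are adapted online by descending an estimate of the gradient variance; schematically

\[
\min_{\tau,\,\eta}\; \mathbb{E}\big[\hat g^{\,2}\big], \qquad \frac{\partial}{\partial \tau}\,\mathbb{E}\big[\hat g^{\,2}\big] \;=\; 2\,\mathbb{E}\!\left[\hat g\,\frac{\partial \hat g}{\partial \tau}\right],
\]

where $\partial\hat g/\partial\tau$ is split into a REINFORCE-style term (reinf_g_t) and a reparameterized term (reparam_g_t), as assembled in the code above.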
'Get the rebar gradient.'
def get_rebar_gradient(self):
(hardELBO, nvil_gradient, logQHard) = self._create_hard_elbo()
if self.hparams.quadratic:
    (gumbel_cv, _) = self._create_gumbel_control_variate_quadratic(logQHard)
else:
    (gumbel_cv, _) = self._create_gumbel_control_variate(logQHard)
f_grads = self.optimizer_class.compute_gradients(tf.reduce_mean(-nvil_gradient))
eta = {}
(h_grads, eta_statistics) = self.multiply_by_eta_per_layer(
    self.optimizer_class.compute_gradients(tf.reduce_mean(gumbel_cv)), eta)
model_grads = U.add_grads_and_vars(f_grads, h_grads)
total_grads = model_grads
variance_objective = tf.reduce_mean(
    tf.square(U.vectorize(model_grads, set_none_to_zero=True)))
debug = {'ELBO': hardELBO,
         'etas': eta_statistics,
         'variance_objective': variance_objective}
return total_grads, debug, variance_objective
'Returns sampled random variables parameterized by log_alpha.'
def _random_sample_soft(self, log_alpha, u, layer, temperature=None):
if temperature is None:
    temperature = self.hparams.temperature
x = log_alpha + U.safe_log_prob(u) - U.safe_log_prob(1 - u)
x /= temperature
if self.hparams.muprop_relaxation:
    x += (temperature / (temperature + 1)) * log_alpha
y = tf.nn.sigmoid(x)
return {'preactivation': x, 'activation': y, 'log_param': log_alpha}
'Add episodes to buffer.'
def add(self, episodes, *args):
idx = 0
while self.cur_size < self.max_size and idx < len(episodes):
    self.buffer[self.cur_size] = episodes[idx]
    self.cur_size += 1
    idx += 1
if idx < len(episodes):
    # Buffer is full: evict old episodes to make room for the rest.
    remove_idxs = self.remove_n(len(episodes) - idx)
    for remove_idx in remove_idxs:
        self.buffer[remove_idx] = episodes[idx]
        idx += 1
assert len(self.buffer) == self.cur_size
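A minimal usage sketch of the overflow behavior (the class name and constructor are hypothetical, and init_length == 0 is assumed):

    buf = Buffer(max_size=2)   # hypothetical constructor
    buf.add([ep_a, ep_b])      # fills slots 0 and 1; cur_size == 2
    buf.add([ep_c])            # buffer full: remove_n(1) picks a slot
                               # for eviction and ep_c overwrites it in place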
'Get n items for removal.'
def remove_n(self, n):
idxs = random.sample(xrange(self.init_length, self.cur_size), n)
return idxs
'Get batch of episodes to train on.'
def get_batch(self, n):
idxs = random.sample(xrange(self.cur_size), n)
return [self.buffer[idx] for idx in idxs], None
'Add episodes to buffer.'
def add(self, episodes, priorities, new_idxs=None):
if new_idxs is None:
    idx = 0
    new_idxs = []
    while self.cur_size < self.max_size and idx < len(episodes):
        self.buffer[self.cur_size] = episodes[idx]
        new_idxs.append(self.cur_size)
        self.cur_size += 1
        idx += 1
    if idx < len(episodes):
        remove_idxs = self.remove_n(len(episodes) - idx)
        for remove_idx in remove_idxs:
            self.buffer[remove_idx] = episodes[idx]
            new_idxs.append(remove_idx)
            idx += 1
else:
    assert len(new_idxs) == len(episodes)
    for (new_idx, ep) in zip(new_idxs, episodes):
        self.buffer[new_idx] = ep
self.priorities[new_idxs] = priorities
# Pinned initial episodes keep the maximum priority so rank-based
# eviction never removes them.
self.priorities[0:self.init_length] = np.max(self.priorities[self.init_length:])
assert len(self.buffer) == self.cur_size
return new_idxs
'Get n items for removal.'
def remove_n(self, n):
assert self.init_length + n <= self.cur_size
if self.eviction_strategy == 'rand':
    idxs = random.sample(xrange(self.init_length, self.cur_size), n)
elif self.eviction_strategy == 'fifo':
    # Ring-buffer indexing over the evictable region.
    idxs = [self.init_length +
            (self.remove_idx + i) % (self.max_size - self.init_length)
            for i in xrange(n)]
    self.remove_idx = idxs[-1] + 1 - self.init_length
elif self.eviction_strategy == 'rank':
    # Evict the n lowest-priority episodes.
    idxs = np.argpartition(self.priorities, n)[:n]
return idxs
'Get batch of episodes to train on.'
def get_batch(self, n):
p = self.sampling_distribution()
idxs = np.random.choice(self.cur_size, size=n, replace=False, p=p)
self.last_batch = idxs
return [self.buffer[idx] for idx in idxs], p[idxs]
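sampling_distribution itself is not shown in this excerpt; one plausible priority-proportional form (an assumption for illustration, not the repository's code) is:

    def sampling_distribution(self):
        # Assumed: sample episodes proportionally to their stored priorities.
        p = self.priorities[:self.cur_size]
        return p / np.sum(p)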
'Update last batch idxs with new priority.'
def update_last_batch(self, delta):
self.priorities[self.last_batch] = np.abs(delta)
self.priorities[0:self.init_length] = np.max(self.priorities[self.init_length:])
'Optimizer for gradient descent ops.'
def get_optimizer(self, learning_rate):
return tf.train.AdamOptimizer(learning_rate=learning_rate, epsilon=0.0002)
'Gradient ops.'
def training_ops(self, loss, learning_rate=None):
opt = self.get_optimizer(learning_rate)
params = tf.trainable_variables()
grads = tf.gradients(loss, params)
if self.clip_norm:
    (grads, global_norm) = tf.clip_by_global_norm(grads, self.clip_norm)
    tf.summary.scalar('grad_global_norm', global_norm)
return opt.apply_gradients(zip(grads, params))
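Global-norm clipping rescales all gradients jointly (the standard identity, for reference):

\[
g_i \;\leftarrow\; g_i \cdot \min\!\left(1, \frac{c}{\lVert g \rVert}\right), \qquad \lVert g \rVert = \Big(\textstyle\sum_i \lVert g_i \rVert_2^2\Big)^{1/2},
\]

with $c$ = clip_norm, so the relative directions across parameters are preserved.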
'Get objective calculations.'
def get(self, rewards, pads, values, final_values, log_probs, prev_log_probs, target_log_probs, entropies, logits):
raise NotImplementedError()
'Exploration bonus.'
def get_bonus(self, total_rewards, total_log_probs):
return -self.tau * total_log_probs
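In expectation this bonus is the policy entropy scaled by $\tau$:

\[
\mathbb{E}_{\pi}\big[-\tau \log \pi(a \mid s)\big] \;=\; \tau\, H\big(\pi(\cdot \mid s)\big),
\]

i.e. standard entropy regularization.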
'Exploration bonus.'
def get_bonus(self, total_rewards, total_log_probs):
discrepancy = total_rewards / self.tau - total_log_probs
normalized_d = self.num_samples * tf.nn.softmax(discrepancy)
return self.tau * normalized_d
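This variant weights samples by how under-appreciated their reward is relative to the policy's log-probability; with $K$ = num_samples,

\[
b_i \;=\; \tau\, K\, \operatorname{softmax}_i\!\Big(\frac{R_i}{\tau} - \log \pi(a_i)\Big),
\]

so trajectories whose reward exceeds what the current policy assigns them receive a larger bonus.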
'Get RNN cell.'
def get_cell(self):
self.cell_input_dim = self.internal_dim // 2
cell = tf.contrib.rnn.LSTMCell(self.cell_input_dim, state_is_tuple=False,
                               reuse=tf.get_variable_scope().reuse)
cell = tf.contrib.rnn.OutputProjectionWrapper(
    cell, self.output_dim, reuse=tf.get_variable_scope().reuse)
return cell
'Core neural network taking in inputs and outputting sampling distribution parameters.'
def core(self, obs, prev_internal_state, prev_actions):
batch_size = tf.shape(obs[0])[0]
if not self.recurrent:
    prev_internal_state = tf.zeros([batch_size, self.rnn_state_dim])
cell = self.get_cell()
b = tf.get_variable('input_bias', [self.cell_input_dim],
                    initializer=self.vector_init)
cell_input = tf.nn.bias_add(tf.zeros([batch_size, self.cell_input_dim]), b)
# Embed each observation component into the cell input.
for (i, (obs_dim, obs_type)) in enumerate(self.env_spec.obs_dims_and_types):
    w = tf.get_variable('w_state%d' % i, [obs_dim, self.cell_input_dim],
                        initializer=self.matrix_init)
    if self.env_spec.is_discrete(obs_type):
        cell_input += tf.matmul(tf.one_hot(obs[i], obs_dim), w)
    elif self.env_spec.is_box(obs_type):
        cell_input += tf.matmul(obs[i], w)
    else:
        assert False
if self.input_prev_actions:
    if self.env_spec.combine_actions:
        # Actions were combined into a single discrete value; unpack them.
        prev_action = prev_actions[0]
        for (i, action_dim) in enumerate(self.env_spec.orig_act_dims):
            act = tf.mod(prev_action, action_dim)
            w = tf.get_variable('w_prev_action%d' % i,
                                [action_dim, self.cell_input_dim],
                                initializer=self.matrix_init)
            cell_input += tf.matmul(tf.one_hot(act, action_dim), w)
            prev_action = tf.to_int32(prev_action / action_dim)
    else:
        for (i, (act_dim, act_type)) in enumerate(self.env_spec.act_dims_and_types):
            w = tf.get_variable('w_prev_action%d' % i,
                                [act_dim, self.cell_input_dim],
                                initializer=self.matrix_init)
            if self.env_spec.is_discrete(act_type):
                cell_input += tf.matmul(tf.one_hot(prev_actions[i], act_dim), w)
            elif self.env_spec.is_box(act_type):
                cell_input += tf.matmul(prev_actions[i], w)
            else:
                assert False
(output, next_state) = cell(cell_input, prev_internal_state)
return output, next_state
'Sample an action from a distribution.'
def sample_action(self, logits, sampling_dim, act_dim, act_type, greedy=False):
if self.env_spec.is_discrete(act_type):
    if greedy:
        act = tf.argmax(logits, 1)
    else:
        act = tf.reshape(tf.multinomial(logits, 1), [-1])
elif self.env_spec.is_box(act_type):
    # Continuous actions: logits hold concatenated means and std deviations.
    means = logits[:, :sampling_dim // 2]
    std = logits[:, sampling_dim // 2:]
    if greedy:
        act = means
    else:
        batch_size = tf.shape(logits)[0]
        act = means + std * tf.random_normal([batch_size, act_dim])
else:
    assert False
return act
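For continuous (box) actions, sampling follows the usual Gaussian reparameterization:

\[
a \;=\; \mu + \sigma \odot \varepsilon, \qquad \varepsilon \sim \mathcal{N}(0, I),
\]

with the first half of the logits taken as $\mu$ and the second half as $\sigma$.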