# File: bottom-up-attention-master/lib/roi_data_layer/layer.py
# --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
"""The data layer used during training to train a Fast R-CNN network.
RoIDataLayer implements a Caffe Python layer.
"""
import caffe
from fast_rcnn.config import cfg
from roi_data_layer.minibatch import get_minibatch
import numpy as np
import yaml
from multiprocessing import Process, Queue
class RoIDataLayer(caffe.Layer):
"""Fast R-CNN data layer used for training."""
def _shuffle_roidb_inds(self, gpu_id=0):
self.gpu_id = gpu_id
"""Randomly permute the training roidb."""
if cfg.TRAIN.ASPECT_GROUPING:
widths = np.array([r['width'] for r in self._roidb])
heights = np.array([r['height'] for r in self._roidb])
horz = (widths >= heights)
vert = np.logical_not(horz)
horz_inds = np.where(horz)[0]
vert_inds = np.where(vert)[0]
inds = np.hstack((
np.random.permutation(horz_inds),
np.random.permutation(vert_inds)))
inds = np.reshape(inds, (-1, 2))
np.random.seed(gpu_id)
row_perm = np.random.permutation(np.arange(inds.shape[0]))
inds = np.reshape(inds[row_perm, :], (-1,))
self._perm = inds
else:
self._perm = np.random.permutation(np.arange(len(self._roidb)))
self._cur = 0
def _get_next_minibatch_inds(self):
"""Return the roidb indices for the next minibatch."""
if self._cur + cfg.TRAIN.IMS_PER_BATCH >= len(self._roidb):
self._shuffle_roidb_inds(self.gpu_id)
db_inds = self._perm[self._cur:self._cur + cfg.TRAIN.IMS_PER_BATCH]
self._cur += cfg.TRAIN.IMS_PER_BATCH
return db_inds
def _get_next_minibatch(self):
"""Return the blobs to be used for the next minibatch.
If cfg.TRAIN.USE_PREFETCH is True, then blobs will be computed in a
separate process and made available through self._blob_queue.
"""
if cfg.TRAIN.USE_PREFETCH:
return self._blob_queue.get()
else:
db_inds = self._get_next_minibatch_inds()
minibatch_db = [self._roidb[i] for i in db_inds]
return get_minibatch(minibatch_db, self._num_classes)
def set_roidb(self, roidb, gpu_id=0):
"""Set the roidb to be used by this layer during training."""
self._roidb = roidb
self._shuffle_roidb_inds(gpu_id)
if cfg.TRAIN.USE_PREFETCH:
self._blob_queue = Queue(10)
self._prefetch_process = BlobFetcher(self._blob_queue,
self._roidb,
self._num_classes, gpu_id)
self._prefetch_process.start()
# Terminate the child process when the parent exits
def cleanup():
print 'Terminating BlobFetcher'
self._prefetch_process.terminate()
self._prefetch_process.join()
import atexit
atexit.register(cleanup)
def setup(self, bottom, top):
"""Setup the RoIDataLayer."""
# parse the layer parameter string, which must be valid YAML
layer_params = yaml.load(self.param_str)
self._num_classes = layer_params['num_classes']
self._name_to_top_map = {}
# data blob: holds a batch of N images, each with 3 channels
idx = 0
top[idx].reshape(cfg.TRAIN.IMS_PER_BATCH, 3,
max(cfg.TRAIN.SCALES), cfg.TRAIN.MAX_SIZE)
self._name_to_top_map['data'] = idx
idx += 1
if cfg.TRAIN.HAS_RPN:
top[idx].reshape(1, 3)
self._name_to_top_map['im_info'] = idx
idx += 1
top[idx].reshape(1, 4)
self._name_to_top_map['gt_boxes'] = idx
idx += 1
else: # not using RPN
# rois blob: holds R regions of interest, each is a 5-tuple
# (n, x1, y1, x2, y2) specifying an image batch index n and a
# rectangle (x1, y1, x2, y2)
top[idx].reshape(1, 5, 1, 1)
self._name_to_top_map['rois'] = idx
idx += 1
# labels blob: R categorical labels in [0, ..., K] for K foreground
# classes plus background
top[idx].reshape(1, 1, 1, 1)
self._name_to_top_map['labels'] = idx
idx += 1
if cfg.TRAIN.BBOX_REG:
# bbox_targets blob: R bounding-box regression targets with 4
# targets per class
num_reg_class = 2 if cfg.TRAIN.AGNOSTIC else self._num_classes
top[idx].reshape(1, num_reg_class * 4, 1, 1)
self._name_to_top_map['bbox_targets'] = idx
idx += 1
# bbox_inside_weights blob: At most 4 targets per roi are active;
# this binary vector specifies the subset of active targets
top[idx].reshape(1, num_reg_class * 4, 1, 1)
self._name_to_top_map['bbox_inside_weights'] = idx
idx += 1
top[idx].reshape(1, num_reg_class * 4, 1, 1)
self._name_to_top_map['bbox_outside_weights'] = idx
idx += 1
print 'RoiDataLayer: name_to_top:', self._name_to_top_map
assert len(top) == len(self._name_to_top_map)
def forward(self, bottom, top):
"""Get blobs and copy them into this layer's top blob vector."""
blobs = self._get_next_minibatch()
for blob_name, blob in blobs.iteritems():
top_ind = self._name_to_top_map[blob_name]
shape = blob.shape
if len(shape) == 1:
blob = blob.reshape(blob.shape[0], 1, 1, 1)
if len(shape) == 2 and blob_name != 'im_info':
blob = blob.reshape(blob.shape[0], blob.shape[1], 1, 1)
top[top_ind].reshape(*(blob.shape))
# Copy data into net's input blobs
top[top_ind].data[...] = blob.astype(np.float32, copy=False)
def backward(self, top, propagate_down, bottom):
"""This layer does not propagate gradients."""
pass
def reshape(self, bottom, top):
"""Reshaping happens during the call to forward."""
pass
class BlobFetcher(Process):
"""Experimental class for prefetching blobs in a separate process."""
def __init__(self, queue, roidb, num_classes, gpu_id=0):
super(BlobFetcher, self).__init__()
self._queue = queue
self._roidb = roidb
self._num_classes = num_classes
self._perm = None
self._cur = 0
self.gpu_id = gpu_id
np.random.seed(gpu_id)
self._shuffle_roidb_inds()
def _shuffle_roidb_inds(self):
"""Randomly permute the training roidb."""
# TODO(rbg): remove duplicated code
self._perm = np.random.permutation(np.arange(len(self._roidb)))
self._cur = 0
def _get_next_minibatch_inds(self):
"""Return the roidb indices for the next minibatch."""
# TODO(rbg): remove duplicated code
if self._cur + cfg.TRAIN.IMS_PER_BATCH >= len(self._roidb):
self._shuffle_roidb_inds()
db_inds = self._perm[self._cur:self._cur + cfg.TRAIN.IMS_PER_BATCH]
self._cur += cfg.TRAIN.IMS_PER_BATCH
return db_inds
def run(self):
print 'BlobFetcher started'
while True:
db_inds = self._get_next_minibatch_inds()
minibatch_db = [self._roidb[i] for i in db_inds]
blobs = get_minibatch(minibatch_db, self._num_classes)
self._queue.put(blobs)
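# Usage sketch (illustrative, not part of the original file): a minimal,
# hedged view of how this layer is typically driven. The solver's net is
# assumed to have RoIDataLayer as layer 0; 'solver.prototxt' and the empty
# roidb below are placeholders for values built elsewhere in this repo.
import caffe
solver = caffe.SGDSolver('solver.prototxt')
roidb = []  # placeholder: a list of roidb dicts built by roi_data_layer.roidb
solver.net.layers[0].set_roidb(roidb, gpu_id=0)  # layer 0 is this data layer
solver.step(1)  # each step calls forward(), which pulls one minibatch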
# File: bottom-up-attention-master/lib/roi_data_layer/minibatch.py
# --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
"""Compute minibatch blobs for training a Fast R-CNN network."""
import numpy as np
import numpy.random as npr
import scipy.sparse as sparse
import cv2
from fast_rcnn.config import cfg
from utils.blob import prep_im_for_blob, im_list_to_blob
def get_minibatch(roidb, num_classes):
"""Given a roidb, construct a minibatch sampled from it."""
num_images = len(roidb)
num_reg_class = 2 if cfg.TRAIN.AGNOSTIC else num_classes
# Sample random scales to use for each image in this batch
random_scale_inds = npr.randint(0, high=len(cfg.TRAIN.SCALES),
size=num_images)
assert(cfg.TRAIN.BATCH_SIZE % num_images == 0) or (cfg.TRAIN.BATCH_SIZE == -1), \
'num_images ({}) must divide BATCH_SIZE ({})'. \
format(num_images, cfg.TRAIN.BATCH_SIZE)
rois_per_image = np.inf if cfg.TRAIN.BATCH_SIZE == -1 else cfg.TRAIN.BATCH_SIZE / num_images
fg_rois_per_image = np.round(cfg.TRAIN.FG_FRACTION * rois_per_image)
# Get the input image blob, formatted for caffe
im_blob, im_scales = _get_image_blob(roidb, random_scale_inds)
blobs = {'data': im_blob}
if cfg.TRAIN.HAS_RPN:
assert len(im_scales) == 1, "Single batch only"
assert len(roidb) == 1, "Single batch only"
# gt boxes: (x1, y1, x2, y2, cls)
gt_inds = np.where(roidb[0]['gt_classes'] != 0)[0]
num_gt = len(gt_inds)
assert num_gt > 0, "gt must not be empty"
if cfg.TRAIN.HAS_ATTRIBUTES:
if cfg.TRAIN.HAS_RELATIONS:
gt_boxes = np.zeros((num_gt, 21 + num_gt), dtype=np.float32)
else:
gt_boxes = np.zeros((num_gt, 21), dtype=np.float32)
else:
gt_boxes = np.zeros((num_gt, 5), dtype=np.float32)
gt_boxes[:, 0:4] = roidb[0]['boxes'][gt_inds, :] * im_scales[0]
gt_boxes[:, 4] = roidb[0]['gt_classes'][gt_inds]
if cfg.TRAIN.HAS_ATTRIBUTES:
gt_boxes[:, 5:21] = roidb[0]['gt_attributes'][gt_inds].toarray()
if cfg.TRAIN.HAS_RELATIONS:
assert num_gt == roidb[0]['gt_classes'].shape[0], \
"Generation of gt_relations doesn't accomodate dropping objects"
coords = roidb[0]['gt_relations'] # i,relation,j
if coords.size > 0:
assert num_gt > coords.max(axis=0)[0], \
"gt_relations subject index exceeds number of objects"
assert num_gt > coords.max(axis=0)[2], \
"gt_relations object index exceeds number of objects"
np.random.shuffle(coords) # There may be multiple relations between the same objects
rel_matrix = gt_boxes[:, 21:]
for r in range(coords.shape[0]):
rel_matrix[coords[r,0],coords[r,2]] = coords[r,1]
blobs['gt_boxes'] = gt_boxes
blobs['im_info'] = np.array(
[[im_blob.shape[2], im_blob.shape[3], im_scales[0]]],
dtype=np.float32)
else: # not using RPN
# Now, build the region of interest and label blobs
rois_blob = np.zeros((0, 5), dtype=np.float32)
labels_blob = np.zeros((0), dtype=np.float32)
bbox_targets_blob = np.zeros((0, 4 * num_reg_class), dtype=np.float32)
bbox_inside_blob = np.zeros(bbox_targets_blob.shape, dtype=np.float32)
# all_overlaps = []
for im_i in xrange(num_images):
labels, overlaps, im_rois, bbox_targets, bbox_inside_weights \
= _sample_rois(roidb[im_i], fg_rois_per_image, rois_per_image,
num_classes)
# Add to RoIs blob
rois = _project_im_rois(im_rois, im_scales[im_i])
batch_ind = im_i * np.ones((rois.shape[0], 1))
rois_blob_this_image = np.hstack((batch_ind, rois))
rois_blob = np.vstack((rois_blob, rois_blob_this_image))
# Add to labels, bbox targets, and bbox loss blobs
labels_blob = np.hstack((labels_blob, labels))
bbox_targets_blob = np.vstack((bbox_targets_blob, bbox_targets))
bbox_inside_blob = np.vstack((bbox_inside_blob, bbox_inside_weights))
# all_overlaps = np.hstack((all_overlaps, overlaps))
# For debug visualizations
# _vis_minibatch(im_blob, rois_blob, labels_blob, all_overlaps)
blobs['rois'] = rois_blob
blobs['labels'] = labels_blob
if cfg.TRAIN.BBOX_REG:
blobs['bbox_targets'] = bbox_targets_blob
blobs['bbox_inside_weights'] = bbox_inside_blob
blobs['bbox_outside_weights'] = \
np.array(bbox_inside_blob > 0).astype(np.float32)
return blobs
def _sample_rois(roidb, fg_rois_per_image, rois_per_image, num_classes):
"""Generate a random sample of RoIs comprising foreground and background
examples.
"""
# label = class RoI has max overlap with
labels = roidb['max_classes']
overlaps = roidb['max_overlaps']
rois = roidb['boxes']
# Select foreground RoIs as those with >= FG_THRESH overlap
fg_inds = np.where(overlaps >= cfg.TRAIN.FG_THRESH)[0]
# Guard against the case when an image has fewer than fg_rois_per_image
# foreground RoIs
fg_rois_per_this_image = np.minimum(fg_rois_per_image, fg_inds.size)
# Sample foreground regions without replacement
if fg_inds.size > 0:
fg_inds = npr.choice(
fg_inds, size=fg_rois_per_this_image, replace=False)
# Select background RoIs as those within [BG_THRESH_LO, BG_THRESH_HI)
bg_inds = np.where((overlaps < cfg.TRAIN.BG_THRESH_HI) &
(overlaps >= cfg.TRAIN.BG_THRESH_LO))[0]
# Compute number of background RoIs to take from this image (guarding
# against there being fewer than desired)
bg_rois_per_this_image = rois_per_image - fg_rois_per_this_image
bg_rois_per_this_image = np.minimum(bg_rois_per_this_image,
bg_inds.size)
# Sample background regions without replacement
if bg_inds.size > 0:
bg_inds = npr.choice(
bg_inds, size=bg_rois_per_this_image, replace=False)
# The indices that we're selecting (both fg and bg)
keep_inds = np.append(fg_inds, bg_inds)
# Select sampled values from various arrays:
labels = labels[keep_inds]
# Clamp labels for the background RoIs to 0
labels[fg_rois_per_this_image:] = 0
overlaps = overlaps[keep_inds]
rois = rois[keep_inds]
bbox_targets, bbox_inside_weights = _get_bbox_regression_labels(
roidb['bbox_targets'][keep_inds, :], num_classes)
return labels, overlaps, rois, bbox_targets, bbox_inside_weights
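# Worked example of the sampling arithmetic above (default config values):
# cfg.TRAIN.BATCH_SIZE = 128 and cfg.TRAIN.IMS_PER_BATCH = 2 give
# rois_per_image = 128 / 2 = 64; with cfg.TRAIN.FG_FRACTION = 0.25,
# fg_rois_per_image = round(0.25 * 64) = 16. If an image has only 9 RoIs
# with overlap >= FG_THRESH, fg_rois_per_this_image is clamped to 9 and
# the remaining 64 - 9 = 55 slots are drawn from the background range.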
def _get_image_blob(roidb, scale_inds):
"""Builds an input blob from the images in the roidb at the specified
scales.
"""
num_images = len(roidb)
processed_ims = []
im_scales = []
for i in xrange(num_images):
im = cv2.imread(roidb[i]['image'])
if roidb[i]['flipped']:
im = im[:, ::-1, :]
target_size = cfg.TRAIN.SCALES[scale_inds[i]]
im, im_scale = prep_im_for_blob(im, cfg.PIXEL_MEANS, target_size,
cfg.TRAIN.MAX_SIZE)
im_scales.append(im_scale)
processed_ims.append(im)
# Create a blob to hold the input images
blob = im_list_to_blob(processed_ims)
return blob, im_scales
def _project_im_rois(im_rois, im_scale_factor):
"""Project image RoIs into the rescaled training image."""
rois = im_rois * im_scale_factor
return rois
def _get_bbox_regression_labels(bbox_target_data, num_classes):
"""Bounding-box regression targets are stored in a compact form in the
roidb.
This function expands those targets into the 4-of-4*K representation used
by the network (i.e. only one class has non-zero targets). The loss weights
are similarly expanded.
Returns:
bbox_target_data (ndarray): N x 4K blob of regression targets
bbox_inside_weights (ndarray): N x 4K blob of loss weights
"""
clss = bbox_target_data[:, 0]
num_reg_class = 2 if cfg.TRAIN.AGNOSTIC else num_classes
bbox_targets = np.zeros((clss.size, 4 * num_reg_class), dtype=np.float32)
bbox_inside_weights = np.zeros(bbox_targets.shape, dtype=np.float32)
inds = np.where(clss > 0)[0]
if cfg.TRAIN.AGNOSTIC:
for ind in inds:
cls = clss[ind]
start = 4 * (1 if cls > 0 else 0)
end = start + 4
bbox_targets[ind, start:end] = bbox_target_data[ind, 1:]
bbox_inside_weights[ind, start:end] = cfg.TRAIN.BBOX_INSIDE_WEIGHTS
else:
for ind in inds:
cls = clss[ind]
start = 4 * cls
end = start + 4
bbox_targets[ind, start:end] = bbox_target_data[ind, 1:]
bbox_inside_weights[ind, start:end] = cfg.TRAIN.BBOX_INSIDE_WEIGHTS
return bbox_targets, bbox_inside_weights
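# Illustration of the compact -> expanded layout above (invented values):
# a compact row stores (cls, tx, ty, tw, th). For cls = 2, num_classes = 3
# and AGNOSTIC = False, the expanded row is non-zero only in columns 8:12:
#   compact row:    [2, 0.1, -0.2, 0.3, 0.05]
#   bbox_targets:   [0, 0, 0, 0,  0, 0, 0, 0,  0.1, -0.2, 0.3, 0.05]
#   inside weights: [0, 0, 0, 0,  0, 0, 0, 0,  1.0,  1.0, 1.0, 1.0]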
def _vis_minibatch(im_blob, rois_blob, labels_blob, overlaps):
"""Visualize a mini-batch for debugging."""
import matplotlib.pyplot as plt
for i in xrange(rois_blob.shape[0]):
rois = rois_blob[i, :]
im_ind = rois[0]
roi = rois[1:]
im = im_blob[im_ind, :, :, :].transpose((1, 2, 0)).copy()
im += cfg.PIXEL_MEANS
im = im[:, :, (2, 1, 0)]
im = im.astype(np.uint8)
cls = labels_blob[i]
plt.imshow(im)
print 'class: ', cls, ' overlap: ', overlaps[i]
plt.gca().add_patch(
plt.Rectangle((roi[0], roi[1]), roi[2] - roi[0],
roi[3] - roi[1], fill=False,
edgecolor='r', linewidth=3)
)
plt.show()
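# Usage sketch (illustrative, not part of the original file): calling
# get_minibatch on one hand-built roidb entry, assuming cfg.TRAIN.HAS_RPN is
# True and HAS_ATTRIBUTES/HAS_RELATIONS are False. Every value below is an
# invented stand-in, including the image path.
entry = {'image': '/path/to/img.jpg',
         'flipped': False,
         'boxes': np.array([[10, 10, 100, 120]], dtype=np.float32),
         'gt_classes': np.array([3])}
blobs = get_minibatch([entry], num_classes=21)
print blobs['data'].shape   # (1, 3, H, W) image blob
print blobs['gt_boxes']     # scaled (x1, y1, x2, y2, cls) rows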
# File: bottom-up-attention-master/lib/fast_rcnn/test.py
# --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
"""Test a Fast R-CNN network on an imdb (image database)."""
from fast_rcnn.config import cfg, get_output_dir
from fast_rcnn.bbox_transform import clip_boxes, bbox_transform_inv
import argparse
from utils.timer import Timer
import numpy as np
import cv2
import caffe
from fast_rcnn.nms_wrapper import nms, soft_nms
import cPickle
from utils.blob import im_list_to_blob
import os
from utils.cython_bbox import bbox_overlaps
def _get_image_blob(im):
"""Converts an image into a network input.
Arguments:
im (ndarray): a color image in BGR order
Returns:
blob (ndarray): a data blob holding an image pyramid
im_scale_factors (list): list of image scales (relative to im) used
in the image pyramid
"""
im_orig = im.astype(np.float32, copy=True)
im_orig -= cfg.PIXEL_MEANS
im_shape = im_orig.shape
im_size_min = np.min(im_shape[0:2])
im_size_max = np.max(im_shape[0:2])
processed_ims = []
im_scale_factors = []
for target_size in cfg.TEST.SCALES:
im_scale = float(target_size) / float(im_size_min)
# Prevent the biggest axis from being more than MAX_SIZE
if np.round(im_scale * im_size_max) > cfg.TEST.MAX_SIZE:
im_scale = float(cfg.TEST.MAX_SIZE) / float(im_size_max)
im = cv2.resize(im_orig, None, None, fx=im_scale, fy=im_scale,
interpolation=cv2.INTER_LINEAR)
im_scale_factors.append(im_scale)
processed_ims.append(im)
# Create a blob to hold the input images
blob = im_list_to_blob(processed_ims)
return blob, np.array(im_scale_factors)
def _get_rois_blob(im_rois, im_scale_factors):
"""Converts RoIs into network inputs.
Arguments:
im_rois (ndarray): R x 4 matrix of RoIs in original image coordinates
im_scale_factors (list): scale factors as returned by _get_image_blob
Returns:
blob (ndarray): R x 5 matrix of RoIs in the image pyramid
"""
rois, levels = _project_im_rois(im_rois, im_scale_factors)
rois_blob = np.hstack((levels, rois))
return rois_blob.astype(np.float32, copy=False)
def _project_im_rois(im_rois, scales):
"""Project image RoIs into the image pyramid built by _get_image_blob.
Arguments:
im_rois (ndarray): R x 4 matrix of RoIs in original image coordinates
scales (list): scale factors as returned by _get_image_blob
Returns:
rois (ndarray): R x 4 matrix of projected RoI coordinates
levels (list): image pyramid levels used by each projected RoI
"""
im_rois = im_rois.astype(np.float, copy=False)
if len(scales) > 1:
widths = im_rois[:, 2] - im_rois[:, 0] + 1
heights = im_rois[:, 3] - im_rois[:, 1] + 1
areas = widths * heights
scaled_areas = areas[:, np.newaxis] * (scales[np.newaxis, :] ** 2)
diff_areas = np.abs(scaled_areas - 224 * 224)
levels = diff_areas.argmin(axis=1)[:, np.newaxis]
else:
levels = np.zeros((im_rois.shape[0], 1), dtype=np.int)
rois = im_rois * scales[levels]
return rois, levels
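# Illustration of the level assignment above (invented numbers): with
# scales = [1.0, 2.0] and a 100x100 RoI, the scaled areas are 1e4 and 4e4;
# 4e4 is closer to 224*224 = 50176, so the RoI is assigned to pyramid
# level 1 and its coordinates are multiplied by scale 2.0.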
def _get_blobs(im, rois):
"""Convert an image and RoIs within that image into network inputs."""
blobs = {'data' : None, 'rois' : None}
blobs['data'], im_scale_factors = _get_image_blob(im)
if not cfg.TEST.HAS_RPN:
blobs['rois'] = _get_rois_blob(rois, im_scale_factors)
return blobs, im_scale_factors
def im_detect(net, im, boxes=None, force_boxes=False):
"""Detect object classes in an image given object proposals.
Arguments:
net (caffe.Net): Fast R-CNN network to use
im (ndarray): color image to test (in BGR order)
boxes (ndarray): R x 4 array of object proposals or None (for RPN)
Returns:
scores (ndarray): R x K array of object class scores (K includes
background as object category 0)
boxes (ndarray): R x (4*K) array of predicted bounding boxes
attr_scores (ndarray): R x M array of attribute class scores
"""
blobs, im_scales = _get_blobs(im, boxes)
if force_boxes:
blobs['rois'] = _get_rois_blob(boxes, im_scales)
# When mapping from image ROIs to feature map ROIs, there's some aliasing
# (some distinct image ROIs get mapped to the same feature ROI).
# Here, we identify duplicate feature ROIs, so we only compute features
# on the unique subset (a worked example of the hashing follows im_detect).
if cfg.DEDUP_BOXES > 0 and not cfg.TEST.HAS_RPN:
v = np.array([1, 1e3, 1e6, 1e9, 1e12])
hashes = np.round(blobs['rois'] * cfg.DEDUP_BOXES).dot(v)
_, index, inv_index = np.unique(hashes, return_index=True,
return_inverse=True)
blobs['rois'] = blobs['rois'][index, :]
boxes = boxes[index, :]
im_blob = blobs['data']
blobs['im_info'] = np.array(
[[im_blob.shape[2], im_blob.shape[3], im_scales[0]]],
dtype=np.float32)
# reshape network inputs
net.blobs['data'].reshape(*(blobs['data'].shape))
if 'im_info' in net.blobs:
net.blobs['im_info'].reshape(*(blobs['im_info'].shape))
if force_boxes or not cfg.TEST.HAS_RPN:
net.blobs['rois'].reshape(*(blobs['rois'].shape))
# do forward
forward_kwargs = {'data': blobs['data'].astype(np.float32, copy=False)}
if 'im_info' in net.blobs:
forward_kwargs['im_info'] = blobs['im_info'].astype(np.float32, copy=False)
if force_boxes or not cfg.TEST.HAS_RPN:
forward_kwargs['rois'] = blobs['rois'].astype(np.float32, copy=False)
blobs_out = net.forward(**forward_kwargs)
if cfg.TEST.HAS_RPN and not force_boxes:
assert len(im_scales) == 1, "Only single-image batch implemented"
rois = net.blobs['rois'].data.copy()
# unscale back to raw image space
boxes = rois[:, 1:5] / im_scales[0]
if cfg.TEST.SVM:
# use the raw scores before softmax under the assumption they
# were trained as linear SVMs
scores = net.blobs['cls_score'].data
else:
# use softmax estimated probabilities
scores = blobs_out['cls_prob']
if cfg.TEST.BBOX_REG:
# Apply bounding-box regression deltas
box_deltas = blobs_out['bbox_pred']
pred_boxes = bbox_transform_inv(boxes, box_deltas)
pred_boxes = clip_boxes(pred_boxes, im.shape)
else:
# Simply repeat the boxes, once for each class
pred_boxes = np.tile(boxes, (1, scores.shape[1]))
if cfg.DEDUP_BOXES > 0 and not cfg.TEST.HAS_RPN:
# Map scores and predictions back to the original set of boxes
scores = scores[inv_index, :]
pred_boxes = pred_boxes[inv_index, :]
if 'attr_prob' in net.blobs:
attr_scores = blobs_out['attr_prob']
else:
attr_scores = None
if 'rel_prob' in net.blobs:
rel_scores = blobs_out['rel_prob']
else:
rel_scores = None
return scores, pred_boxes, attr_scores, rel_scores
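# Worked example of the DEDUP_BOXES hashing used above (invented numbers):
# with cfg.DEDUP_BOXES = 1/16, an rois row (0, 16, 32, 48, 64) rounds to
# (0, 1, 2, 3, 4); dotting with v = [1, 1e3, 1e6, 1e9, 1e12] packs those
# digits into the single scalar 4003002001000.0. Any two rows whose
# coordinates land in the same 16-pixel feature cells collide on this
# hash, and np.unique keeps only one of them.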
def vis_detections(im, class_name, dets, thresh=0.3, filename='vis.png'):
"""Visual debugging of detections."""
import matplotlib.pyplot as plt
im = im[:, :, (2, 1, 0)]
plt.cla()
plt.imshow(im)
for i in xrange(np.minimum(10, dets.shape[0])):
bbox = dets[i, :4]
score = dets[i, -1]
if score > thresh:
plt.gca().add_patch(
plt.Rectangle((bbox[0], bbox[1]),
bbox[2] - bbox[0],
bbox[3] - bbox[1], fill=False,
edgecolor='g', linewidth=3)
)
plt.title('{} {:.3f}'.format(class_name, score))
plt.show()
plt.savefig('./data/vis/%s' % filename)
def vis_multiple(im, class_names, all_boxes, filename='vis.png'):
"""Visual debugging of detections."""
print filename
import matplotlib.pyplot as plt
im = im[:, :, (2, 1, 0)]
plt.cla()
plt.imshow(im)
max_boxes = 10
image_scores = np.hstack([all_boxes[j][:, 4]
for j in xrange(1, len(class_names))])
if len(image_scores) > 10:
image_thresh = np.sort(image_scores)[-max_boxes]
else:
image_thresh = -np.inf
for j in xrange(1, len(class_names)):
keep = np.where(all_boxes[j][:, 4] >= image_thresh)[0]
dets = all_boxes[j][keep, :]
for i in range(dets.shape[0]):
bbox = dets[i, :4]
score = dets[i, -1]
plt.gca().add_patch(
plt.Rectangle((bbox[0], bbox[1]),
bbox[2] - bbox[0],
bbox[3] - bbox[1], fill=False,
edgecolor='red', linewidth=1)
)
plt.gca().text(bbox[0], bbox[1] - 2,
'{:s} {:.3f}'.format(class_names[j], score),
bbox=dict(facecolor='blue', alpha=0.5),
fontsize=8, color='white')
plt.title('Best %d Attributes using gt boxes' % max_boxes)
plt.show()
plt.savefig('./data/vis/%s' % filename)
def vis_relations(im, class_names, box_proposals, scores, filename='vis.png'):
n = box_proposals.shape[0]
assert scores.shape[0] == n*n
print filename
import matplotlib.pyplot as plt
im = im[:, :, (2, 1, 0)]
plt.cla()
plt.imshow(im)
max_rels = 5
scores = scores[:, 1:]
image_scores = scores.flatten()
if len(image_scores) > 10:
image_thresh = np.sort(image_scores)[-max_rels]
else:
image_thresh = -np.inf
for i in xrange(n):
for j in xrange(n):
keep = np.where(scores[i*n+j] >= image_thresh)[0]
for ix in keep:
bbox = box_proposals[i]
score = scores[i*n+j, ix]
plt.gca().add_patch(
plt.Rectangle((bbox[0], bbox[1]),
bbox[2] - bbox[0],
bbox[3] - bbox[1], fill=False,
edgecolor='red', linewidth=1)
)
plt.gca().text(bbox[0], bbox[1] - 2,
'{:s} {:.3f}'.format(class_names[ix], score),
bbox=dict(facecolor='blue', alpha=0.5),
fontsize=8, color='white')
bbox = box_proposals[j]
plt.gca().add_patch(
plt.Rectangle((bbox[0], bbox[1]),
bbox[2] - bbox[0],
bbox[3] - bbox[1], fill=False,
edgecolor='red', linewidth=1)
)
plt.title('Best %d Relations using gt boxes' % max_rels)
plt.show()
plt.savefig('./data/vis/%s' % filename)
def apply_nms(all_boxes, thresh):
"""Apply non-maximum suppression to all predicted boxes output by the
test_net method.
"""
num_classes = len(all_boxes)
num_images = len(all_boxes[0])
nms_boxes = [[[] for _ in xrange(num_images)]
for _ in xrange(num_classes)]
for cls_ind in xrange(num_classes):
for im_ind in xrange(num_images):
dets = all_boxes[cls_ind][im_ind]
if len(dets) == 0:
continue
# CPU NMS is much faster than GPU NMS when the number of boxes
# is relatively small (e.g., < 10k)
# TODO(rbg): autotune NMS dispatch
keep = nms(dets, thresh, force_cpu=True)
if len(keep) == 0:
continue
nms_boxes[cls_ind][im_ind] = dets[keep, :].copy()
return nms_boxes
def test_net(net, imdb, max_per_image=400, thresh=-np.inf, vis=False, load_cache=False):
"""Test a Fast R-CNN network on an image database."""
num_images = len(imdb.image_index)
# all detections are collected into:
# all_boxes[cls][image] = N x 5 array of detections in
# (x1, y1, x2, y2, score)
all_boxes = [[[] for _ in xrange(num_images)]
for _ in xrange(imdb.num_classes)]
output_dir = get_output_dir(imdb, net)
det_file = os.path.join(output_dir, 'detections.pkl')
if load_cache and os.path.exists(det_file):
print 'Loading pickled detections from %s' % det_file
with open(det_file, 'rb') as f:
all_boxes = cPickle.load(f)
else:
# timers
_t = {'im_detect' : Timer(), 'misc' : Timer()}
if not cfg.TEST.HAS_RPN:
roidb = imdb.roidb
for i in xrange(num_images):
# filter out any ground truth boxes
if cfg.TEST.HAS_RPN:
box_proposals = None
else:
# The roidb may contain ground-truth rois (for example, if the roidb
# comes from the training or val split). We only want to evaluate
# detection on the *non*-ground-truth rois. We select those rois
# that have the gt_classes field set to 0, which means there's no
# ground truth.
box_proposals = roidb[i]['boxes'][roidb[i]['gt_classes'] == 0]
im = cv2.imread(imdb.image_path_at(i))
_t['im_detect'].tic()
scores, boxes, attr_scores, rel_scores = im_detect(net, im, box_proposals)
_t['im_detect'].toc()
_t['misc'].tic()
# skip j = 0, because it's the background class
for j in xrange(1, imdb.num_classes):
inds = np.where(scores[:, j] > thresh)[0]
cls_scores = scores[inds, j]
if cfg.TEST.AGNOSTIC:
cls_boxes = boxes[inds, 4:8]
else:
cls_boxes = boxes[inds, j*4:(j+1)*4]
cls_dets = np.hstack((cls_boxes, cls_scores[:, np.newaxis])) \
.astype(np.float32, copy=False)
#keep = soft_nms(cls_dets, method=cfg.TEST.SOFT_NMS)
keep = nms(cls_dets, cfg.TEST.NMS)
cls_dets = cls_dets[keep, :]
if vis:
vis_detections(im, imdb.classes[j], cls_dets)
all_boxes[j][i] = cls_dets
# Limit to max_per_image detections *over all classes*
if max_per_image > 0:
image_scores = np.hstack([all_boxes[j][i][:, 4]
for j in xrange(1, imdb.num_classes)])
if len(image_scores) > max_per_image:
image_thresh = np.sort(image_scores)[-max_per_image]
for j in xrange(1, imdb.num_classes):
keep = np.where(all_boxes[j][i][:, 4] >= image_thresh)[0]
all_boxes[j][i] = all_boxes[j][i][keep, :]
_t['misc'].toc()
print 'im_detect: {:d}/{:d} {:.3f}s {:.3f}s' \
.format(i + 1, num_images, _t['im_detect'].average_time,
_t['misc'].average_time)
with open(det_file, 'wb') as f:
cPickle.dump(all_boxes, f, cPickle.HIGHEST_PROTOCOL)
print 'Evaluating detections'
imdb.evaluate_detections(all_boxes, output_dir)
def test_net_with_gt_boxes(net, imdb, max_per_image=400, thresh=-np.inf, vis=False, load_cache=False):
"""Test a Fast R-CNN network on an image database, evaluating attribute
and relation detections given ground truth boxes."""
num_images = len(imdb.image_index)
# all detections are collected into:
# all_boxes[cls][image] = N x 5 array of detections in
# (x1, y1, x2, y2, score)
all_boxes = [[[] for _ in xrange(num_images)]
for _ in xrange(imdb.num_attributes)]
rel_boxes = [[[] for _ in xrange(num_images)]
for _ in xrange(imdb.num_relations)]
output_dir = get_output_dir(imdb, net, attributes=True)
det_file = os.path.join(output_dir, 'attribute_detections.pkl')
rel_file = os.path.join(output_dir, 'relation_detections.pkl')
if load_cache and os.path.exists(det_file):
print 'Loading pickled detections from %s' % det_file
with open(det_file, 'rb') as f:
all_boxes = cPickle.load(f)
with open(rel_file, 'rb') as f:
rel_boxes = cPickle.load(f)
else:
# timers
_t = {'im_detect' : Timer(), 'misc' : Timer()}
roidb = imdb.gt_roidb()
for i in xrange(num_images):
box_proposals = roidb[i]['boxes']
im = cv2.imread(imdb.image_path_at(i))
_t['im_detect'].tic()
scores, boxes, attr_scores, rel_scores = im_detect(net, im, box_proposals, force_boxes=True)
_t['im_detect'].toc()
_t['misc'].tic()
# skip j = 0, because it's the 'no attribute' class
if attr_scores.shape[1] < imdb.num_attributes:
attr_scores = np.hstack((np.zeros((attr_scores.shape[0],1)),attr_scores))
if rel_scores is not None and rel_scores.shape[1] < imdb.num_relations:
rel_scores = np.hstack((np.zeros((rel_scores.shape[0],1)),rel_scores))
for j in xrange(1, imdb.num_attributes):
inds = np.where(attr_scores[:, j] > thresh)[0]
cls_scores = attr_scores[inds, j]
cls_boxes = box_proposals[inds, :]
cls_dets = np.hstack((cls_boxes, cls_scores[:, np.newaxis])) \
.astype(np.float32, copy=False)
all_boxes[j][i] = cls_dets
# Limit to max_per_image detections *over all attributes*
if max_per_image > 0:
image_scores = np.hstack([all_boxes[j][i][:, 4]
for j in xrange(1, imdb.num_attributes)])
if len(image_scores) > max_per_image:
image_thresh = np.sort(image_scores)[-max_per_image]
for j in xrange(1, imdb.num_attributes):
keep = np.where(all_boxes[j][i][:, 4] >= image_thresh)[0]
all_boxes[j][i] = all_boxes[j][i][keep, :]
if vis:
im_boxes = [all_boxes[j][i] for j in xrange(imdb.num_attributes)]
vis_multiple(im, imdb.attributes, im_boxes, filename='attr_%d.png' % i)
if rel_scores is not None:
vis_relations(im, imdb.relations, box_proposals, rel_scores, filename='rel_%d.png' % i)
_t['misc'].toc()
print 'im_detect: {:d}/{:d} {:.3f}s {:.3f}s' \
.format(i + 1, num_images, _t['im_detect'].average_time,
_t['misc'].average_time)
with open(det_file, 'wb') as f:
cPickle.dump(all_boxes, f, cPickle.HIGHEST_PROTOCOL)
print 'Evaluating attribute and / or relation detections'
imdb.evaluate_attributes(all_boxes, output_dir)
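# Usage sketch (illustrative, not part of the original file): running
# detection evaluation end to end. get_imdb comes from datasets.factory in
# this repo; the imdb name and prototxt/caffemodel paths are placeholders.
from datasets.factory import get_imdb
imdb = get_imdb('voc_2007_test')
net = caffe.Net('test.prototxt', 'model.caffemodel', caffe.TEST)
net.name = 'example_model'   # used by get_output_dir to build the path
test_net(net, imdb, max_per_image=400, vis=False)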
# File: bottom-up-attention-master/lib/fast_rcnn/train_multi_gpu.py
# --------------------------------------------------------
# Written by Bharat Singh
# Modified version of py-R-FCN
# --------------------------------------------------------
"""Train a Fast R-CNN network."""
import caffe
from fast_rcnn.config import cfg
import roi_data_layer.roidb as rdl_roidb
from utils.timer import Timer
import numpy as np
import os
from caffe.proto import caffe_pb2
import google.protobuf as pb2
import google.protobuf.text_format
from multiprocessing import Process
class SolverWrapper(object):
"""A simple wrapper around Caffe's solver.
This wrapper gives us control over the snapshotting process, which we
use to unnormalize the learned bounding-box regression weights.
"""
def __init__(self, solver_prototxt, roidb, output_dir, gpu_id,
pretrained_model=None):
"""Initialize the SolverWrapper."""
self.output_dir = output_dir
self.gpu_id = gpu_id
if (cfg.TRAIN.HAS_RPN and cfg.TRAIN.BBOX_REG and
cfg.TRAIN.BBOX_NORMALIZE_TARGETS):
# RPN can only use precomputed normalization because there are no
# fixed statistics to compute a priori
assert cfg.TRAIN.BBOX_NORMALIZE_TARGETS_PRECOMPUTED
if cfg.TRAIN.BBOX_REG:
print 'Computing bounding-box regression targets...'
self.bbox_means, self.bbox_stds = \
rdl_roidb.add_bbox_regression_targets(roidb)
print 'done'
self.solver = caffe.SGDSolver(solver_prototxt)
if pretrained_model is not None:
print ('Loading pretrained model '
'weights from {:s}').format(pretrained_model)
self.solver.net.copy_from(pretrained_model)
self.solver_param = caffe_pb2.SolverParameter()
with open(solver_prototxt, 'rt') as f:
pb2.text_format.Merge(f.read(), self.solver_param)
self.solver.net.layers[0].set_roidb(roidb, gpu_id)
def snapshot(self):
"""Take a snapshot of the network after unnormalizing the learned
bounding-box regression weights. This enables easy use at test-time.
"""
net = self.solver.net
scale_bbox_params_faster_rcnn = (cfg.TRAIN.BBOX_REG and
cfg.TRAIN.BBOX_NORMALIZE_TARGETS and
net.params.has_key('bbox_pred'))
scale_bbox_params_rfcn = (cfg.TRAIN.BBOX_REG and
cfg.TRAIN.BBOX_NORMALIZE_TARGETS and
net.params.has_key('rfcn_bbox'))
scale_bbox_params_rpn = (cfg.TRAIN.RPN_NORMALIZE_TARGETS and
net.params.has_key('rpn_bbox_pred'))
if scale_bbox_params_faster_rcnn:
# save original values
orig_0 = net.params['bbox_pred'][0].data.copy()
orig_1 = net.params['bbox_pred'][1].data.copy()
# scale and shift with bbox reg unnormalization; then save snapshot
net.params['bbox_pred'][0].data[...] = \
(net.params['bbox_pred'][0].data *
self.bbox_stds[:, np.newaxis])
net.params['bbox_pred'][1].data[...] = \
(net.params['bbox_pred'][1].data *
self.bbox_stds + self.bbox_means)
if scale_bbox_params_rpn:
rpn_orig_0 = net.params['rpn_bbox_pred'][0].data.copy()
rpn_orig_1 = net.params['rpn_bbox_pred'][1].data.copy()
num_anchor = rpn_orig_0.shape[0] / 4
# scale and shift with bbox reg unnormalization; then save snapshot
self.rpn_means = np.tile(np.asarray(cfg.TRAIN.RPN_NORMALIZE_MEANS),
num_anchor)
self.rpn_stds = np.tile(np.asarray(cfg.TRAIN.RPN_NORMALIZE_STDS),
num_anchor)
net.params['rpn_bbox_pred'][0].data[...] = \
(net.params['rpn_bbox_pred'][0].data *
self.rpn_stds[:, np.newaxis, np.newaxis, np.newaxis])
net.params['rpn_bbox_pred'][1].data[...] = \
(net.params['rpn_bbox_pred'][1].data *
self.rpn_stds + self.rpn_means)
if scale_bbox_params_rfcn:
# save original values
orig_0 = net.params['rfcn_bbox'][0].data.copy()
orig_1 = net.params['rfcn_bbox'][1].data.copy()
repeat = orig_1.shape[0] / self.bbox_means.shape[0]
# scale and shift with bbox reg unnormalization; then save snapshot
net.params['rfcn_bbox'][0].data[...] = \
(net.params['rfcn_bbox'][0].data *
np.repeat(self.bbox_stds, repeat).reshape((orig_1.shape[0], 1, 1, 1)))
net.params['rfcn_bbox'][1].data[...] = \
(net.params['rfcn_bbox'][1].data *
np.repeat(self.bbox_stds, repeat) + np.repeat(self.bbox_means, repeat))
infix = ('_' + cfg.TRAIN.SNAPSHOT_INFIX
if cfg.TRAIN.SNAPSHOT_INFIX != '' else '')
filename = (self.solver_param.snapshot_prefix + infix +
'_iter_{:d}'.format(self.solver.iter) + '.caffemodel')
filename = os.path.join(self.output_dir, filename)
if self.gpu_id == 0:
net.save(str(filename))
print 'Wrote snapshot to: {:s}'.format(filename)
if scale_bbox_params_faster_rcnn:
# restore net to original state
net.params['bbox_pred'][0].data[...] = orig_0
net.params['bbox_pred'][1].data[...] = orig_1
if scale_bbox_params_rfcn:
# restore net to original state
net.params['rfcn_bbox'][0].data[...] = orig_0
net.params['rfcn_bbox'][1].data[...] = orig_1
if scale_bbox_params_rpn:
# restore net to original state
net.params['rpn_bbox_pred'][0].data[...] = rpn_orig_0
net.params['rpn_bbox_pred'][1].data[...] = rpn_orig_1
return filename
def track_memory(self):
net = self.solver.net
print 'Memory Usage:'
total = 0.0
data = 0.0
params = 0.0
for k,v in net.blobs.iteritems():
gb = float(v.data.nbytes)/1024/1024/1024
print '%s : %.3f GB %s' % (k,gb,v.data.shape)
total += gb
data += gb
print 'Memory Usage: Data %.3f GB' % data
for k,v in net.params.iteritems():
for i,p in enumerate(v):
gb = float(p.data.nbytes)/1024/1024/1024
total += gb
params += gb
print '%s[%d] : %.3f GB %s' % (k,i,gb,p.data.shape)
print 'Memory Usage: Params %.3f GB' % params
print 'Memory Usage: Total %.3f GB' % total
def getSolver(self):
return self.solver
def solve(proto, roidb, pretrained_model, gpus, uid, rank, output_dir, max_iter):
caffe.set_mode_gpu()
caffe.set_device(gpus[rank])
caffe.set_solver_count(len(gpus))
caffe.set_solver_rank(rank)
caffe.set_multiprocess(True)
cfg.GPU_ID = gpus[rank]
solverW = SolverWrapper(proto, roidb, output_dir,rank,pretrained_model)
solver = solverW.getSolver()
nccl = caffe.NCCL(solver, uid)
nccl.bcast()
solver.add_callback(nccl)
if solver.param.layer_wise_reduce:
solver.net.after_backward(nccl)
count = 0
while count < max_iter:
print 'Solver step'
solver.step(cfg.TRAIN.SNAPSHOT_ITERS)
if rank == 0:
solverW.snapshot()
#solverW.track_memory()
count = count + cfg.TRAIN.SNAPSHOT_ITERS
def get_training_roidb(imdb):
"""Returns a roidb (Region of Interest database) for use in training."""
if cfg.TRAIN.USE_FLIPPED:
print 'Appending horizontally-flipped training examples...'
imdb.append_flipped_images()
print 'done'
print 'Preparing training data...'
rdl_roidb.prepare_roidb(imdb)
print 'done'
return imdb.roidb
def filter_roidb(roidb):
"""Remove roidb entries that have no usable RoIs."""
def is_valid(entry):
# Valid images have:
# (1) At least one foreground RoI OR
# (2) At least one background RoI
overlaps = entry['max_overlaps']
# find boxes with sufficient overlap
fg_inds = np.where(overlaps >= cfg.TRAIN.FG_THRESH)[0]
# Select background RoIs as those within [BG_THRESH_LO, BG_THRESH_HI)
bg_inds = np.where((overlaps < cfg.TRAIN.BG_THRESH_HI) &
(overlaps >= cfg.TRAIN.BG_THRESH_LO))[0]
# image is only valid if such boxes exist
valid = len(fg_inds) > 0 or len(bg_inds) > 0
return valid
num = len(roidb)
filtered_roidb = [entry for entry in roidb if is_valid(entry)]
num_after = len(filtered_roidb)
print 'Filtered {} roidb entries: {} -> {}'.format(num - num_after,
num, num_after)
return filtered_roidb
def train_net_multi_gpu(solver_prototxt, roidb, output_dir, pretrained_model, max_iter, gpus):
"""Train a Fast R-CNN network."""
uid = caffe.NCCL.new_uid()
caffe.init_log()
caffe.log('Using devices %s' % str(gpus))
procs = []
for rank in range(len(gpus)):
p = Process(target=solve,
args=(solver_prototxt, roidb, pretrained_model, gpus, uid, rank, output_dir, max_iter))
p.daemon = False
p.start()
procs.append(p)
for p in procs:
p.join()
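# Usage sketch (illustrative, not part of the original file): a guess at a
# typical driver script for this module. The imdb object and all paths are
# placeholders; the gpu list is arbitrary.
roidb = filter_roidb(get_training_roidb(imdb))   # imdb obtained elsewhere
train_net_multi_gpu('solver.prototxt', roidb, 'output/',
                    pretrained_model='init.caffemodel',
                    max_iter=490000, gpus=[0, 1])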
# File: bottom-up-attention-master/lib/fast_rcnn/config.py
# --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
"""Fast R-CNN config system.
This file specifies default config options for Fast R-CNN. You should not
change values in this file. Instead, you should write a config file (in yaml)
and use cfg_from_file(yaml_file) to load it and override the default options.
Most tools in $ROOT/tools take a --cfg option to specify an override file.
- See tools/{train,test}_net.py for example code that uses cfg_from_file()
- See experiments/cfgs/*.yml for example YAML config override files
"""
import os
import os.path as osp
import numpy as np
# `pip install easydict` if you don't have it
from easydict import EasyDict as edict
__C = edict()
# Consumers can get config by:
# from fast_rcnn_config import cfg
cfg = __C
#
# Training options
#
__C.TRAIN = edict()
# Scales to use during training (can list multiple scales)
# Each scale is the pixel size of an image's shortest side
__C.TRAIN.SCALES = (600,)
# Max pixel size of the longest side of a scaled input image
__C.TRAIN.MAX_SIZE = 1000
# Images to use per minibatch
__C.TRAIN.IMS_PER_BATCH = 2
# Minibatch size (number of regions of interest [ROIs])
__C.TRAIN.BATCH_SIZE = 128
# Fraction of minibatch that is labeled foreground (i.e. class > 0)
__C.TRAIN.FG_FRACTION = 0.25
# Overlap threshold for a ROI to be considered foreground (if >= FG_THRESH)
__C.TRAIN.FG_THRESH = 0.5
# Overlap threshold for a ROI to be considered background (class = 0 if
# overlap in [LO, HI))
__C.TRAIN.BG_THRESH_HI = 0.5
__C.TRAIN.BG_THRESH_LO = 0.1
# Use horizontally-flipped images during training?
__C.TRAIN.USE_FLIPPED = True
# Train bounding-box regressors
__C.TRAIN.BBOX_REG = True
# Overlap required between a ROI and ground-truth box in order for that ROI to
# be used as a bounding-box regression training example
__C.TRAIN.BBOX_THRESH = 0.5
# Iterations between snapshots
__C.TRAIN.SNAPSHOT_ITERS = 10000
# solver.prototxt specifies the snapshot path prefix, this adds an optional
# infix to yield the path: <prefix>[_<infix>]_iter_XYZ.caffemodel
__C.TRAIN.SNAPSHOT_INFIX = ''
# Use a prefetch thread in roi_data_layer.layer
# So far I haven't found this useful; likely more engineering work is required
__C.TRAIN.USE_PREFETCH = False
# Normalize the targets (subtract empirical mean, divide by empirical stddev)
__C.TRAIN.BBOX_NORMALIZE_TARGETS = True
# Deprecated (inside weights)
__C.TRAIN.BBOX_INSIDE_WEIGHTS = (1.0, 1.0, 1.0, 1.0)
# Normalize the targets using "precomputed" (or made up) means and stdevs
# (BBOX_NORMALIZE_TARGETS must also be True)
__C.TRAIN.BBOX_NORMALIZE_TARGETS_PRECOMPUTED = False
__C.TRAIN.BBOX_NORMALIZE_MEANS = (0.0, 0.0, 0.0, 0.0)
__C.TRAIN.BBOX_NORMALIZE_STDS = (0.1, 0.1, 0.2, 0.2)
__C.TRAIN.RPN_NORMALIZE_TARGETS = False
__C.TRAIN.RPN_NORMALIZE_MEANS = (0.0, 0.0, 0.0, 0.0)
__C.TRAIN.RPN_NORMALIZE_STDS = (0.1, 0.1, 0.2, 0.2)
# Train using these proposals
__C.TRAIN.PROPOSAL_METHOD = 'selective_search'
# Make minibatches from images that have similar aspect ratios (i.e. both
# tall and thin or both short and wide) in order to avoid wasting computation
# on zero-padding.
__C.TRAIN.ASPECT_GROUPING = True
# Use RPN to detect objects
__C.TRAIN.HAS_RPN = False
# IOU >= thresh: positive example
__C.TRAIN.RPN_POSITIVE_OVERLAP = 0.7
# IOU < thresh: negative example
__C.TRAIN.RPN_NEGATIVE_OVERLAP = 0.3
# If an anchor satisfies both the positive and negative conditions, set it to negative
__C.TRAIN.RPN_CLOBBER_POSITIVES = False
# Max number of foreground examples
__C.TRAIN.RPN_FG_FRACTION = 0.5
# Total number of examples
__C.TRAIN.RPN_BATCHSIZE = 256
# NMS threshold used on RPN proposals
__C.TRAIN.RPN_NMS_THRESH = 0.7
# Number of top scoring boxes to keep before applying NMS to RPN proposals
__C.TRAIN.RPN_PRE_NMS_TOP_N = 12000
# Number of top scoring boxes to keep after applying NMS to RPN proposals
__C.TRAIN.RPN_POST_NMS_TOP_N = 2000
# Proposal height and width both need to be greater than RPN_MIN_SIZE (at orig image scale)
__C.TRAIN.RPN_MIN_SIZE = 16
# Deprecated (outside weights)
__C.TRAIN.RPN_BBOX_INSIDE_WEIGHTS = (1.0, 1.0, 1.0, 1.0)
# Give the positive RPN examples weight of p * 1 / {num positives}
# and give negatives a weight of (1 - p)
# Set to -1.0 to use uniform example weighting
__C.TRAIN.RPN_POSITIVE_WEIGHT = -1.0
# whether use class aware box or not
__C.TRAIN.AGNOSTIC = False
# Detect attributes of objects
__C.TRAIN.HAS_ATTRIBUTES = False
# Detect relations between objects
__C.TRAIN.HAS_RELATIONS = False
# Fraction of relation minibatch that is labeled with a relation (i.e. class > 0)
__C.TRAIN.MIN_RELATION_FRACTION = 0.25
#
# Testing options
#
__C.TEST = edict()
# Scales to use during testing (can list multiple scales)
# Each scale is the pixel size of an image's shortest side
__C.TEST.SCALES = (600,)
# Max pixel size of the longest side of a scaled input image
__C.TEST.MAX_SIZE = 1000
# Overlap threshold used for non-maximum suppression (suppress boxes with
# IoU >= this threshold)
__C.TEST.NMS = 0.3
# Flag for soft-NMS method. 0 performs standard NMS, 1 performs soft-NMS with linear weighting and
# 2 performs soft-NMS with Gaussian weighting
__C.TEST.SOFT_NMS = 0
# Experimental: treat the (K+1) units in the cls_score layer as linear
# predictors (trained, e.g., with one-vs-rest SVMs).
__C.TEST.SVM = False
# Test using bounding-box regressors
__C.TEST.BBOX_REG = True
# Propose boxes
__C.TEST.HAS_RPN = False
# Test using these proposals
__C.TEST.PROPOSAL_METHOD = 'selective_search'
## NMS threshold used on RPN proposals
__C.TEST.RPN_NMS_THRESH = 0.7
## Number of top scoring boxes to keep before applying NMS to RPN proposals
__C.TEST.RPN_PRE_NMS_TOP_N = 6000
## Number of top scoring boxes to keep after applying NMS to RPN proposals
__C.TEST.RPN_POST_NMS_TOP_N = 300
# Proposal height and width both need to be greater than RPN_MIN_SIZE (at orig image scale)
__C.TEST.RPN_MIN_SIZE = 16
# whether use class aware box or not
__C.TEST.AGNOSTIC = False
# Detect attributes of objects
__C.TEST.HAS_ATTRIBUTES = False
# Detect relations between objects
__C.TEST.HAS_RELATIONS = False
#
# MISC
#
# The mapping from image coordinates to feature map coordinates might cause
# some boxes that are distinct in image space to become identical in feature
# coordinates. If DEDUP_BOXES > 0, then DEDUP_BOXES is used as the scale factor
# for identifying duplicate boxes.
# 1/16 is correct for {Alex,Caffe}Net, VGG_CNN_M_1024, and VGG16
__C.DEDUP_BOXES = 1./16.
# Pixel mean values (BGR order) as a (1, 1, 3) array
# We use the same pixel mean for all networks even though it's not exactly what
# they were trained with
__C.PIXEL_MEANS = np.array([[[102.9801, 115.9465, 122.7717]]])
# For reproducibility
__C.RNG_SEED = 3
# A small number that's used many times
__C.EPS = 1e-14
# Root directory of project
__C.ROOT_DIR = osp.abspath(osp.join(osp.dirname(__file__), '..', '..'))
# Data directory
__C.DATA_DIR = osp.abspath(osp.join(__C.ROOT_DIR, 'data'))
# Model directory
__C.MODELS_DIR = osp.abspath(osp.join(__C.ROOT_DIR, 'models', 'pascal_voc'))
# Name (or path to) the matlab executable
__C.MATLAB = 'matlab'
# Place outputs under an experiments directory
__C.EXP_DIR = 'default'
# Use GPU implementation of non-maximum suppression
__C.USE_GPU_NMS = True
# Default GPU device id
__C.GPU_ID = 0
def get_output_dir(imdb, net=None, attributes=False):
"""Return the directory where experimental artifacts are placed.
If the directory does not exist, it is created.
A canonical path is built using the name from an imdb and a network
(if not None).
"""
outdir = osp.abspath(osp.join(__C.ROOT_DIR, 'output', __C.EXP_DIR, imdb.name))
if net is not None:
outdir = osp.join(outdir, net.name)
if attributes:
outdir = osp.join(outdir, "attr")
if not os.path.exists(outdir):
os.makedirs(outdir)
return outdir
def _merge_a_into_b(a, b):
"""Merge config dictionary a into config dictionary b, clobbering the
options in b whenever they are also specified in a.
"""
if type(a) is not edict:
return
for k, v in a.iteritems():
# a must specify keys that are in b
if not b.has_key(k):
raise KeyError('{} is not a valid config key'.format(k))
# the types must match, too
old_type = type(b[k])
if old_type is not type(v):
if isinstance(b[k], np.ndarray):
v = np.array(v, dtype=b[k].dtype)
else:
raise ValueError(('Type mismatch ({} vs. {}) '
'for config key: {}').format(type(b[k]),
type(v), k))
# recursively merge dicts
if type(v) is edict:
try:
_merge_a_into_b(a[k], b[k])
except:
print('Error under config key: {}'.format(k))
raise
else:
b[k] = v
def cfg_from_file(filename):
"""Load a config file and merge it into the default options."""
import yaml
with open(filename, 'r') as f:
yaml_cfg = edict(yaml.load(f))
_merge_a_into_b(yaml_cfg, __C)
def cfg_from_list(cfg_list):
"""Set config keys via list (e.g., from command line)."""
from ast import literal_eval
assert len(cfg_list) % 2 == 0
for k, v in zip(cfg_list[0::2], cfg_list[1::2]):
key_list = k.split('.')
d = __C
for subkey in key_list[:-1]:
assert d.has_key(subkey)
d = d[subkey]
subkey = key_list[-1]
assert d.has_key(subkey)
try:
value = literal_eval(v)
except:
# handle the case when v is a string literal
value = v
assert type(value) == type(d[subkey]), \
'type {} does not match original type {}'.format(
type(value), type(d[subkey]))
d[subkey] = value
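# Usage sketch (illustrative, not part of the original file): overriding
# defaults from a YAML file and from a command-line-style list. The file
# path is a placeholder.
cfg_from_file('experiments/cfgs/example.yml')
cfg_from_list(['TRAIN.IMS_PER_BATCH', '1', 'TEST.NMS', '0.4'])
print cfg.TRAIN.IMS_PER_BATCH   # -> 1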
# File: bottom-up-attention-master/lib/fast_rcnn/train.py
# --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
"""Train a Fast R-CNN network."""
import caffe
from fast_rcnn.config import cfg
import roi_data_layer.roidb as rdl_roidb
from utils.timer import Timer
import numpy as np
import os
from caffe.proto import caffe_pb2
import google.protobuf as pb2
import google.protobuf.text_format as text_format
class SolverWrapper(object):
"""A simple wrapper around Caffe's solver.
This wrapper gives us control over the snapshotting process, which we
use to unnormalize the learned bounding-box regression weights.
"""
def __init__(self, solver_prototxt, roidb, output_dir,
pretrained_model=None):
"""Initialize the SolverWrapper."""
self.output_dir = output_dir
if (cfg.TRAIN.HAS_RPN and cfg.TRAIN.BBOX_REG and
cfg.TRAIN.BBOX_NORMALIZE_TARGETS):
# RPN can only use precomputed normalization because there are no
# fixed statistics to compute a priori
assert cfg.TRAIN.BBOX_NORMALIZE_TARGETS_PRECOMPUTED
if cfg.TRAIN.BBOX_REG:
print 'Computing bounding-box regression targets...'
self.bbox_means, self.bbox_stds = \
rdl_roidb.add_bbox_regression_targets(roidb)
print 'done'
self.solver = caffe.SGDSolver(solver_prototxt)
if pretrained_model is not None:
print ('Loading pretrained model '
'weights from {:s}').format(pretrained_model)
self.solver.net.copy_from(pretrained_model)
self.solver_param = caffe_pb2.SolverParameter()
with open(solver_prototxt, 'rt') as f:
text_format.Merge(f.read(), self.solver_param)
self.solver.net.layers[0].set_roidb(roidb, cfg.GPU_ID)
def snapshot(self):
"""Take a snapshot of the network after unnormalizing the learned
bounding-box regression weights. This enables easy use at test-time.
"""
net = self.solver.net
scale_bbox_params_faster_rcnn = (cfg.TRAIN.BBOX_REG and
cfg.TRAIN.BBOX_NORMALIZE_TARGETS and
net.params.has_key('bbox_pred'))
scale_bbox_params_rfcn = (cfg.TRAIN.BBOX_REG and
cfg.TRAIN.BBOX_NORMALIZE_TARGETS and
net.params.has_key('rfcn_bbox'))
scale_bbox_params_rpn = (cfg.TRAIN.RPN_NORMALIZE_TARGETS and
net.params.has_key('rpn_bbox_pred'))
if scale_bbox_params_faster_rcnn:
# save original values
orig_0 = net.params['bbox_pred'][0].data.copy()
orig_1 = net.params['bbox_pred'][1].data.copy()
# scale and shift with bbox reg unnormalization; then save snapshot
net.params['bbox_pred'][0].data[...] = \
(net.params['bbox_pred'][0].data *
self.bbox_stds[:, np.newaxis])
net.params['bbox_pred'][1].data[...] = \
(net.params['bbox_pred'][1].data *
self.bbox_stds + self.bbox_means)
if scale_bbox_params_rpn:
rpn_orig_0 = net.params['rpn_bbox_pred'][0].data.copy()
rpn_orig_1 = net.params['rpn_bbox_pred'][1].data.copy()
num_anchor = rpn_orig_0.shape[0] / 4
# scale and shift with bbox reg unnormalization; then save snapshot
self.rpn_means = np.tile(np.asarray(cfg.TRAIN.RPN_NORMALIZE_MEANS),
num_anchor)
self.rpn_stds = np.tile(np.asarray(cfg.TRAIN.RPN_NORMALIZE_STDS),
num_anchor)
net.params['rpn_bbox_pred'][0].data[...] = \
(net.params['rpn_bbox_pred'][0].data *
self.rpn_stds[:, np.newaxis, np.newaxis, np.newaxis])
net.params['rpn_bbox_pred'][1].data[...] = \
(net.params['rpn_bbox_pred'][1].data *
self.rpn_stds + self.rpn_means)
if scale_bbox_params_rfcn:
# save original values
orig_0 = net.params['rfcn_bbox'][0].data.copy()
orig_1 = net.params['rfcn_bbox'][1].data.copy()
repeat = orig_1.shape[0] / self.bbox_means.shape[0]
# scale and shift with bbox reg unnormalization; then save snapshot
net.params['rfcn_bbox'][0].data[...] = \
(net.params['rfcn_bbox'][0].data *
np.repeat(self.bbox_stds, repeat).reshape((orig_1.shape[0], 1, 1, 1)))
net.params['rfcn_bbox'][1].data[...] = \
(net.params['rfcn_bbox'][1].data *
np.repeat(self.bbox_stds, repeat) + np.repeat(self.bbox_means, repeat))
infix = ('_' + cfg.TRAIN.SNAPSHOT_INFIX
if cfg.TRAIN.SNAPSHOT_INFIX != '' else '')
filename = (self.solver_param.snapshot_prefix + infix +
'_iter_{:d}'.format(self.solver.iter) + '.caffemodel')
filename = os.path.join(self.output_dir, filename)
net.save(str(filename))
print 'Wrote snapshot to: {:s}'.format(filename)
if scale_bbox_params_faster_rcnn:
# restore net to original state
net.params['bbox_pred'][0].data[...] = orig_0
net.params['bbox_pred'][1].data[...] = orig_1
if scale_bbox_params_rfcn:
# restore net to original state
net.params['rfcn_bbox'][0].data[...] = orig_0
net.params['rfcn_bbox'][1].data[...] = orig_1
if scale_bbox_params_rpn:
# restore net to original state
net.params['rpn_bbox_pred'][0].data[...] = rpn_orig_0
net.params['rpn_bbox_pred'][1].data[...] = rpn_orig_1
return filename
def train_model(self, max_iters):
"""Network training loop."""
last_snapshot_iter = -1
timer = Timer()
model_paths = []
while self.solver.iter < max_iters:
# Make one SGD update
timer.tic()
self.solver.step(1)
timer.toc()
if self.solver.iter % (10 * self.solver_param.display) == 0:
print 'speed: {:.3f}s / iter'.format(timer.average_time)
if self.solver.iter % cfg.TRAIN.SNAPSHOT_ITERS == 0:
last_snapshot_iter = self.solver.iter
model_paths.append(self.snapshot())
if last_snapshot_iter != self.solver.iter:
model_paths.append(self.snapshot())
return model_paths
def get_training_roidb(imdb):
"""Returns a roidb (Region of Interest database) for use in training."""
if cfg.TRAIN.USE_FLIPPED:
print 'Appending horizontally-flipped training examples...'
imdb.append_flipped_images()
print 'done'
print 'Preparing training data...'
rdl_roidb.prepare_roidb(imdb)
print 'done'
return imdb.roidb
def filter_roidb(roidb):
"""Remove roidb entries that have no usable RoIs."""
def is_valid(entry):
# Valid images have:
# (1) At least one foreground RoI OR
# (2) At least one background RoI
overlaps = entry['max_overlaps']
# find boxes with sufficient overlap
fg_inds = np.where(overlaps >= cfg.TRAIN.FG_THRESH)[0]
# Select background RoIs as those within [BG_THRESH_LO, BG_THRESH_HI)
bg_inds = np.where((overlaps < cfg.TRAIN.BG_THRESH_HI) &
(overlaps >= cfg.TRAIN.BG_THRESH_LO))[0]
# image is only valid if such boxes exist
valid = len(fg_inds) > 0 or len(bg_inds) > 0
return valid
num = len(roidb)
filtered_roidb = [entry for entry in roidb if is_valid(entry)]
num_after = len(filtered_roidb)
print 'Filtered {} roidb entries: {} -> {}'.format(num - num_after,
num, num_after)
return filtered_roidb
def train_net(solver_prototxt, roidb, output_dir,
pretrained_model=None, max_iters=40000):
"""Train a Fast R-CNN network."""
roidb = filter_roidb(roidb)
sw = SolverWrapper(solver_prototxt, roidb, output_dir,
pretrained_model=pretrained_model)
print 'Solving...'
model_paths = sw.train_model(max_iters)
print 'done solving'
return model_paths
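# Usage sketch (illustrative, not part of the original file), following the
# usual driver pattern for this module; the imdb object and paths are
# placeholders. filter_roidb is applied inside train_net.
roidb = get_training_roidb(imdb)   # imdb obtained elsewhere
model_paths = train_net('solver.prototxt', roidb, 'output/',
                        pretrained_model='init.caffemodel',
                        max_iters=80000)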
# File: bottom-up-attention-master/lib/rpn/proposal_layer.py
# --------------------------------------------------------
# Faster R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick and Sean Bell
# --------------------------------------------------------
import caffe
import numpy as np
import yaml
from fast_rcnn.config import cfg
from generate_anchors import generate_anchors
from fast_rcnn.bbox_transform import bbox_transform_inv, clip_boxes
from fast_rcnn.nms_wrapper import nms
DEBUG = False
DEBUG_SHAPE = False
class ProposalLayer(caffe.Layer):
"""
Outputs object detection proposals by applying estimated bounding-box
transformations to a set of regular boxes (called "anchors").
"""
def setup(self, bottom, top):
# parse the layer parameter string, which must be valid YAML
layer_params = yaml.load(self.param_str)
self._feat_stride = layer_params['feat_stride']
anchor_scales = layer_params.get('scales', (8, 16, 32))
self._anchors = generate_anchors(scales=np.array(anchor_scales))
self._num_anchors = self._anchors.shape[0]
if DEBUG:
print 'feat_stride: {}'.format(self._feat_stride)
print 'anchors:'
print self._anchors
# rois blob: holds R regions of interest, each is a 5-tuple
# (n, x1, y1, x2, y2) specifying an image batch index n and a
# rectangle (x1, y1, x2, y2)
top[0].reshape(1, 5)
# scores blob: holds scores for R regions of interest
if len(top) > 1:
top[1].reshape(1, 1, 1, 1)
def forward(self, bottom, top):
# Algorithm:
#
# for each (H, W) location i
# generate A anchor boxes centered on cell i
# apply predicted bbox deltas at cell i to each of the A anchors
# clip predicted boxes to image
# remove predicted boxes with either height or width < threshold
# sort all (proposal, score) pairs by score from highest to lowest
# take top pre_nms_topN proposals before NMS
# apply NMS with threshold 0.7 to remaining proposals
# take after_nms_topN proposals after NMS
# return the top proposals (-> RoIs top, scores top)
assert bottom[0].data.shape[0] == 1, \
'Only single item batches are supported'
cfg_key = str('TRAIN' if self.phase == 0 else 'TEST') # either 'TRAIN' or 'TEST'
pre_nms_topN = cfg[cfg_key].RPN_PRE_NMS_TOP_N
post_nms_topN = cfg[cfg_key].RPN_POST_NMS_TOP_N
nms_thresh = cfg[cfg_key].RPN_NMS_THRESH
min_size = cfg[cfg_key].RPN_MIN_SIZE
# the first set of _num_anchors channels are bg probs
# the second set are the fg probs, which we want
scores = bottom[0].data[:, self._num_anchors:, :, :]
bbox_deltas = bottom[1].data
im_info = bottom[2].data[0, :]
if DEBUG:
print 'im_size: ({}, {})'.format(im_info[0], im_info[1])
print 'scale: {}'.format(im_info[2])
# 1. Generate proposals from bbox deltas and shifted anchors
height, width = scores.shape[-2:]
if DEBUG:
print 'score map size: {}'.format(scores.shape)
# Enumerate all shifts
shift_x = np.arange(0, width) * self._feat_stride
shift_y = np.arange(0, height) * self._feat_stride
shift_x, shift_y = np.meshgrid(shift_x, shift_y)
shifts = np.vstack((shift_x.ravel(), shift_y.ravel(),
shift_x.ravel(), shift_y.ravel())).transpose()
# Enumerate all shifted anchors:
#
# add A anchors (1, A, 4) to
# cell K shifts (K, 1, 4) to get
# shift anchors (K, A, 4)
# reshape to (K*A, 4) shifted anchors
A = self._num_anchors
K = shifts.shape[0]
anchors = self._anchors.reshape((1, A, 4)) + \
shifts.reshape((1, K, 4)).transpose((1, 0, 2))
anchors = anchors.reshape((K * A, 4))
# Transpose and reshape predicted bbox transformations to get them
# into the same order as the anchors:
#
# bbox deltas will be (1, 4 * A, H, W) format
# transpose to (1, H, W, 4 * A)
# reshape to (1 * H * W * A, 4) where rows are ordered by (h, w, a)
# in slowest to fastest order
bbox_deltas = bbox_deltas.transpose((0, 2, 3, 1)).reshape((-1, 4))
if cfg_key == 'TRAIN' and cfg.TRAIN.RPN_NORMALIZE_TARGETS:
bbox_deltas *= cfg.TRAIN.RPN_NORMALIZE_STDS
bbox_deltas += cfg.TRAIN.RPN_NORMALIZE_MEANS
# Same story for the scores:
#
# scores are (1, A, H, W) format
# transpose to (1, H, W, A)
# reshape to (1 * H * W * A, 1) where rows are ordered by (h, w, a)
scores = scores.transpose((0, 2, 3, 1)).reshape((-1, 1))
# Convert anchors into proposals via bbox transformations
proposals = bbox_transform_inv(anchors, bbox_deltas)
# 2. clip predicted boxes to image
proposals = clip_boxes(proposals, im_info[:2])
# 3. remove predicted boxes with either height or width < threshold
# (NOTE: convert min_size to input image scale stored in im_info[2])
keep = _filter_boxes(proposals, min_size * im_info[2])
proposals = proposals[keep, :]
scores = scores[keep]
# 4. sort all (proposal, score) pairs by score from highest to lowest
# 5. take top pre_nms_topN (e.g. 6000)
order = scores.ravel().argsort()[::-1]
if pre_nms_topN > 0:
order = order[:pre_nms_topN]
proposals = proposals[order, :]
scores = scores[order]
# 6. apply nms (e.g. threshold = 0.7)
# 7. take after_nms_topN (e.g. 300)
# 8. return the top proposals (-> RoIs top)
keep = nms(np.hstack((proposals, scores)), nms_thresh)
if post_nms_topN > 0:
keep = keep[:post_nms_topN]
proposals = proposals[keep, :]
scores = scores[keep]
# Output rois blob
# Our RPN implementation only supports a single input image, so all
# batch inds are 0
batch_inds = np.zeros((proposals.shape[0], 1), dtype=np.float32)
blob = np.hstack((batch_inds, proposals.astype(np.float32, copy=False)))
# print blob.shape
top[0].reshape(*(blob.shape))
top[0].data[...] = blob
if DEBUG_SHAPE:
print 'ProposalLayer top[0] size: {}'.format(top[0].data.shape)
# [Optional] output scores blob
if len(top) > 1:
top[1].reshape(*(scores.shape))
top[1].data[...] = scores
if DEBUG_SHAPE:
                print 'ProposalLayer top[1] size: {}'.format(top[1].data.shape)
def backward(self, top, propagate_down, bottom):
"""This layer does not propagate gradients."""
pass
def reshape(self, bottom, top):
"""Reshaping happens during the call to forward."""
pass
def _filter_boxes(boxes, min_size):
"""Remove all boxes with any side smaller than min_size."""
ws = boxes[:, 2] - boxes[:, 0] + 1
hs = boxes[:, 3] - boxes[:, 1] + 1
keep = np.where((ws >= min_size) & (hs >= min_size))[0]
return keep
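# Quick sanity check of _filter_boxes (a toy example, not part of the original
# file): under the +1 side-length convention a 1x1 box has sides of length 2,
# so it survives min_size=2 but not min_size=3.
#
#   boxes = np.array([[0., 0., 1., 1.], [0., 0., 10., 10.]])
#   _filter_boxes(boxes, 2)   # -> array([0, 1])
#   _filter_boxes(boxes, 3)   # -> array([1])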
| 7,265 | 38.064516 | 88 | py |
bottom-up-attention | bottom-up-attention-master/lib/rpn/proposal_target_layer.py | # --------------------------------------------------------
# Faster R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick and Sean Bell
# --------------------------------------------------------
import caffe
import yaml
import numpy as np
import numpy.random as npr
from fast_rcnn.config import cfg
from fast_rcnn.bbox_transform import bbox_transform
from utils.cython_bbox import bbox_overlaps
DEBUG = False
DEBUG_SHAPE = False
class ProposalTargetLayer(caffe.Layer):
"""
Assign object detection proposals to ground-truth targets. Produces proposal
classification labels and bounding-box regression targets.
"""
def setup(self, bottom, top):
self._count = 0
self._fg_num = 0
self._bg_num = 0
layer_params = yaml.load(self.param_str)
self._num_classes = layer_params['num_classes']
if 'num_attr_classes' in layer_params:
self._num_attr_classes = layer_params['num_attr_classes']
else:
self._num_attr_classes = 0
if 'num_rel_classes' in layer_params:
self._num_rel_classes = layer_params['num_rel_classes']
else:
self._num_rel_classes = 0
if 'ignore_label' in layer_params:
self._ignore_label = layer_params['ignore_label']
else:
self._ignore_label = -1
rois_per_image = 1 if cfg.TRAIN.BATCH_SIZE == -1 else cfg.TRAIN.BATCH_SIZE
# sampled rois (0, x1, y1, x2, y2)
top[0].reshape(rois_per_image, 5, 1, 1)
# labels
top[1].reshape(rois_per_image, 1, 1, 1)
# bbox_targets
top[2].reshape(rois_per_image, self._num_classes * 4, 1, 1)
# bbox_inside_weights
top[3].reshape(rois_per_image, self._num_classes * 4, 1, 1)
# bbox_outside_weights
top[4].reshape(rois_per_image, self._num_classes * 4, 1, 1)
ix = 5
fg_rois_per_image = np.round(cfg.TRAIN.FG_FRACTION * rois_per_image).astype(int)
if self._num_attr_classes > 0:
# attribute labels
top[ix].reshape(fg_rois_per_image, 16)
ix += 1
if self._num_rel_classes > 0:
# relation labels
top[ix].reshape(fg_rois_per_image*fg_rois_per_image, 1, 1, 1)
def forward(self, bottom, top):
# Proposal ROIs (0, x1, y1, x2, y2) coming from RPN
# (i.e., rpn.proposal_layer.ProposalLayer), or any other source
all_rois = bottom[0].data
# GT boxes (x1, y1, x2, y2, label, attributes[16], relations[num_objs])
# TODO(rbg): it's annoying that sometimes I have extra info before
# and other times after box coordinates -- normalize to one format
gt_boxes = bottom[1].data
gt_boxes = gt_boxes.reshape(gt_boxes.shape[0], gt_boxes.shape[1])
# Include ground-truth boxes in the set of candidate rois
zeros = np.zeros((gt_boxes.shape[0], 1), dtype=gt_boxes.dtype)
all_rois = np.vstack(
(all_rois, np.hstack((zeros, gt_boxes[:, :4])))
)
# Sanity check: single batch only
assert np.all(all_rois[:, 0] == 0), \
'Only single item batches are supported'
rois_per_image = np.inf if cfg.TRAIN.BATCH_SIZE == -1 else cfg.TRAIN.BATCH_SIZE
fg_rois_per_image = int(np.round(cfg.TRAIN.FG_FRACTION * rois_per_image))
# Sample rois with classification labels and bounding box regression
# targets
# print 'proposal_target_layer:', fg_rois_per_image
labels, rois, bbox_targets, bbox_inside_weights, attributes, relations = _sample_rois(
all_rois, gt_boxes, fg_rois_per_image,
rois_per_image, self._num_classes, self._num_attr_classes,
self._num_rel_classes, self._ignore_label)
if self._num_attr_classes > 0:
assert attributes is not None
if self._num_rel_classes > 0:
assert relations is not None
if DEBUG:
print 'num fg: {}'.format((labels > 0).sum())
print 'num bg: {}'.format((labels == 0).sum())
self._count += 1
self._fg_num += (labels > 0).sum()
self._bg_num += (labels == 0).sum()
print 'num fg avg: {}'.format(self._fg_num / self._count)
print 'num bg avg: {}'.format(self._bg_num / self._count)
print 'ratio: {:.3f}'.format(float(self._fg_num) / float(self._bg_num))
# sampled rois
# modified by ywxiong
rois = rois.reshape((rois.shape[0], rois.shape[1], 1, 1))
top[0].reshape(*rois.shape)
top[0].data[...] = rois
# classification labels
# modified by ywxiong
labels = labels.reshape((labels.shape[0], 1, 1, 1))
top[1].reshape(*labels.shape)
top[1].data[...] = labels
# bbox_targets
# modified by ywxiong
bbox_targets = bbox_targets.reshape((bbox_targets.shape[0], bbox_targets.shape[1], 1, 1))
top[2].reshape(*bbox_targets.shape)
top[2].data[...] = bbox_targets
# bbox_inside_weights
# modified by ywxiong
bbox_inside_weights = bbox_inside_weights.reshape((bbox_inside_weights.shape[0], bbox_inside_weights.shape[1], 1, 1))
top[3].reshape(*bbox_inside_weights.shape)
top[3].data[...] = bbox_inside_weights
# bbox_outside_weights
# modified by ywxiong
        # outside weights are a {0, 1} indicator mask of the inside weights;
        # bbox_inside_weights was already reshaped to (N, 4K, 1, 1) above
        bbox_outside_weights = np.array(bbox_inside_weights > 0).astype(np.float32)
        top[4].reshape(*bbox_outside_weights.shape)
        top[4].data[...] = bbox_outside_weights
#attribute labels
ix = 5
if self._num_attr_classes > 0:
attributes[:,1:][attributes[:,1:]==0] = self._ignore_label
top[ix].reshape(*attributes.shape)
top[ix].data[...] = attributes
ix += 1
# relation labels
if self._num_rel_classes > 0:
top[ix].reshape(*relations.shape)
top[ix].data[...] = relations
if DEBUG_SHAPE:
for i in range(len(top)):
print 'ProposalTargetLayer top[{}] size: {}'.format(i, top[i].data.shape)
def backward(self, top, propagate_down, bottom):
"""This layer does not propagate gradients."""
pass
def reshape(self, bottom, top):
"""Reshaping happens during the call to forward."""
pass
def _get_bbox_regression_labels(bbox_target_data, num_classes):
"""Bounding-box regression targets (bbox_target_data) are stored in a
compact form N x (class, tx, ty, tw, th)
This function expands those targets into the 4-of-4*K representation used
by the network (i.e. only one class has non-zero targets).
Returns:
bbox_target (ndarray): N x 4K blob of regression targets
bbox_inside_weights (ndarray): N x 4K blob of loss weights
"""
clss = bbox_target_data[:, 0]
bbox_targets = np.zeros((clss.size, 4 * num_classes), dtype=np.float32)
# print 'proposal_target_layer:', bbox_targets.shape
bbox_inside_weights = np.zeros(bbox_targets.shape, dtype=np.float32)
inds = np.where(clss > 0)[0]
if cfg.TRAIN.AGNOSTIC:
for ind in inds:
cls = clss[ind]
start = 4 * (1 if cls > 0 else 0)
end = start + 4
bbox_targets[ind, start:end] = bbox_target_data[ind, 1:]
bbox_inside_weights[ind, start:end] = cfg.TRAIN.BBOX_INSIDE_WEIGHTS
else:
for ind in inds:
cls = clss[ind]
start = int(4 * cls)
end = int(start + 4)
bbox_targets[ind, start:end] = bbox_target_data[ind, 1:]
bbox_inside_weights[ind, start:end] = cfg.TRAIN.BBOX_INSIDE_WEIGHTS
return bbox_targets, bbox_inside_weights
def _compute_targets(ex_rois, gt_rois, labels):
"""Compute bounding-box regression targets for an image."""
assert ex_rois.shape[0] == gt_rois.shape[0]
assert ex_rois.shape[1] == 4
assert gt_rois.shape[1] == 4
targets = bbox_transform(ex_rois, gt_rois)
if cfg.TRAIN.BBOX_NORMALIZE_TARGETS_PRECOMPUTED:
# Optionally normalize targets by a precomputed mean and stdev
targets = ((targets - np.array(cfg.TRAIN.BBOX_NORMALIZE_MEANS))
/ np.array(cfg.TRAIN.BBOX_NORMALIZE_STDS))
return np.hstack(
(labels[:, np.newaxis], targets)).astype(np.float32, copy=False)
def _sample_rois(all_rois, gt_boxes, fg_rois_per_image, rois_per_image, num_classes,
num_attr_classes, num_rel_classes, ignore_label):
"""Generate a random sample of RoIs comprising foreground and background
examples.
"""
# overlaps: (rois x gt_boxes)
overlaps = bbox_overlaps(
np.ascontiguousarray(all_rois[:, 1:5], dtype=np.float),
np.ascontiguousarray(gt_boxes[:, :4], dtype=np.float))
# GT boxes (x1, y1, x2, y2, label, attributes[16], relations[num_objs])
has_attributes = num_attr_classes > 0
if has_attributes:
assert gt_boxes.shape[1] >= 21
has_relations = num_rel_classes > 0
if has_relations:
assert gt_boxes.shape[0] == gt_boxes.shape[1]-21, \
'relationships not found in gt_boxes, item length is only %d' % gt_boxes.shape[1]
gt_assignment = overlaps.argmax(axis=1)
max_overlaps = overlaps.max(axis=1)
labels = gt_boxes[gt_assignment, 4]
# Select foreground RoIs as those with >= FG_THRESH overlap
fg_inds = np.where(max_overlaps >= cfg.TRAIN.FG_THRESH)[0]
# Guard against the case when an image has fewer than fg_rois_per_image
# foreground RoIs
fg_rois_per_this_image = int(min(fg_rois_per_image, fg_inds.size))
# Sample foreground regions without replacement
if fg_inds.size > 0:
fg_inds = npr.choice(fg_inds, size=fg_rois_per_this_image, replace=False)
# Select background RoIs as those within [BG_THRESH_LO, BG_THRESH_HI)
bg_inds = np.where((max_overlaps < cfg.TRAIN.BG_THRESH_HI) &
(max_overlaps >= cfg.TRAIN.BG_THRESH_LO))[0]
# Compute number of background RoIs to take from this image (guarding
# against there being fewer than desired)
bg_rois_per_this_image = rois_per_image - fg_rois_per_this_image
bg_rois_per_this_image = int(min(bg_rois_per_this_image, bg_inds.size))
# Sample background regions without replacement
if bg_inds.size > 0:
bg_inds = npr.choice(bg_inds, size=bg_rois_per_this_image, replace=False)
# The indices that we're selecting (both fg and bg)
keep_inds = np.append(fg_inds, bg_inds)
# print 'proposal_target_layer:', keep_inds
# Select sampled values from various arrays:
labels = labels[keep_inds]
# Clamp labels for the background RoIs to 0 / ignore_label
labels[fg_rois_per_this_image:] = 0
fg_gt = np.array(gt_assignment[fg_inds])
if has_attributes:
attributes = np.ones((fg_rois_per_image,16))*ignore_label
attributes[:fg_rois_per_this_image,:] = gt_boxes[fg_gt, 5:21]
np.place(attributes[:,1:],attributes[:,1:] == 0, ignore_label)
else:
attributes = None
if has_relations:
expand_rels = gt_boxes[fg_gt, 21:].T[fg_gt].T
num_relations_per_this_image = np.count_nonzero(expand_rels)
# Keep an equal number of 'no relation' outputs, the rest can be ignore
expand_rels = expand_rels.flatten()
no_rel_inds = np.where(expand_rels==0)[0]
if len(no_rel_inds) > num_relations_per_this_image:
no_rel_inds = npr.choice(no_rel_inds, size=num_relations_per_this_image, replace=False)
np.place(expand_rels,expand_rels==0,ignore_label)
expand_rels[no_rel_inds] = 0
relations = np.ones((fg_rois_per_image,fg_rois_per_image),dtype=np.float)*ignore_label
relations[:fg_rois_per_this_image,:fg_rois_per_this_image] = expand_rels.reshape((fg_rois_per_this_image,fg_rois_per_this_image))
relations = relations.reshape((relations.size, 1, 1, 1))
else:
relations = None
rois = all_rois[keep_inds]
# print 'proposal_target_layer:', rois
bbox_target_data = _compute_targets(
rois[:, 1:5], gt_boxes[gt_assignment[keep_inds], :4], labels)
# print 'proposal_target_layer:', bbox_target_data
bbox_targets, bbox_inside_weights = \
_get_bbox_regression_labels(bbox_target_data, num_classes)
return labels, rois, bbox_targets, bbox_inside_weights, attributes, relations
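# Illustration of the 4-of-4K expansion performed by
# _get_bbox_regression_labels (toy numbers, assuming cfg.TRAIN.AGNOSTIC is
# False): with num_classes=3 and one RoI labelled class 2, only columns 8:12
# of the 12-wide target row are populated.
#
#   data = np.array([[2., 0.1, 0.2, 0.3, 0.4]], dtype=np.float32)
#   t, w = _get_bbox_regression_labels(data, 3)
#   t[0, 8:12]   # -> [0.1, 0.2, 0.3, 0.4]
#   w[0, 8:12]   # -> cfg.TRAIN.BBOX_INSIDE_WEIGHTS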
| 12,616 | 41.625 | 137 | py |
bottom-up-attention | bottom-up-attention-master/lib/rpn/anchor_target_layer.py | # --------------------------------------------------------
# Faster R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick and Sean Bell
# --------------------------------------------------------
import os
import caffe
import yaml
from fast_rcnn.config import cfg
import numpy as np
import numpy.random as npr
from generate_anchors import generate_anchors
from utils.cython_bbox import bbox_overlaps
from fast_rcnn.bbox_transform import bbox_transform
DEBUG = False
class AnchorTargetLayer(caffe.Layer):
"""
Assign anchors to ground-truth targets. Produces anchor classification
labels and bounding-box regression targets.
"""
def setup(self, bottom, top):
layer_params = yaml.load(self.param_str)
anchor_scales = layer_params.get('scales', (8, 16, 32))
self._anchors = generate_anchors(scales=np.array(anchor_scales))
self._num_anchors = self._anchors.shape[0]
self._feat_stride = layer_params['feat_stride']
if DEBUG:
print 'anchors:'
print self._anchors
print 'anchor shapes:'
print np.hstack((
self._anchors[:, 2::4] - self._anchors[:, 0::4],
self._anchors[:, 3::4] - self._anchors[:, 1::4],
))
self._counts = cfg.EPS
self._sums = np.zeros((1, 4))
self._squared_sums = np.zeros((1, 4))
self._fg_sum = 0
self._bg_sum = 0
self._count = 0
# allow boxes to sit over the edge by a small amount
self._allowed_border = layer_params.get('allowed_border', 0)
height, width = bottom[0].data.shape[-2:]
if DEBUG:
print 'AnchorTargetLayer: height', height, 'width', width
A = self._num_anchors
# labels
top[0].reshape(1, 1, A * height, width)
# bbox_targets
top[1].reshape(1, A * 4, height, width)
# bbox_inside_weights
top[2].reshape(1, A * 4, height, width)
# bbox_outside_weights
top[3].reshape(1, A * 4, height, width)
def forward(self, bottom, top):
# Algorithm:
#
# for each (H, W) location i
# generate 9 anchor boxes centered on cell i
# apply predicted bbox deltas at cell i to each of the 9 anchors
# filter out-of-image anchors
# measure GT overlap
assert bottom[0].data.shape[0] == 1, \
'Only single item batches are supported'
# map of shape (..., H, W)
height, width = bottom[0].data.shape[-2:]
# GT boxes (x1, y1, x2, y2, label, ...)
gt_boxes = bottom[1].data[:,:5]
# im_info
im_info = bottom[2].data[0, :]
if DEBUG:
print ''
print 'im_size: ({}, {})'.format(im_info[0], im_info[1])
print 'scale: {}'.format(im_info[2])
print 'height, width: ({}, {})'.format(height, width)
print 'rpn: gt_boxes.shape', gt_boxes.shape
print 'rpn: gt_boxes', gt_boxes
# 1. Generate proposals from bbox deltas and shifted anchors
shift_x = np.arange(0, width) * self._feat_stride
shift_y = np.arange(0, height) * self._feat_stride
shift_x, shift_y = np.meshgrid(shift_x, shift_y)
shifts = np.vstack((shift_x.ravel(), shift_y.ravel(),
shift_x.ravel(), shift_y.ravel())).transpose()
# add A anchors (1, A, 4) to
# cell K shifts (K, 1, 4) to get
# shift anchors (K, A, 4)
# reshape to (K*A, 4) shifted anchors
A = self._num_anchors
K = shifts.shape[0]
all_anchors = (self._anchors.reshape((1, A, 4)) +
shifts.reshape((1, K, 4)).transpose((1, 0, 2)))
all_anchors = all_anchors.reshape((K * A, 4))
total_anchors = int(K * A)
# only keep anchors inside the image
inds_inside = np.where(
(all_anchors[:, 0] >= -self._allowed_border) &
(all_anchors[:, 1] >= -self._allowed_border) &
(all_anchors[:, 2] < im_info[1] + self._allowed_border) & # width
(all_anchors[:, 3] < im_info[0] + self._allowed_border) # height
)[0]
if DEBUG:
print 'total_anchors', total_anchors
print 'inds_inside', len(inds_inside)
# keep only inside anchors
anchors = all_anchors[inds_inside, :]
if DEBUG:
print 'anchors.shape', anchors.shape
# label: 1 is positive, 0 is negative, -1 is dont care
labels = np.empty((len(inds_inside), ), dtype=np.float32)
labels.fill(-1)
# overlaps between the anchors and the gt boxes
# overlaps (ex, gt)
gt_boxes = gt_boxes.reshape(gt_boxes.shape[0], gt_boxes.shape[1])
overlaps = bbox_overlaps(
np.ascontiguousarray(anchors, dtype=np.float),
np.ascontiguousarray(gt_boxes, dtype=np.float))
argmax_overlaps = overlaps.argmax(axis=1)
max_overlaps = overlaps[np.arange(len(inds_inside)), argmax_overlaps]
gt_argmax_overlaps = overlaps.argmax(axis=0)
gt_max_overlaps = overlaps[gt_argmax_overlaps,
np.arange(overlaps.shape[1])]
gt_argmax_overlaps = np.where(overlaps == gt_max_overlaps)[0]
if not cfg.TRAIN.RPN_CLOBBER_POSITIVES:
# assign bg labels first so that positive labels can clobber them
labels[max_overlaps < cfg.TRAIN.RPN_NEGATIVE_OVERLAP] = 0
# fg label: for each gt, anchor with highest overlap
labels[gt_argmax_overlaps] = 1
# fg label: above threshold IOU
labels[max_overlaps >= cfg.TRAIN.RPN_POSITIVE_OVERLAP] = 1
if cfg.TRAIN.RPN_CLOBBER_POSITIVES:
# assign bg labels last so that negative labels can clobber positives
labels[max_overlaps < cfg.TRAIN.RPN_NEGATIVE_OVERLAP] = 0
# subsample positive labels if we have too many
num_fg = int(cfg.TRAIN.RPN_FG_FRACTION * cfg.TRAIN.RPN_BATCHSIZE)
fg_inds = np.where(labels == 1)[0]
if len(fg_inds) > num_fg:
disable_inds = npr.choice(
fg_inds, size=(len(fg_inds) - num_fg), replace=False)
labels[disable_inds] = -1
# subsample negative labels if we have too many
num_bg = cfg.TRAIN.RPN_BATCHSIZE - np.sum(labels == 1)
bg_inds = np.where(labels == 0)[0]
if len(bg_inds) > num_bg:
disable_inds = npr.choice(
bg_inds, size=(len(bg_inds) - num_bg), replace=False)
labels[disable_inds] = -1
#print "was %s inds, disabling %s, now %s inds" % (
#len(bg_inds), len(disable_inds), np.sum(labels == 0))
        bbox_targets = _compute_targets(anchors, gt_boxes[argmax_overlaps, :])
bbox_inside_weights = np.zeros((len(inds_inside), 4), dtype=np.float32)
bbox_inside_weights[labels == 1, :] = np.array(cfg.TRAIN.RPN_BBOX_INSIDE_WEIGHTS)
bbox_outside_weights = np.zeros((len(inds_inside), 4), dtype=np.float32)
if cfg.TRAIN.RPN_POSITIVE_WEIGHT < 0:
# uniform weighting of examples (given non-uniform sampling)
num_examples = np.sum(labels >= 0)
positive_weights = np.ones((1, 4)) * 1.0 / num_examples
negative_weights = np.ones((1, 4)) * 1.0 / num_examples
else:
assert ((cfg.TRAIN.RPN_POSITIVE_WEIGHT > 0) &
(cfg.TRAIN.RPN_POSITIVE_WEIGHT < 1))
positive_weights = (cfg.TRAIN.RPN_POSITIVE_WEIGHT /
np.sum(labels == 1))
negative_weights = ((1.0 - cfg.TRAIN.RPN_POSITIVE_WEIGHT) /
np.sum(labels == 0))
bbox_outside_weights[labels == 1, :] = positive_weights
bbox_outside_weights[labels == 0, :] = negative_weights
if DEBUG:
self._sums += bbox_targets[labels == 1, :].sum(axis=0)
self._squared_sums += (bbox_targets[labels == 1, :] ** 2).sum(axis=0)
self._counts += np.sum(labels == 1)
means = self._sums / self._counts
stds = np.sqrt(self._squared_sums / self._counts - means ** 2)
print 'means:'
print means
print 'stdevs:'
print stds
# map up to original set of anchors
labels = _unmap(labels, total_anchors, inds_inside, fill=-1)
bbox_targets = _unmap(bbox_targets, total_anchors, inds_inside, fill=0)
bbox_inside_weights = _unmap(bbox_inside_weights, total_anchors, inds_inside, fill=0)
bbox_outside_weights = _unmap(bbox_outside_weights, total_anchors, inds_inside, fill=0)
if DEBUG:
print 'rpn: max max_overlap', np.max(max_overlaps)
print 'rpn: num_positive', np.sum(labels == 1)
print 'rpn: num_negative', np.sum(labels == 0)
self._fg_sum += np.sum(labels == 1)
self._bg_sum += np.sum(labels == 0)
self._count += 1
print 'rpn: num_positive avg', self._fg_sum / self._count
print 'rpn: num_negative avg', self._bg_sum / self._count
# labels
labels = labels.reshape((1, height, width, A)).transpose(0, 3, 1, 2)
labels = labels.reshape((1, 1, A * height, width))
top[0].reshape(*labels.shape)
top[0].data[...] = labels
# bbox_targets
bbox_targets = bbox_targets \
.reshape((1, height, width, A * 4)).transpose(0, 3, 1, 2)
top[1].reshape(*bbox_targets.shape)
top[1].data[...] = bbox_targets
# bbox_inside_weights
bbox_inside_weights = bbox_inside_weights \
.reshape((1, height, width, A * 4)).transpose(0, 3, 1, 2)
assert bbox_inside_weights.shape[2] == height
assert bbox_inside_weights.shape[3] == width
top[2].reshape(*bbox_inside_weights.shape)
top[2].data[...] = bbox_inside_weights
# bbox_outside_weights
bbox_outside_weights = bbox_outside_weights \
.reshape((1, height, width, A * 4)).transpose(0, 3, 1, 2)
assert bbox_outside_weights.shape[2] == height
assert bbox_outside_weights.shape[3] == width
top[3].reshape(*bbox_outside_weights.shape)
top[3].data[...] = bbox_outside_weights
def backward(self, top, propagate_down, bottom):
"""This layer does not propagate gradients."""
pass
def reshape(self, bottom, top):
"""Reshaping happens during the call to forward."""
pass
def _unmap(data, count, inds, fill=0):
""" Unmap a subset of item (data) back to the original set of items (of
size count) """
if len(data.shape) == 1:
ret = np.empty((count, ), dtype=np.float32)
ret.fill(fill)
ret[inds] = data
else:
ret = np.empty((count, ) + data.shape[1:], dtype=np.float32)
ret.fill(fill)
ret[inds, :] = data
return ret
def _compute_targets(ex_rois, gt_rois):
"""Compute bounding-box regression targets for an image."""
assert ex_rois.shape[0] == gt_rois.shape[0]
assert ex_rois.shape[1] == 4
assert gt_rois.shape[1] == 5
targets = bbox_transform(ex_rois, gt_rois[:, :4]).astype(np.float32, copy=False)
if cfg.TRAIN.RPN_NORMALIZE_TARGETS:
assert cfg.TRAIN.RPN_NORMALIZE_MEANS is not None
assert cfg.TRAIN.RPN_NORMALIZE_STDS is not None
targets -= cfg.TRAIN.RPN_NORMALIZE_MEANS
targets /= cfg.TRAIN.RPN_NORMALIZE_STDS
return targets
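# _unmap in miniature (toy example, not part of the original file): scatter
# two labelled anchors back into a full anchor set of size 5.
#
#   _unmap(np.array([1., 2.]), 5, np.array([0, 3]), fill=-1)
#   # -> array([ 1., -1., -1., 2., -1.], dtype=float32)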
| 11,700 | 39.487889 | 95 | py |
bottom-up-attention | bottom-up-attention-master/lib/rpn/heatmap_layer.py |
import caffe
import yaml
import numpy as np
import numpy.random as npr
from fast_rcnn.config import cfg
from fast_rcnn.bbox_transform import bbox_transform
from utils.cython_bbox import bbox_overlaps
DEBUG = False
class HeatmapLayer(caffe.Layer):
"""
Takes regions of interest (rois) and outputs heatmaps.
"""
def setup(self, bottom, top):
layer_params = yaml.load(self.param_str)
self._output_w = layer_params['output_w']
self._output_h = layer_params['output_h']
self._out_size = np.array([self._output_w, self._output_h,
self._output_w, self._output_h],dtype=float)
top[0].reshape(bottom[0].data.shape[0], 1, self._output_h, self._output_w)
def forward(self, bottom, top):
# im_info (height, width, scaling)
assert bottom[1].data.shape[0] == 1, 'Batch size == 1 only'
image_h = bottom[1].data[0][0]
image_w = bottom[1].data[0][1]
image_size = np.array([image_w, image_h, image_w, image_h],dtype=float)
# Proposal ROIs (0, x1, y1, x2, y2) coming from RPN
# (i.e., rpn.proposal_layer.ProposalLayer), or any other source
rois = bottom[0].data
rois = rois.reshape(rois.shape[0], rois.shape[1])
rois = rois[:,1:]*self._out_size/image_size
# This will fill occupied pixels in an approximate (dilated) fashion
rois_int = np.round(np.hstack((
np.floor(rois[:,[0]]),
np.floor(rois[:,[1]]),
np.minimum(self._output_w-1,np.ceil(rois[:,[2]])),
np.minimum(self._output_h-1,np.ceil(rois[:,[3]]))
))).astype(int)
top[0].reshape(bottom[0].data.shape[0], 1, self._output_h, self._output_w)
top[0].data[...] = -1
for i in range(rois.shape[0]):
top[0].data[i, 0, rois_int[i,1]:rois_int[i,3], rois_int[i,0]:rois_int[i,2]] = 1
def backward(self, top, propagate_down, bottom):
"""This layer does not propagate gradients."""
pass
def reshape(self, bottom, top):
"""Reshaping happens during the call to forward."""
pass
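# Shape sketch (toy numbers, not part of the original file): with
# output_w = output_h = 14 and a 224x224 input image, a roi of
# (0, 112, 112, 224, 224) scales to x1 = y1 = 7 with x2 = y2 clamped to 13,
# so top[0][i, 0, 7:13, 7:13] is set to 1 and the rest of that heatmap
# channel stays at -1.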
| 2,111 | 37.4 | 91 | py |
bottom-up-attention | bottom-up-attention-master/lib/transform/torch_image_transform_layer.py | # --------------------------------------------------------
# Fast/er R-CNN
# Licensed under The MIT License [see LICENSE for details]
# --------------------------------------------------------
""" Transform images for compatibility with models trained with
https://github.com/facebook/fb.resnet.torch.
Usage in model prototxt:
layer {
name: 'data_xform'
type: 'Python'
bottom: 'data_caffe'
top: 'data'
python_param {
module: 'transform.torch_image_transform_layer'
layer: 'TorchImageTransformLayer'
}
}
"""
import caffe
from fast_rcnn.config import cfg
import numpy as np
class TorchImageTransformLayer(caffe.Layer):
def setup(self, bottom, top):
# (1, 3, 1, 1) shaped arrays
self.PIXEL_MEANS = \
np.array([[[[0.48462227599918]],
[[0.45624044862054]],
[[0.40588363755159]]]])
self.PIXEL_STDS = \
np.array([[[[0.22889466674951]],
[[0.22446679341259]],
[[0.22495548344775]]]])
# The default ("old") pixel means that were already subtracted
channel_swap = (0, 3, 1, 2)
self.OLD_PIXEL_MEANS = \
cfg.PIXEL_MEANS[np.newaxis, :, :, :].transpose(channel_swap)
top[0].reshape(*(bottom[0].shape))
def forward(self, bottom, top):
ims = bottom[0].data
# Invert the channel means that were already subtracted
ims += self.OLD_PIXEL_MEANS
# 1. Permute BGR to RGB and normalize to [0, 1]
ims = ims[:, [2, 1, 0], :, :] / 255.0
# 2. Remove channel means
ims -= self.PIXEL_MEANS
# 3. Standardize channels
ims /= self.PIXEL_STDS
top[0].reshape(*(ims.shape))
top[0].data[...] = ims
def backward(self, top, propagate_down, bottom):
"""This layer does not propagate gradients."""
pass
def reshape(self, bottom, top):
"""Reshaping happens during the call to forward."""
pass
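# Toy check of the transform above (not part of the original file): a black
# input pixel (raw BGR 0 once the old means are added back) maps to roughly
# (-2.12, -2.03, -1.80) channel-wise in RGB order, i.e.
# (0 - PIXEL_MEANS) / PIXEL_STDS.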
| 2,000 | 29.784615 | 72 | py |
XDF-GAN | XDF-GAN-master/run-sgan-tessellate.py | """
Script to create very large tessellated GDF
Copyright 2019 Mike Smith
Please see COPYING for licence details
"""
import matplotlib as mpl
mpl.use("Agg")
# General imports
import numpy as np
import h5py
import os
from time import time
import argparse
import astropy.io.fits as pyfits
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm
import imageio
from skimage.util import view_as_windows
# ML specific imports
from keras.models import load_model
def un_min_max_norm(ar, ar_max, ar_min):
"""
Reverse min max normalising carried out on the original UDF data.
"""
return ar*(ar_max - ar_min) + ar_min
def find_nearest(ar, val):
"""
Get position in array of value nearest to 'val'.
"""
return np.argmin(np.abs(ar - val))
def get_sigma(hwhm):
"""
Given the half width at half maximum, find the standard deviation of a normal distribution.
"""
return (2*np.abs(hwhm))/(np.sqrt(8*np.log(2)))
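# Derivation note (added for clarity): for a Gaussian,
# FWHM = 2*sqrt(2*ln 2)*sigma, so with HWHM = FWHM/2 this rearranges to
# sigma = 2*|HWHM| / sqrt(8*ln 2), which is the expression above; e.g.
# get_sigma(1.1774) ~= 1.0, matching the unit Gaussian's HWHM of sqrt(2*ln 2).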
def noise_replacement_low_vals(x, sigma, mu):
    """
    Replace values within two sigma of zero with a draw from N(mu, sigma).
    (Restored here: apply_noise_low_vals below depends on it, but it was only
    defined in run-sgan.py.)
    """
    return np.random.normal(mu, sigma) if np.abs(x) <= 2*sigma else x

# Vectorise so the replacement applies elementwise to whole arrays
noise_replacement_low_vals = np.vectorize(noise_replacement_low_vals)

def apply_noise_low_vals(ar):
    """
    Apply noise to low values given an array.
    """
hist = np.histogram(ar, 100000)
maxpoint = np.max(hist[0])
negsx = hist[1][:-1][hist[1][:-1] <= 0]
negsy = hist[0][hist[1][:-1] <= 0]
hwhm = negsx[find_nearest(negsy, maxpoint/2)]
sigma = get_sigma(hwhm)
mu = 0
ar_replaced_noise = noise_replacement_low_vals(ar, sigma, mu)
return ar_replaced_noise.astype(np.float32)
def rescale(ar):
"""
Rescale so peak is at zero.
"""
hist = np.histogram(ar, 10000)
delta = hist[1][hist[0].argmax()]
return ar - delta
def shuffle_noise_given_array(ar):
"""
Shuffle noise values given an array.
"""
hist = np.histogram(ar, 100000)
maxpoint = np.max(hist[0])
negsx = hist[1][:-1][hist[1][:-1] <= 0]
negsy = hist[0][hist[1][:-1] <= 0]
hwhm = negsx[find_nearest(negsy, maxpoint/2)]
sigma = get_sigma(hwhm)
mu = 0
low_vals = np.random.permutation(ar[ar <= 2*sigma])
ar[np.where(ar <= 2*sigma)] = low_vals
return ar.astype(np.float32)
if __name__ == "__main__":
# Argument parsing
parser = argparse.ArgumentParser("Prorduce a fake xdf file.")
# Args
parser.add_argument("-m", "--model", help="Model file (h5).")
parser.add_argument("-l", "--logdir", nargs="?", default="../big_ims", help="Logdir, default ../big_ims")
parser.add_argument("-z", "--z_size", nargs="?", default=1024, type=int, help="Input noise array size (*16 for output size), default 1024. Must be a power of 2.")
parser.add_argument("-o", "--overlap", nargs="?", default=32, type=int, help="Overlap between tiles in z space.")
parser.add_argument("-f", "--fits", default=False, action="store_true", help="Output in FITS format.")
parser.add_argument("-p", "--png", default=False, action="store_true", help="Output greyscale PNG images + histogram.")
parser.add_argument("-n", "--numpy", default=False, action="store_true", help="Output numpy array.")
args = parser.parse_args()
dt = int(time())
model_file = args.model
logdir = "{}/{}/".format(args.logdir, dt)
os.mkdir(logdir)
z_size = args.z_size
overlap = args.overlap
chunks = z_size//64
maxes = [0.5262004, 0.44799575, 0.62030375]
mins = [-0.004748813, -0.0031752307, -0.011242471]
# Load generator
gen = load_model(model_file)
big_z = np.random.randn(z_size+overlap, z_size+overlap, 50).astype(np.float32)
mini_zs = np.squeeze(view_as_windows(big_z, ((z_size//chunks)+overlap, (z_size//chunks)+overlap, 50), step=(z_size//chunks, z_size//chunks, 1)))
print(mini_zs.shape)
z = np.reshape(mini_zs, (np.product(mini_zs.shape[0:2]), *mini_zs.shape[2:]))
print(z.shape)
print("Predicting imagery...")
print("Batch size 4")
ims = gen.predict(z, batch_size=4, verbose=1) # Batched for very large imagery
print(logdir, ims.shape)
ims = ims[:, (overlap*16)//2:-(overlap*16)//2, (overlap*16)//2:-(overlap*16)//2, :] # remove overlap
ims = np.reshape(ims, (*mini_zs.shape[0:2], 1024, 1024, 3))
im = np.concatenate(np.split(ims, len(ims), axis=0), axis=2) # Stitch image back together
im = np.squeeze(np.concatenate(np.split(im, len(ims), axis=1), axis=3)) # ditto...
# Output values
if args.numpy: # Output n-channel image in npy format
print("Outputting as npy")
np.save("{}array.npy".format(logdir), np.squeeze(im))
if args.png: # Output PNG images for each channel + a histogram for each (n-channel) image
print("Outputting as PNG")
hist = np.histogram(im, 10000)
plt.yscale("log")
plt.plot(hist[1][:-1], hist[0])
plt.savefig("{}hist.png".format(logdir))
plt.close()
for channel in np.arange(ims.shape[-1]):
plt.figure(figsize=(32, 32))
plt.tight_layout()
plt.imshow(np.squeeze(im[..., channel]))
plt.savefig("{}{}.png".format(logdir, channel))
plt.close()
if args.fits: # Output as a separate FITS image for each channel
print("Outputting as FITS")
#im = un_min_max_norm(im, ar_max=0.4142234, ar_min=-0.011242471) # Uncomment for image wise norming
for channel in np.arange(ims.shape[-1]):
print("Channel:", channel)
print("Before unnorming:", im[..., channel].max(), im[..., channel].min())
im[..., channel] = un_min_max_norm(im[..., channel], ar_max=maxes[channel], ar_min=mins[channel]) # For channel wise norming
im[..., channel] = rescale(im[..., channel])
print("After unnorming:", im[..., channel].max(), im[..., channel].min())
#pyfits.writeto("{}{}.fits".format(logdir, channel), np.squeeze(shuffle_noise_given_array(im[..., channel])), overwrite=True)
pyfits.writeto("{}{}.fits".format(logdir, channel), np.squeeze(im[..., channel]), overwrite=True)
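# Example invocation (hypothetical model path): with the default z_size of
# 1024 this writes one 16384x16384 pixel FITS file per channel
# (0.fits, 1.fits, 2.fits) under <logdir>/<unixtime>/.
#
#   python run-sgan-tessellate.py -m logs/00100-gen-model.h5 -z 1024 -o 32 -f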
| 5,897 | 35.8625 | 166 | py |
XDF-GAN | XDF-GAN-master/run-sgan.py | """
Script to run GDF generation
Copyright 2019 Mike Smith
Please see COPYING for licence details
"""
import matplotlib as mpl
mpl.use("Agg")
# General imports
import numpy as np
import h5py
import os
from time import time
import argparse
import astropy.io.fits as pyfits
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm
# ML specific imports
from keras.models import load_model
def un_min_max_norm(ar, ar_max, ar_min):
"""
Reverse min max normalising carried out on the original UDF data.
"""
return ar*(ar_max - ar_min) + ar_min
def find_nearest(ar, val):
"""
Get position in array of value nearest to 'val'.
"""
return np.argmin(np.abs(ar - val))
def get_sigma(hwhm):
"""
Given the half width at half maximum, find the standard deviation of a normal distribution.
"""
return (2*np.abs(hwhm))/(np.sqrt(8*np.log(2)))
def noise_replacement_low_vals(x, sigma, mu):
"""
Replace low values with a random normal distribution
"""
return np.random.normal(mu, sigma) if np.abs(x) <= 2*sigma else x
def apply_noise_low_vals(ar):
"""
Apply noise to low values given an array.
"""
hist = np.histogram(ar, 100000)
maxpoint = np.max(hist[0])
negsx = hist[1][:-1][hist[1][:-1] <= 0]
negsy = hist[0][hist[1][:-1] <= 0]
hwhm = negsx[find_nearest(negsy, maxpoint/2)]
sigma = get_sigma(hwhm)
mu = 0
ar_replaced_noise = noise_replacement_low_vals(ar, sigma, mu)
return ar_replaced_noise.astype(np.float32)
def noise_replacement_all_vals(x, sigma, mu):
"""
Add a noise sampled from a gaussian to all values
"""
return x + np.random.normal(mu, sigma)
def apply_noise_all_vals(ar):
"""
Apply additive noise to all values given an array.
"""
hist = np.histogram(ar, 100000)
maxpoint = np.max(hist[0])
negsx = hist[1][:-1][hist[1][:-1] <= 0]
negsy = hist[0][hist[1][:-1] <= 0]
hwhm = negsx[find_nearest(negsy, maxpoint/2)]
sigma = get_sigma(hwhm)
mu = 0
ar_replaced_noise = noise_replacement_all_vals(ar, sigma, mu)
return ar_replaced_noise.astype(np.float32)
def rescale(ar):
"""
Rescale so peak is at zero.
"""
hist = np.histogram(ar, 10000)
delta = hist[1][hist[0].argmax()]
return ar - delta
def shuffle_noise_given_array(ar):
"""
Shuffle noise values given an array.
"""
hist = np.histogram(ar, 100000)
maxpoint = np.max(hist[0])
negsx = hist[1][:-1][hist[1][:-1] <= 0]
negsy = hist[0][hist[1][:-1] <= 0]
hwhm = negsx[find_nearest(negsy, maxpoint/2)]
sigma = get_sigma(hwhm)
mu = 0
low_vals = np.random.permutation(ar[ar <= 1*sigma])
ar[np.where(ar <= 1*sigma)] = low_vals
return ar.astype(np.float32)
if __name__ == "__main__":
# Argument parsing
parser = argparse.ArgumentParser("Produce a fake xdf file.")
# Args
parser.add_argument("-m", "--model", help="Model file (h5).")
parser.add_argument("-l", "--logdir", nargs="?", default="../logs/outs", help="Logdir, default ../logs/outs/$UNIXTIME")
parser.add_argument("-z", "--z_size", nargs="?", default=64, type=int, help="Input noise array size (*16 for output size), default 64.")
parser.add_argument("-n", "--images", nargs="?", default=10, type=int, help="Number of images to generate.")
parser.add_argument("-f", "--fits", default=False, action="store_true", help="Output in FITS format.")
parser.add_argument("-p", "--png", default=False, action="store_true", help="Output greyscale PNG images + histogram.")
parser.add_argument("--numpy", default=False, action="store_true", help="Output numpy array.")
parser.add_argument("-s", "--shuffle", default=False, action="store_true", help="Shuffle output to mitigate noise waffling in FITS output.")
parser.add_argument("--seed", nargs="?", default=42, type=int, help="A seed for np.random.seed")
args = parser.parse_args()
np.random.seed(args.seed)
dt = int(time())
model_file = args.model
n_images = args.images
logdir = "{}/{}/".format(args.logdir, dt)
os.mkdir(logdir)
z_size = args.z_size
test_batch_size = 100
# These are the original image maxima and minima for each channel
maxes = [0.5262004, 0.44799575, 0.62030375]
mins = [-0.004748813, -0.0031752307, -0.011242471]
noise_replacement_low_vals = np.vectorize(noise_replacement_low_vals)
noise_replacement_all_vals = np.vectorize(noise_replacement_all_vals)
# Load generator
gen = load_model(model_file)
z = np.random.randn(n_images, z_size, z_size, 50).astype(np.float32)
ims = gen.predict(z, batch_size=1, verbose=1) # added dtype still needs testing
print(logdir, ims.shape, ims.dtype)
# Output values
for i, im in enumerate(ims):
if args.numpy: # Output n-channel image in npy format
print("Outputting as npy")
np.save("{}{}.npy".format(logdir, i), np.squeeze(im))
if args.png: # Output PNG images for each channel + a histogram for each (n-channel) image
print("Outputting as PNG")
hist = np.histogram(im, 10000)
plt.yscale("log")
plt.plot(hist[1][:-1], hist[0])
plt.savefig("{}{}-hist.png".format(logdir, i))
plt.close()
for channel in np.arange(ims.shape[-1]):
plt.figure(figsize=(16, 16))
plt.imshow(np.squeeze(im[..., channel]), norm=LogNorm())
plt.savefig("{}{}-{}.png".format(logdir, i, channel))
plt.close()
if args.fits: # Output as a separate FITS image for each channel
print("Outputting as FITS")
#im = un_min_max_norm(im, ar_max=0.4142234, ar_min=-0.011242471) # Uncomment for image wide norming
for channel in np.arange(ims.shape[-1]):
print("Channel:", channel)
print("Before unnorming:", im[..., channel].max(), im[..., channel].min())
im[..., channel] = un_min_max_norm(im[..., channel], ar_max=maxes[channel], ar_min=mins[channel]) # For channel wise norming
im[..., channel] = rescale(im[..., channel])
print("After unnorming:", im[..., channel].max(), im[..., channel].min())
if args.shuffle:
pyfits.writeto("{}{}-{}.fits".format(logdir, i, channel), np.squeeze(shuffle_noise_given_array(im[..., channel])), overwrite=True)
else:
pyfits.writeto("{}{}-{}.fits".format(logdir, i, channel), np.squeeze(im[..., channel]), overwrite=True)
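# Example invocation (hypothetical model path): generates four 1024x1024
# three-channel samples and writes one FITS file per channel, with the noise
# floor shuffled to suppress generator waffling.
#
#   python run-sgan.py -m logs/00100-gen-model.h5 -n 4 -z 64 -f -s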
| 6,620 | 35.379121 | 150 | py |
XDF-GAN | XDF-GAN-master/sgan.py | """
Script to train GDF-SGAN
Copyright 2019 Mike Smith
Please see COPYING for licence details
"""
import matplotlib as mpl
mpl.use("Agg")
# General imports
import numpy as np
import h5py
import os
from time import time
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm
import argparse
# ML specific imports
import tensorflow as tf
import keras.backend as K
from keras.models import Model
from keras.layers import Input, Dense, Lambda, Conv2D, Conv2DTranspose, LeakyReLU, ELU, GlobalAveragePooling2D, Concatenate
from keras.optimizers import Adam
from keras.utils.generic_utils import Progbar
from keras.preprocessing.image import ImageDataGenerator
def get_images(file):
"""
Get XDF fits (np) file.
"""
im = np.load(file)
print(im.shape)
return im
def random_crop(img, crop_size=128):
"""
Random crop big xdf image.
"""
height, width = img.shape[0], img.shape[1]
x = np.random.randint(0, width - crop_size + 1)
y = np.random.randint(0, height - crop_size + 1)
return img[y:(y+crop_size), x:(x+crop_size), :]
def gen(z_shape=(None, None, 50), num_layers=4):
"""
Model a spatial GAN generator with `num_layers` hidden layers.
"""
fs = [32*2**f for f in np.arange(num_layers)][::-1] # define filter sizes
z = Input(shape=z_shape) # z
ct = Conv2DTranspose(filters=fs[0], kernel_size=4, strides=2, padding="same")(z)
ct = ELU()(ct)
for f in fs[1:]:
ct = Conv2DTranspose(filters=f, kernel_size=4, strides=2, padding="same")(ct)
ct = ELU()(ct)
ct = Conv2DTranspose(filters=f, kernel_size=4, strides=1, padding="same")(ct)
ct = ELU()(ct)
ct = Conv2DTranspose(filters=f, kernel_size=4, strides=1, padding="same")(ct)
ct = ELU()(ct)
G_z = Conv2DTranspose(filters=3, kernel_size=3, strides=1, padding="same", activation="sigmoid")(ct)
model = Model(z, G_z, name="Generator")
model.summary()
return model
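# Spatial bookkeeping for gen() (illustrative): with the default num_layers=4
# there are four stride-2 transposed convolutions, so a latent block of shape
# (N, h, w, 50) maps to an image of shape (N, 16*h, 16*w, 3), e.g.
# z of (1, 4, 4, 50) -> G(z) of (1, 64, 64, 3).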
def disc(x_shape=(None, None, 6), num_layers=4):
"""
Model a spatial GAN discriminator.
"""
fs = [32*2**f for f in np.arange(num_layers)] # define filter sizes
x = Input(shape=x_shape)
c = Conv2D(filters=fs[0], kernel_size=4, strides=2, padding="same")(x)
c = LeakyReLU(0.1)(c)
for f in fs[1:]:
c = Conv2D(filters=f, kernel_size=4, strides=2, padding="same")(c)
c = LeakyReLU(0.1)(c)
gap = GlobalAveragePooling2D()(c)
y = Dense(1)(gap)
model = Model(x, y, name="Discriminator")
model.summary()
return model
if __name__ == "__main__":
# Argument parsing
parser = argparse.ArgumentParser("Run a spatial GAN on XDF FITS data.")
# Args
parser.add_argument("-f", "--im_file", nargs="?", default="./data/mc_channelwise_clipping.npy", help="Numpy file containing image data.")
parser.add_argument("-b", "--batch_size", type=int, default=32, help="Batch size, default 32.")
parser.add_argument("-e", "--epochs", type=int, default=10001, help="Number of training epochs, default 301.")
parser.add_argument("-l", "--logdir", nargs="?", default="./logs", help="Logdir, default ./logs")
parser.add_argument("-r", "--learning_rate", nargs="?", type=float, default=0.0002, help="Learning rate for ADAM op")
parser.add_argument("-d", "--debug", dest="debug", default=False, action="store_true", help="Print example images/histograms at every epoch")
parser.add_argument("--gen_weights", nargs="?", help="File containing gen weights for continuation of training.")
parser.add_argument("--disc_weights", nargs="?", help="File containing disc weights for continuation of training.")
args = parser.parse_args()
batch_size = args.batch_size
epochs = args.epochs
debug = args.debug
disc_weights = args.disc_weights
gen_weights = args.gen_weights
dt = int(time())
logdir = "{}/{}/".format(args.logdir, dt)
print("logdir:", logdir)
os.mkdir(logdir)
sizes = [(4, 64), (8, 128), (16, 256)] # Possible input and output sizes
test_batch_size = (1, 32, 32, 50)
# might want to alter learning rate...
adam_op = Adam(lr=args.learning_rate, beta_1=0.5, beta_2=0.999)
xdf = get_images(args.im_file)[..., 1:4] # take F606W, F775W and F814W channels
og_histo = np.histogram(xdf, 10000)
    # Define generator and discriminator models (the instances deliberately
    # rebind the `gen` and `disc` function names defined above)
    gen = gen()
    disc = disc()
if disc_weights is not None and gen_weights is not None:
gen.load_weights(gen_weights)
disc.load_weights(disc_weights)
# Define real and fake images
raw_reals = Input(shape=(None, None, 3))
reals = Lambda(lambda x: tf.split(x, num_or_size_splits=2, axis=0))(raw_reals)
reals = Concatenate(axis=-1)([reals[0], reals[1]])
z = Input(shape=(None, None, 50))
fakes = Lambda(lambda x: tf.split(x, num_or_size_splits=2, axis=0))(gen(z))
fakes = Concatenate(axis=-1)([fakes[0], fakes[1]])
disc_r = disc(reals) # C(x_r)
disc_f = disc(fakes) # C(x_f)
# Define generator and discriminator losses according to RaGAN described in Jolicoeur-Martineau (2018).
# Dummy predictions and trues are needed in Keras (see https://github.com/Smith42/keras-relativistic-gan).
def rel_disc_loss(y_true, y_pred):
epsilon = 1e-9
return K.abs(-(K.mean(K.log(K.sigmoid(disc_r - K.mean(disc_f, axis=0))+epsilon), axis=0)\
+K.mean(K.log(1-K.sigmoid(disc_f - K.mean(disc_r, axis=0))+epsilon), axis=0)))
def rel_gen_loss(y_true, y_pred):
epsilon = 1e-9
return K.abs(-(K.mean(K.log(K.sigmoid(disc_f - K.mean(disc_r, axis=0))+epsilon), axis=0)\
+K.mean(K.log(1-K.sigmoid(disc_r - K.mean(disc_f, axis=0))+epsilon), axis=0)))
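    # In the notation of Jolicoeur-Martineau (2018), with C(.) the raw
    # discriminator logits:
    #   L_D = -E_r[log sigmoid(C(x_r) - E_f[C(x_f)])]
    #         - E_f[log(1 - sigmoid(C(x_f) - E_r[C(x_r)]))]
    # and L_G is the same expression with real and fake swapped; epsilon
    # guards the logs and K.abs keeps the reported loss non-negative.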
# Define trainable generator and discriminator
gen_train = Model([z, raw_reals], [disc_r, disc_f])
disc.trainable = False
gen_train.compile(adam_op, loss=[rel_gen_loss, None])
gen_train.summary()
disc_train = Model([z, raw_reals], [disc_r, disc_f])
gen.trainable = False
disc.trainable = True
disc_train.compile(adam_op, loss=[rel_disc_loss, None])
disc_train.summary()
# Train RaGAN
gen_loss = []
disc_loss = []
dummy_y = np.zeros((batch_size, 1), dtype=np.float32)
test_z = np.random.randn(test_batch_size[0],\
test_batch_size[1],\
test_batch_size[2],\
test_batch_size[3]).astype(np.float32)
# Define batch flow
batchflow = ImageDataGenerator(rotation_range=0,\
width_shift_range=0.0,\
height_shift_range=0.0,\
shear_range=0.0,\
zoom_range=0.0,\
channel_shift_range=0.0,\
fill_mode='reflect',\
horizontal_flip=True,\
vertical_flip=True,\
rescale=None)
start_time = time()
for epoch in np.arange(epochs):
print(epoch, "/", epochs)
n_batches = 30 # int(len(ims) // batch_size)
prog_bar = Progbar(target=n_batches)
batch_start_time = time()
for index in np.arange(n_batches):
size = sizes[np.random.randint(len(sizes))]
prog_bar.update(index)
# Update G
image_batch = batchflow.flow(np.array([random_crop(xdf, size[1]) for i in np.arange(batch_size)]), batch_size=batch_size)[0]
z = np.random.randn(batch_size, size[0], size[0], 50).astype(np.float32)
disc.trainable = False
gen.trainable = True
gen_loss.append(gen_train.train_on_batch([z, image_batch], dummy_y))
# Update D
image_batch = batchflow.flow(np.array([random_crop(xdf, size[1]) for i in np.arange(batch_size)]), batch_size=batch_size)[0]
z = np.random.randn(batch_size, size[0], size[0], 50).astype(np.float32)
disc.trainable = True
gen.trainable = False
disc_loss.append(disc_train.train_on_batch([z, image_batch], dummy_y))
print("\nEpoch time", int(time() - batch_start_time))
print("Total elapsed time", int(time() - start_time))
print("Gen, Disc losses", gen_loss[-1], disc_loss[-1])
## Print out losses and pics of G(z) outputs ##
if debug or epoch % 5 == 0:
gen_image = gen.predict(test_z)
print("OG im: max, min, mean, std", xdf.max(), xdf.min(), xdf.mean(), xdf.std())
print("Gen im: max, min, mean, std", gen_image.max(), gen_image.min(), gen_image.mean(), gen_image.std())
# Plot generated/real histo comparison
gen_histo = np.histogram(gen_image, 10000)
fig, axs = plt.subplots(nrows=1, ncols=1, figsize=(16, 16))
axs.set_yscale("log")
axs.plot(og_histo[1][:-1], og_histo[0], label="Original")
axs.plot(gen_histo[1][:-1], gen_histo[0], label="Generated")
axs.legend()
plt.savefig("{}/{:05d}-histogram.png".format(logdir, epoch))
plt.close(fig)
# Plot generated image
fig, axs = plt.subplots(nrows=2, ncols=3, figsize=(30, 20))
axs[0, 0].imshow(gen_image[0, ..., 0], cmap="gray", norm=LogNorm())
axs[0, 1].imshow(gen_image[0, ..., 1], cmap="gray", norm=LogNorm())
axs[0, 2].imshow(gen_image[0, ..., 2], cmap="gray", norm=LogNorm())
#axs[1, 0].imshow(gen_image[0, ..., 3], cmap="gray", norm=LogNorm())
#axs[1, 1].imshow(gen_image[0, ..., 4], cmap="gray", norm=LogNorm())
axs[1, 0].imshow(gen_image[0], norm=LogNorm()) # was [1,2] and sliced [...,1:4]
plt.tight_layout()
plt.savefig("{}/{:05d}-example.png".format(logdir, epoch))
plt.close(fig)
## Save model ##
if epoch % 10 == 0:
gen.save("{}/{:05d}-gen-model.h5".format(logdir, epoch))
gen.save_weights("{}/{:05d}-gen-weights.h5".format(logdir, epoch))
disc.save_weights("{}/{:05d}-disc-weights.h5".format(logdir, epoch))
fig, axs = plt.subplots(nrows=1, ncols=1, figsize=(8, 4))
disc_loss_ar = np.array(disc_loss)[:, 0]
gen_loss_ar = np.array(gen_loss)[:, 0]
axs.set_title("Losses at epoch " + str(epoch))
axs.set_xlabel("Global step")
axs.set_ylabel("Loss")
axs.set_yscale("log")
axs.plot(disc_loss_ar, label="disc loss")
axs.plot(gen_loss_ar, label="gen loss")
axs.legend()
plt.savefig("{}/{:05d}-loss.png".format(logdir, epoch))
plt.close(fig)
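# Example invocation (paths are the defaults above): trains on the XDF crops,
# checkpointing the generator model/weights every 10 epochs under
# ./logs/<unixtime>/.
#
#   python sgan.py -f ./data/mc_channelwise_clipping.npy -b 32 -e 10001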
| 10,761 | 39.920152 | 145 | py |
rlmeta | rlmeta-main/examples/atari/ppo/atari_ppo_rnd_model.py | # Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Tuple
import torch
import torch.nn as nn
import torch.nn.functional as F
import rlmeta.core.remote as remote
from rlmeta.agents.ppo import PPORNDModel
from rlmeta.core.types import NestedTensor
from rlmeta.models.actor_critic import DiscreteActorCriticRNDHead
from rlmeta.models.atari import NatureCNNBackbone, ImpalaCNNBackbone
class AtariPPORNDModel(PPORNDModel):
def __init__(self, num_actions: int, network: str = "nature") -> None:
super().__init__()
self._num_actions = num_actions
self._network = network.lower()
if self._network == "nature":
self._ppo_net = NatureCNNBackbone()
self._tgt_net = NatureCNNBackbone()
self._prd_net = NatureCNNBackbone()
self._head = DiscreteActorCriticRNDHead(self._ppo_net.output_size,
[512], num_actions)
elif self._network == "impala":
self._ppo_net = ImpalaCNNBackbone()
self._tgt_net = ImpalaCNNBackbone()
self._prd_net = ImpalaCNNBackbone()
self._head = DiscreteActorCriticRNDHead(self._ppo_net.output_size,
[256], num_actions)
else:
assert False, "Unsupported network."
def forward(
self, obs: torch.Tensor
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
x = obs.float() / 255.0
h = self._ppo_net(x)
logpi, ext_v, int_v = self._head(h)
return logpi, ext_v, int_v
@remote.remote_method(batch_size=128)
def act(
self, obs: torch.Tensor, deterministic_policy: torch.Tensor
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
with torch.no_grad():
logpi, ext_v, int_v = self.forward(obs)
greedy_action = logpi.argmax(-1, keepdim=True)
sample_action = logpi.exp().multinomial(1, replacement=True)
action = torch.where(deterministic_policy, greedy_action,
sample_action)
logpi = logpi.gather(dim=-1, index=action)
return action, logpi, ext_v, int_v
@remote.remote_method(batch_size=None)
def intrinsic_reward(self, obs: torch.Tensor) -> torch.Tensor:
return self._rnd_error(obs)
def rnd_loss(self, obs: torch.Tensor) -> torch.Tensor:
return self._rnd_error(obs).mean() * 0.5
def _rnd_error(self, obs: torch.Tensor) -> torch.Tensor:
x = obs.float() / 255.0
with torch.no_grad():
tgt = self._tgt_net(x)
prd = self._prd_net(x)
err = (prd - tgt).square().mean(-1, keepdim=True)
return err
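# Minimal local usage sketch (kept as comments; assumes the standard Atari
# stacked-frame observation of shape (N, 4, 84, 84), which is not spelled out
# in this file):
#
# model = AtariPPORNDModel(num_actions=6, network="nature")
# obs = torch.zeros(2, 4, 84, 84, dtype=torch.uint8)
# logpi, ext_v, int_v = model(obs)
# r_int = model.intrinsic_reward(obs)  # per-sample RND prediction error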
| 2,889 | 35.125 | 78 | py |
rlmeta | rlmeta-main/examples/atari/ppo/atari_ppo_model.py | # Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Tuple
import torch
import torch.nn as nn
import rlmeta.core.remote as remote
from rlmeta.agents.ppo import PPOModel
from rlmeta.models.actor_critic import DiscreteActorCriticHead
from rlmeta.models.atari import NatureCNNBackbone, ImpalaCNNBackbone
class AtariPPOModel(PPOModel):
def __init__(self, num_actions: int, network: str = "nature") -> None:
super().__init__()
self._num_actions = num_actions
self._network = network.lower()
if self._network == "nature":
self._backbone = NatureCNNBackbone()
self._head = DiscreteActorCriticHead(self._backbone.output_size,
[512], num_actions)
elif self._network == "impala":
self._backbone = ImpalaCNNBackbone()
self._head = DiscreteActorCriticHead(self._backbone.output_size,
[256], num_actions)
else:
assert False, "Unsupported network."
def forward(self, obs: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
x = obs.float() / 255.0
h = self._backbone(x)
logpi, v = self._head(h)
return logpi, v
@remote.remote_method(batch_size=128)
def act(
self, obs: torch.Tensor, deterministic_policy: torch.Tensor
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
with torch.no_grad():
logpi, v = self.forward(obs)
greedy_action = logpi.argmax(-1, keepdim=True)
sample_action = logpi.exp().multinomial(1, replacement=True)
action = torch.where(deterministic_policy, greedy_action,
sample_action)
logpi = logpi.gather(dim=-1, index=action)
return action, logpi, v
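# Minimal local usage sketch (kept as comments; assumes the standard Atari
# stacked-frame observation of shape (N, 4, 84, 84), which is not spelled out
# in this file):
#
# model = AtariPPOModel(num_actions=6, network="impala")
# obs = torch.zeros(2, 4, 84, 84, dtype=torch.uint8)
# det = torch.zeros(2, 1, dtype=torch.bool)
# action, logpi, v = model.act(obs, det)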
| 1,988 | 35.163636 | 78 | py |
rlmeta | rlmeta-main/examples/atari/ppo/atari_ppo.py | # Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import copy
import json
import logging
import time
import hydra
import torch
import torch.multiprocessing as mp
import rlmeta.envs.atari_wrapper as atari_wrapper
import rlmeta.utils.hydra_utils as hydra_utils
import rlmeta.utils.random_utils as random_utils
import rlmeta.utils.remote_utils as remote_utils
from examples.atari.ppo.atari_ppo_model import AtariPPOModel
from rlmeta.agents.agent import AgentFactory
from rlmeta.agents.ppo import PPOAgent
from rlmeta.core.controller import Phase, Controller
from rlmeta.core.loop import LoopList, ParallelLoop
from rlmeta.core.model import ModelVersion, RemotableModelPool
from rlmeta.core.model import make_remote_model, wrap_downstream_model
from rlmeta.core.replay_buffer import ReplayBuffer, make_remote_replay_buffer
from rlmeta.core.server import Server, ServerList
from rlmeta.samplers import UniformSampler
from rlmeta.storage import TensorCircularBuffer
from rlmeta.utils.optimizer_utils import make_optimizer
@hydra.main(config_path="./conf", config_name="conf_ppo")
def main(cfg):
if cfg.seed is not None:
random_utils.manual_seed(cfg.seed)
logging.info(hydra_utils.config_to_json(cfg))
env = atari_wrapper.make_atari_env(**cfg.env)
model = AtariPPOModel(env.action_space.n,
network=cfg.network).to(cfg.train_device)
model_pool = RemotableModelPool(copy.deepcopy(model).to(cfg.infer_device),
seed=cfg.seed)
optimizer = make_optimizer(model.parameters(), **cfg.optimizer)
replay_buffer = ReplayBuffer(TensorCircularBuffer(cfg.replay_buffer_size),
UniformSampler())
ctrl = Controller()
m_server = Server(cfg.m_server_name, cfg.m_server_addr)
r_server = Server(cfg.r_server_name, cfg.r_server_addr)
c_server = Server(cfg.c_server_name, cfg.c_server_addr)
m_server.add_service(model_pool)
r_server.add_service(replay_buffer)
c_server.add_service(ctrl)
servers = ServerList([m_server, r_server, c_server])
learner_model = wrap_downstream_model(model, m_server)
t_actor_model = make_remote_model(model,
m_server,
version=ModelVersion.LATEST)
    # During blocking evaluation, STABLE is equivalent to LATEST
e_actor_model = make_remote_model(model,
m_server,
version=ModelVersion.LATEST)
learner_ctrl = remote_utils.make_remote(ctrl, c_server)
t_actor_ctrl = remote_utils.make_remote(ctrl, c_server)
e_actor_ctrl = remote_utils.make_remote(ctrl, c_server)
learner_replay_buffer = make_remote_replay_buffer(replay_buffer,
r_server,
prefetch=cfg.prefetch)
t_actor_replay_buffer = make_remote_replay_buffer(replay_buffer, r_server)
env_fac = atari_wrapper.AtariWrapperFactory(**cfg.env)
t_agent_fac = AgentFactory(PPOAgent,
t_actor_model,
replay_buffer=t_actor_replay_buffer,
gamma=cfg.gamma)
e_agent_fac = AgentFactory(
PPOAgent,
e_actor_model,
deterministic_policy=cfg.deterministic_evaluation)
t_loop = ParallelLoop(env_fac,
t_agent_fac,
t_actor_ctrl,
running_phase=Phase.TRAIN,
should_update=True,
num_rollouts=cfg.num_training_rollouts,
num_workers=cfg.num_training_workers,
seed=cfg.seed)
e_loop = ParallelLoop(env_fac,
e_agent_fac,
e_actor_ctrl,
running_phase=Phase.EVAL,
should_update=False,
num_rollouts=cfg.num_evaluation_rollouts,
num_workers=cfg.num_evaluation_workers,
seed=(None if cfg.seed is None else cfg.seed +
cfg.num_training_rollouts))
loops = LoopList([t_loop, e_loop])
learner = PPOAgent(learner_model,
replay_buffer=learner_replay_buffer,
controller=learner_ctrl,
optimizer=optimizer,
batch_size=cfg.batch_size,
gamma=cfg.gamma,
learning_starts=cfg.learning_starts,
model_push_period=cfg.model_push_period)
servers.start()
loops.start()
learner.connect()
start_time = time.perf_counter()
for epoch in range(cfg.num_epochs):
stats = learner.train(cfg.steps_per_epoch)
cur_time = time.perf_counter() - start_time
info = f"T Epoch {epoch}"
if cfg.table_view:
logging.info("\n\n" + stats.table(info, time=cur_time) + "\n")
else:
logging.info(
stats.json(info, phase="Train", epoch=epoch, time=cur_time))
time.sleep(1)
stats = learner.eval(cfg.num_evaluation_episodes,
keep_training_loops=True)
cur_time = time.perf_counter() - start_time
info = f"E Epoch {epoch}"
if cfg.table_view:
logging.info("\n\n" + stats.table(info, time=cur_time) + "\n")
else:
logging.info(
stats.json(info, phase="Eval", epoch=epoch, time=cur_time))
torch.save(model.state_dict(), f"ppo_agent-{epoch}.pth")
time.sleep(1)
loops.terminate()
servers.terminate()
if __name__ == "__main__":
mp.set_start_method("spawn")
main()
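# Example invocation (illustrative hydra overrides; the full key set lives in
# ./conf/conf_ppo.yaml):
#
#   python atari_ppo.py seed=123 num_epochs=100 train_device=cuda:0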
| 5,968 | 38.013072 | 78 | py |
rlmeta | rlmeta-main/examples/atari/ppo/atari_ppo_rnd.py | # Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import copy
import json
import logging
import time
import hydra
import torch
import torch.multiprocessing as mp
import rlmeta.envs.atari_wrapper as atari_wrapper
import rlmeta.utils.hydra_utils as hydra_utils
import rlmeta.utils.random_utils as random_utils
import rlmeta.utils.remote_utils as remote_utils
from examples.atari.ppo.atari_ppo_rnd_model import AtariPPORNDModel
from rlmeta.agents.agent import AgentFactory
from rlmeta.agents.ppo import PPORNDAgent
from rlmeta.core.controller import Phase, Controller
from rlmeta.core.loop import LoopList, ParallelLoop
from rlmeta.core.model import ModelVersion, RemotableModelPool
from rlmeta.core.model import make_remote_model, wrap_downstream_model
from rlmeta.core.replay_buffer import ReplayBuffer, make_remote_replay_buffer
from rlmeta.core.server import Server, ServerList
from rlmeta.samplers import UniformSampler
from rlmeta.storage import TensorCircularBuffer
from rlmeta.utils.optimizer_utils import make_optimizer
@hydra.main(config_path="./conf", config_name="conf_ppo")
def main(cfg):
if cfg.seed is not None:
random_utils.manual_seed(cfg.seed)
logging.info(hydra_utils.config_to_json(cfg))
env = atari_wrapper.make_atari_env(**cfg.env)
model = AtariPPORNDModel(env.action_space.n,
network=cfg.network).to(cfg.train_device)
model_pool = RemotableModelPool(copy.deepcopy(model).to(cfg.infer_device),
seed=cfg.seed)
optimizer = make_optimizer(model.parameters(), **cfg.optimizer)
ctrl = Controller()
replay_buffer = ReplayBuffer(TensorCircularBuffer(cfg.replay_buffer_size),
UniformSampler())
m_server = Server(cfg.m_server_name, cfg.m_server_addr)
r_server = Server(cfg.r_server_name, cfg.r_server_addr)
c_server = Server(cfg.c_server_name, cfg.c_server_addr)
m_server.add_service(model_pool)
r_server.add_service(replay_buffer)
c_server.add_service(ctrl)
servers = ServerList([m_server, r_server, c_server])
learner_model = wrap_downstream_model(model, m_server)
t_actor_model = make_remote_model(model,
m_server,
version=ModelVersion.LATEST)
    # During blocking evaluation, STABLE is the same as LATEST.
e_actor_model = make_remote_model(model,
m_server,
version=ModelVersion.LATEST)
a_ctrl = remote_utils.make_remote(ctrl, c_server)
t_ctrl = remote_utils.make_remote(ctrl, c_server)
e_ctrl = remote_utils.make_remote(ctrl, c_server)
learner_replay_buffer = make_remote_replay_buffer(replay_buffer,
r_server,
prefetch=cfg.prefetch)
t_actor_replay_buffer = make_remote_replay_buffer(replay_buffer, r_server)
env_fac = atari_wrapper.AtariWrapperFactory(**cfg.env)
t_agent_fac = AgentFactory(PPORNDAgent,
t_actor_model,
replay_buffer=t_actor_replay_buffer)
e_agent_fac = AgentFactory(
PPORNDAgent,
e_actor_model,
deterministic_policy=cfg.deterministic_evaluation)
t_loop = ParallelLoop(env_fac,
t_agent_fac,
t_ctrl,
running_phase=Phase.TRAIN,
should_update=True,
num_rollouts=cfg.num_training_rollouts,
num_workers=cfg.num_training_workers,
seed=cfg.seed)
e_loop = ParallelLoop(env_fac,
e_agent_fac,
e_ctrl,
running_phase=Phase.EVAL,
should_update=False,
num_rollouts=cfg.num_evaluation_rollouts,
num_workers=cfg.num_evaluation_workers,
seed=(None if cfg.seed is None else cfg.seed +
cfg.num_training_rollouts))
loops = LoopList([t_loop, e_loop])
learner = PPORNDAgent(learner_model,
replay_buffer=learner_replay_buffer,
controller=a_ctrl,
optimizer=optimizer,
batch_size=cfg.batch_size,
learning_starts=cfg.get("learning_starts", None),
model_push_period=cfg.model_push_period)
servers.start()
loops.start()
learner.connect()
start_time = time.perf_counter()
for epoch in range(cfg.num_epochs):
stats = learner.train(cfg.steps_per_epoch)
cur_time = time.perf_counter() - start_time
info = f"T Epoch {epoch}"
if cfg.table_view:
logging.info("\n\n" + stats.table(info, time=cur_time) + "\n")
else:
logging.info(
stats.json(info, phase="Train", epoch=epoch, time=cur_time))
time.sleep(1)
stats = learner.eval(cfg.num_evaluation_episodes,
keep_training_loops=True)
cur_time = time.perf_counter() - start_time
info = f"E Epoch {epoch}"
if cfg.table_view:
logging.info("\n\n" + stats.table(info, time=cur_time) + "\n")
else:
logging.info(
stats.json(info, phase="Eval", epoch=epoch, time=cur_time))
torch.save(model.state_dict(), f"ppo_rnd_agent-{epoch}.pth")
time.sleep(1)
loops.terminate()
servers.terminate()
if __name__ == "__main__":
mp.set_start_method("spawn")
main()
| 5,904 | 38.10596 | 78 | py |
rlmeta | rlmeta-main/examples/atari/dqn/atari_dqn_model.py | # Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import copy
from typing import Optional, Tuple
import torch
import torch.nn as nn
import torch.nn.functional as F
import rlmeta.core.remote as remote
import rlmeta.utils.nested_utils as nested_utils
from rlmeta.agents.dqn import DQNModel
from rlmeta.core.types import NestedTensor
from rlmeta.models.atari import NatureCNNBackbone, ImpalaCNNBackbone
from rlmeta.models.dqn import DQNHead, DuelingDQNHead
class AtariDQNNet(nn.Module):
def __init__(self,
num_actions: int,
network="nature",
dueling_dqn: bool = True,
spectral_norm: bool = True) -> None:
super().__init__()
self._num_actions = num_actions
self._network = network.lower()
self._dueling_dqn = dueling_dqn
self._spectral_norm = spectral_norm
head_cls = DuelingDQNHead if dueling_dqn else DQNHead
if self._network == "nature":
self._backbone = NatureCNNBackbone()
self._head = head_cls(self._backbone.output_size, [512],
num_actions)
elif self._network == "impala":
self._backbone = ImpalaCNNBackbone()
self._head = head_cls(self._backbone.output_size, [256],
num_actions)
else:
            raise ValueError(f"Unsupported network: {network}.")
def init_model(self) -> None:
if self._spectral_norm:
# Apply SN[-2] in https://arxiv.org/abs/2105.05246
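            # _layers[-3] is assumed to be the penultimate Linear layer
            # (the last two entries being the activation and output layer).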
if self._dueling_dqn:
nn.utils.parametrizations.spectral_norm(
self._head._mlp_a._layers[-3])
nn.utils.parametrizations.spectral_norm(
self._head._mlp_v._layers[-3])
else:
nn.utils.parametrizations.spectral_norm(
self._head._mlp._layers[-3])
def forward(self, observation: torch.Tensor) -> torch.Tensor:
x = observation.float() / 255.0
h = self._backbone(x)
a = self._head(h)
return a
class AtariDQNModel(DQNModel):
def __init__(self,
num_actions: int,
network: str = "nature",
dueling_dqn: bool = True,
spectral_norm: bool = True,
double_dqn: bool = False) -> None:
super().__init__()
self._num_actions = num_actions
self._network = network.lower()
self._dueling_dqn = dueling_dqn
self._spectral_norm = spectral_norm
self._double_dqn = double_dqn
# Bootstrapping with online network when double_dqn = False.
# https://arxiv.org/pdf/2209.07550.pdf
self._online_net = AtariDQNNet(num_actions,
network=network,
dueling_dqn=dueling_dqn,
spectral_norm=spectral_norm)
self._target_net = copy.deepcopy(
self._online_net) if double_dqn else None
def init_model(self) -> None:
self._online_net.init_model()
if self._target_net is not None:
self._target_net.init_model()
def forward(self, observation: torch.Tensor) -> torch.Tensor:
return self._online_net(observation)
def q(self, s: torch.Tensor, a: torch.Tensor) -> torch.Tensor:
q = self._online_net(s)
q = q.gather(dim=-1, index=a)
return q
@remote.remote_method(batch_size=256)
    def act(
        self, observation: torch.Tensor, eps: torch.Tensor
    ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
with torch.no_grad():
q = self._online_net(observation) # size = (batch_size, action_dim)
_, action_dim = q.size()
greedy_action = q.argmax(-1, keepdim=True)
pi = torch.ones_like(q) * (eps / action_dim)
pi.scatter_(dim=-1,
index=greedy_action,
src=1.0 - eps * (action_dim - 1) / action_dim)
action = pi.multinomial(1)
v = self._value(observation, q)
q = q.gather(dim=-1, index=action)
return action, q, v
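    # A worked example of the epsilon-greedy distribution built above,
    # assuming eps = 0.1 and action_dim = 4:
    #   each non-greedy action: eps / action_dim = 0.025
    #   greedy action: 1 - eps * (action_dim - 1) / action_dim = 0.925
    # so the probabilities sum to 0.925 + 3 * 0.025 = 1.0.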
def sync_target_net(self) -> None:
if self._target_net is not None:
self._target_net.load_state_dict(self._online_net.state_dict())
def _value(self,
observation: torch.Tensor,
q: Optional[torch.Tensor] = None) -> torch.Tensor:
if q is None:
q = self._online_net(observation)
if not self._double_dqn:
v = q.max(-1, keepdim=True)[0]
else:
a = q.argmax(-1, keepdim=True)
q = self._target_net(observation)
v = q.gather(dim=-1, index=a)
return v
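    # When double_dqn is enabled, v = Q_target(s, argmax_a Q_online(s, a)),
    # decoupling action selection from value estimation (van Hasselt et al.,
    # 2015); otherwise v = max_a Q_online(s, a) from the online network.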
| 4,918 | 34.388489 | 80 | py |
rlmeta | rlmeta-main/examples/atari/dqn/atari_apex_dqn.py | # Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import copy
import logging
import time
import hydra
import torch
import torch.multiprocessing as mp
import rlmeta.envs.atari_wrapper as atari_wrapper
import rlmeta.utils.hydra_utils as hydra_utils
import rlmeta.utils.random_utils as random_utils
import rlmeta.utils.remote_utils as remote_utils
from examples.atari.dqn.atari_dqn_model import AtariDQNModel
from rlmeta.agents.dqn import (ApexDQNAgent, ApexDQNAgentFactory,
ConstantEpsFunc, FlexibleEpsFunc)
from rlmeta.core.controller import Phase, Controller
from rlmeta.core.loop import LoopList, ParallelLoop
from rlmeta.core.model import ModelVersion, RemotableModelPool
from rlmeta.core.model import make_remote_model, wrap_downstream_model
from rlmeta.core.replay_buffer import ReplayBuffer, make_remote_replay_buffer
from rlmeta.core.server import Server, ServerList
from rlmeta.samplers import PrioritizedSampler
from rlmeta.storage import TensorCircularBuffer
from rlmeta.utils.optimizer_utils import make_optimizer
@hydra.main(config_path="./conf", config_name="conf_apex_dqn")
def main(cfg):
if cfg.seed is not None:
random_utils.manual_seed(cfg.seed)
logging.info(hydra_utils.config_to_json(cfg))
env = atari_wrapper.make_atari_env(**cfg.env)
model = AtariDQNModel(env.action_space.n,
network=cfg.network,
dueling_dqn=cfg.dueling_dqn,
spectral_norm=cfg.spectral_norm,
double_dqn=cfg.double_dqn).to(cfg.train_device)
infer_model = copy.deepcopy(model).to(cfg.infer_device)
infer_model.eval()
model_pool = RemotableModelPool(infer_model, seed=cfg.seed)
optimizer = make_optimizer(model.parameters(), **cfg.optimizer)
replay_buffer = ReplayBuffer(
TensorCircularBuffer(cfg.replay_buffer_size),
PrioritizedSampler(priority_exponent=cfg.priority_exponent))
ctrl = Controller()
m_server = Server(cfg.m_server_name, cfg.m_server_addr)
r_server = Server(cfg.r_server_name, cfg.r_server_addr)
c_server = Server(cfg.c_server_name, cfg.c_server_addr)
m_server.add_service(model_pool)
r_server.add_service(replay_buffer)
c_server.add_service(ctrl)
servers = ServerList([m_server, r_server, c_server])
learner_model = wrap_downstream_model(model, m_server)
t_actor_model = make_remote_model(model,
m_server,
version=ModelVersion.LATEST)
    # During blocking evaluation, STABLE is the same as LATEST.
e_actor_model = make_remote_model(model,
m_server,
version=ModelVersion.LATEST)
learner_ctrl = remote_utils.make_remote(ctrl, c_server)
t_actor_ctrl = remote_utils.make_remote(ctrl, c_server)
e_actor_ctrl = remote_utils.make_remote(ctrl, c_server)
learner_replay_buffer = make_remote_replay_buffer(replay_buffer,
r_server,
prefetch=cfg.prefetch)
t_actor_replay_buffer = make_remote_replay_buffer(replay_buffer, r_server)
env_fac = atari_wrapper.AtariWrapperFactory(**cfg.env)
t_agent_fac = ApexDQNAgentFactory(t_actor_model,
FlexibleEpsFunc(
cfg.eps, cfg.num_training_rollouts),
replay_buffer=t_actor_replay_buffer,
n_step=cfg.n_step,
gamma=cfg.gamma,
max_abs_reward=cfg.max_abs_reward,
rescale_value=cfg.rescale_value)
e_agent_fac = ApexDQNAgentFactory(e_actor_model,
ConstantEpsFunc(cfg.evaluation_eps))
t_loop = ParallelLoop(env_fac,
t_agent_fac,
t_actor_ctrl,
running_phase=Phase.TRAIN,
should_update=True,
num_rollouts=cfg.num_training_rollouts,
num_workers=cfg.num_training_workers,
seed=cfg.seed)
e_loop = ParallelLoop(env_fac,
e_agent_fac,
e_actor_ctrl,
running_phase=Phase.EVAL,
should_update=False,
num_rollouts=cfg.num_evaluation_rollouts,
num_workers=cfg.num_evaluation_workers,
seed=(None if cfg.seed is None else cfg.seed +
cfg.num_training_rollouts))
loops = LoopList([t_loop, e_loop])
learner = ApexDQNAgent(
learner_model,
replay_buffer=learner_replay_buffer,
controller=learner_ctrl,
optimizer=optimizer,
batch_size=cfg.batch_size,
max_grad_norm=cfg.max_grad_norm,
n_step=cfg.n_step,
gamma=cfg.gamma,
importance_sampling_exponent=cfg.importance_sampling_exponent,
value_clipping_eps=cfg.value_clipping_eps,
fr_kappa=cfg.fr_kappa,
target_sync_period=cfg.target_sync_period,
learning_starts=cfg.learning_starts,
model_push_period=cfg.model_push_period)
servers.start()
loops.start()
learner.connect()
learner_model.init_model()
learner_model.push()
start_time = time.perf_counter()
for epoch in range(cfg.num_epochs):
stats = learner.train(cfg.steps_per_epoch)
cur_time = time.perf_counter() - start_time
info = f"T Epoch {epoch}"
if cfg.table_view:
logging.info("\n\n" + stats.table(info, time=cur_time) + "\n")
else:
logging.info(
stats.json(info, phase="Train", epoch=epoch, time=cur_time))
time.sleep(1)
stats = learner.eval(cfg.num_evaluation_episodes,
keep_training_loops=True)
cur_time = time.perf_counter() - start_time
info = f"E Epoch {epoch}"
if cfg.table_view:
logging.info("\n\n" + stats.table(info, time=cur_time) + "\n")
else:
logging.info(
stats.json(info, phase="Eval", epoch=epoch, time=cur_time))
torch.save(model.state_dict(), f"dqn_agent-{epoch}.pth")
time.sleep(1)
loops.terminate()
servers.terminate()
if __name__ == "__main__":
mp.set_start_method("spawn")
main()
| 6,771 | 39.071006 | 78 | py |
rlmeta | rlmeta-main/examples/tutorials/loop_example.py | # Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import asyncio
import time
from typing import Optional
import numpy as np
import torch
import torch.multiprocessing as mp
import rlmeta.utils.remote_utils as remote_utils
from rlmeta.agents.agent import Agent
from rlmeta.core.controller import Controller, Phase
from rlmeta.core.loop import ParallelLoop
from rlmeta.core.server import Server
from rlmeta.core.types import Action, TimeStep
from rlmeta.envs.env import Env, EnvFactory
class MockEnv(Env):
def __init__(self,
index: int,
observation_space: int = 4,
action_space: int = 4,
episode_length: int = 10) -> None:
self.index = index
self.observation_space = observation_space
self.action_space = action_space
self.episode_length = episode_length
self.step_counter = 0
def reset(self, *args, **kwargs) -> TimeStep:
print(f"[Env {self.index}] reset")
print("")
self.step_counter = 0
obs = torch.randn(self.observation_space)
info = {"step_counter": 0}
        return TimeStep(obs, terminated=False, truncated=False, info=info)
def step(self, action: Action) -> TimeStep:
self.step_counter += 1
time.sleep(1.0)
obs = torch.randn(self.observation_space)
reward = np.random.randn()
        terminated = self.step_counter == self.episode_length
info = {"step_counter": self.step_counter}
print(
f"[Env {self.index}] step = {self.step_counter}, reward = {reward}")
print("")
        return TimeStep(obs, reward, terminated, False, info)
def close(self) -> None:
pass
def seed(self, seed: Optional[int] = None) -> None:
pass
class MockAgent(Agent):
def __init__(self, index: int, action_space: int = 4) -> None:
self.index = index
self.action_space = action_space
async def async_act(self, timestep: TimeStep) -> Action:
        step_counter = timestep.info["step_counter"]
await asyncio.sleep(1.0)
act = np.random.randint(self.action_space)
print(f"[Agent {self.index}] step = {step_counter}, action = {act}")
return Action(act)
async def async_observe_init(self, timestep: TimeStep) -> None:
pass
async def async_observe(self, action: Action,
next_timestep: TimeStep) -> None:
pass
async def async_update(self) -> None:
pass
def env_factory(index: int) -> MockEnv:
return MockEnv(index)
def agent_factory(index: int) -> MockAgent:
return MockAgent(index)
def main() -> None:
server = Server("server", "127.0.0.1:4411")
ctrl = Controller()
server.add_service(ctrl)
loop_ctrl = remote_utils.make_remote(ctrl, server)
main_ctrl = remote_utils.make_remote(ctrl, server)
loop = ParallelLoop(env_factory,
agent_factory,
loop_ctrl,
running_phase=Phase.EVAL,
num_rollouts=2,
num_workers=1)
server.start()
loop.start()
main_ctrl.connect()
main_ctrl.set_phase(Phase.EVAL, reset=True)
time.sleep(30)
loop.terminate()
server.terminate()
if __name__ == "__main__":
mp.set_start_method("spawn")
main()
| 3,493 | 27.177419 | 80 | py |
rlmeta | rlmeta-main/examples/tutorials/remote_example.py | # Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import asyncio
import torch
import torch.multiprocessing as mp
import rlmeta.core.remote as remote
import rlmeta.utils.remote_utils as remote_utils
from rlmeta.core.server import Server
class Adder(remote.Remotable):
@remote.remote_method()
def add(self, a, b):
print(f"[Adder.add] a = {a}")
print(f"[Adder.add] b = {b}")
return a + b
@remote.remote_method(batch_size=10)
def batch_add(self, a, b):
print(f"[Adder.batch_add] a = {a}")
print(f"[Adder.batch_add] b = {b}")
if not isinstance(a, tuple) and not isinstance(b, tuple):
return a + b
else:
return tuple(sum(x) for x in zip(a, b))
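# With batch_size=10, moolib appears to coalesce concurrent calls into one
# batched invocation: plain Python scalars arrive as tuples (handled by the
# zip/sum branch above), while tensors are stacked along a leading batch
# dimension (handled by the `a + b` branch); the result is split back to
# the individual callers.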
async def run_batch(adder_client, send_tensor=False):
futs = []
for i in range(20):
if send_tensor:
a = torch.tensor([i])
b = torch.tensor([i + 1])
else:
a = i
b = i + 1
fut = adder_client.async_batch_add(a, b)
futs.append(fut)
await asyncio.sleep(1.0)
for i, fut in enumerate(futs):
if send_tensor:
a = torch.tensor([i])
b = torch.tensor([i + 1])
else:
a = i
b = i + 1
c = await fut
print(f"{a} + {b} = {c}")
def main():
adder = Adder()
adder_server = Server(name="adder_server", addr="127.0.0.1:4411")
adder_server.add_service(adder)
adder_client = remote_utils.make_remote(adder, adder_server)
adder_server.start()
adder_client.connect()
a = 1
b = 2
c = adder_client.add(a, b)
print(f"{a} + {b} = {c}")
print("")
asyncio.run(run_batch(adder_client, send_tensor=False))
print("")
asyncio.run(run_batch(adder_client, send_tensor=True))
adder_server.terminate()
if __name__ == "__main__":
mp.set_start_method("spawn")
main()
| 2,053 | 22.883721 | 69 | py |
rlmeta | rlmeta-main/tests/test_utils.py | # Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import numpy as np
import torch
import rlmeta.utils.data_utils as data_utils
class TestCaseBase(unittest.TestCase):
def assert_tensor_equal(self, x, y):
self.assertTrue(isinstance(x, type(y)))
x = data_utils.to_numpy(x)
y = data_utils.to_numpy(y)
np.testing.assert_array_equal(x, y)
def assert_tensor_close(self, x, y, rtol=1e-7, atol=0):
self.assertTrue(isinstance(x, type(y)))
x = data_utils.to_numpy(x)
y = data_utils.to_numpy(y)
np.testing.assert_allclose(x, y, rtol, atol)
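    # Both helpers require the operands to share a type, then compare via
    # numpy; closeness follows np.testing.assert_allclose semantics:
    # |actual - desired| <= atol + rtol * |desired|.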
| 752 | 26.888889 | 65 | py |
rlmeta | rlmeta-main/tests/core/replay_buffer_test.py | # Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import torch
import rlmeta.utils.data_utils as data_utils
from rlmeta.core.replay_buffer import ReplayBuffer
from rlmeta.samplers import UniformSampler, PrioritizedSampler
from rlmeta.storage import CircularBuffer, TensorCircularBuffer
from tests.test_utils import TestCaseBase
class ReplayBufferTest(TestCaseBase):
def setUp(self) -> None:
self.size = 8
self.batch_size = 5
self.hidden_dim = 4
self.replay_buffer = ReplayBuffer(
CircularBuffer(self.size, collate_fn=torch.stack), UniformSampler())
self.flatten_data = dict(obs=torch.randn(self.batch_size,
self.hidden_dim),
rew=torch.randn(self.batch_size))
self.data = data_utils.unstack_fields(self.flatten_data,
self.batch_size)
def test_extend(self) -> None:
self.replay_buffer.reset()
keys = self.replay_buffer.extend(self.data)
expected_keys = torch.arange(self.batch_size)
self.assertEqual(len(self.replay_buffer), self.batch_size)
self.assert_tensor_equal(keys, expected_keys)
data = self.replay_buffer.get(keys)
self.assertEqual(data.keys(), self.flatten_data.keys())
for k, v in data.items():
self.assert_tensor_equal(v, self.flatten_data[k])
keys = self.replay_buffer.extend(self.data)
self.assertEqual(len(self.replay_buffer), self.size)
self.assert_tensor_equal(keys, expected_keys + self.batch_size)
data = self.replay_buffer.get(keys)
self.assertEqual(data.keys(), self.flatten_data.keys())
for k, v in data.items():
self.assert_tensor_equal(v, self.flatten_data[k])
def test_extend_stacked(self) -> None:
self.replay_buffer.reset()
keys = self.replay_buffer.extend(self.flatten_data, stacked=True)
expected_keys = torch.arange(self.batch_size)
self.assertEqual(len(self.replay_buffer), self.batch_size)
self.assert_tensor_equal(keys, expected_keys)
data = self.replay_buffer.get(keys)
self.assertEqual(data.keys(), self.flatten_data.keys())
for k, v in data.items():
self.assert_tensor_equal(v, self.flatten_data[k])
keys = self.replay_buffer.extend(self.flatten_data, stacked=True)
self.assertEqual(len(self.replay_buffer), self.size)
self.assert_tensor_equal(keys, expected_keys + self.batch_size)
data = self.replay_buffer.get(keys)
self.assertEqual(data.keys(), self.flatten_data.keys())
for k, v in data.items():
self.assert_tensor_equal(v, self.flatten_data[k])
def test_sample(self) -> None:
self.replay_buffer.reset()
self.replay_buffer.extend(self.data)
prob = 1.0 / self.batch_size
num_samples = self.batch_size
keys, _, probs = self.replay_buffer.sample(num_samples)
expected_probs = torch.full_like(probs, prob)
self.assert_tensor_equal(probs, expected_probs)
count = torch.bincount(keys)
self.assertEqual(count.max().item(), 1)
count = torch.zeros(self.batch_size, dtype=torch.int64)
for _ in range(20000):
keys, _, _ = self.replay_buffer.sample(3)
count[keys] += 1
actual_probs = count / count.sum()
expected_probs = torch.full_like(actual_probs, prob)
self.assert_tensor_close(actual_probs, expected_probs, atol=0.05)
# Test sample with replacement.
num_samples = 20000
keys, _, probs = self.replay_buffer.sample(num_samples,
replacement=True)
self.assert_tensor_equal(
probs, torch.full((num_samples,), prob, dtype=torch.float64))
actual_probs = torch.bincount(keys).float() / num_samples
expected_probs = torch.full_like(actual_probs, prob)
self.assert_tensor_close(actual_probs, expected_probs, atol=0.05)
def test_clear(self) -> None:
self.replay_buffer.reset()
self.replay_buffer.extend(self.data)
self.assertEqual(len(self.replay_buffer), len(self.data))
self.replay_buffer.clear()
self.assertEqual(len(self.replay_buffer), 0)
self.replay_buffer.extend(self.data)
self.assertEqual(len(self.replay_buffer), len(self.data))
class PrioritizedReplayBufferTest(TestCaseBase):
def setUp(self):
self.size = 8
self.batch_size = 5
self.hidden_dim = 4
self.flatten_data = dict(obs=torch.randn(self.batch_size,
self.hidden_dim),
rew=torch.randn(self.batch_size))
self.data = data_utils.unstack_fields(self.flatten_data,
self.batch_size)
def test_extend(self):
replay_buffer = ReplayBuffer(TensorCircularBuffer(self.size),
PrioritizedSampler(priority_exponent=0.6))
keys = replay_buffer.extend(self.data)
expected_keys = torch.arange(self.batch_size)
self.assertEqual(len(replay_buffer), self.batch_size)
self.assert_tensor_equal(keys, expected_keys)
data = replay_buffer.get(keys)
self.assertEqual(data.keys(), self.flatten_data.keys())
for k, v in data.items():
self.assert_tensor_equal(v, self.flatten_data[k])
keys = replay_buffer.extend(self.data)
self.assertEqual(len(replay_buffer), self.size)
self.assert_tensor_equal(keys, expected_keys + self.batch_size)
data = replay_buffer.get(keys)
self.assertEqual(data.keys(), self.flatten_data.keys())
for k, v in data.items():
self.assert_tensor_equal(v, self.flatten_data[k])
def test_extend_stacked(self):
replay_buffer = ReplayBuffer(TensorCircularBuffer(self.size),
PrioritizedSampler(priority_exponent=0.6))
keys = replay_buffer.extend(self.flatten_data, stacked=True)
expected_keys = torch.arange(self.batch_size)
self.assertEqual(len(replay_buffer), self.batch_size)
self.assert_tensor_equal(keys, expected_keys)
data = replay_buffer.get(keys)
self.assertEqual(data.keys(), self.flatten_data.keys())
for k, v in data.items():
self.assert_tensor_equal(v, self.flatten_data[k])
keys = replay_buffer.extend(self.flatten_data, stacked=True)
self.assertEqual(len(replay_buffer), self.size)
self.assert_tensor_equal(keys, expected_keys + self.batch_size)
data = replay_buffer.get(keys)
self.assertEqual(data.keys(), self.flatten_data.keys())
for k, v in data.items():
self.assert_tensor_equal(v, self.flatten_data[k])
def test_sample(self):
replay_buffer = ReplayBuffer(TensorCircularBuffer(self.size),
PrioritizedSampler(priority_exponent=1.0))
priorities = torch.rand((self.batch_size,)) * 10
expected_probs = priorities / priorities.sum()
replay_buffer.extend(self.data, priorities=priorities)
# Test sample without replacement.
# Disable this test because of stability.
# num_samples = self.batch_size
# keys, _, probs = replay_buffer.sample(num_samples)
# self.assert_tensor_close(probs,
# expected_probs[keys],
# rtol=1e-6,
# atol=1e-6)
# count = torch.bincount(keys)
# self.assertEqual(count.max().item(), 1)
# count = torch.zeros(self.batch_size, dtype=torch.int64)
# for _ in range(100000):
# keys, _, _ = replay_buffer.sample(3)
# count[keys] += 1
# actual_probs = count / count.sum()
# self.assert_tensor_close(actual_probs, expected_probs, atol=0.1)
# Test sample with replacement.
num_samples = 100000
keys, _, probs = replay_buffer.sample(num_samples, replacement=True)
actual_probs = torch.bincount(keys).float() / num_samples
self.assert_tensor_close(probs, expected_probs[keys], rtol=1e-6)
self.assert_tensor_close(actual_probs, expected_probs, atol=0.05)
def test_update(self):
alpha = 0.6
replay_buffer = ReplayBuffer(
TensorCircularBuffer(self.size),
PrioritizedSampler(priority_exponent=alpha))
priorities = torch.rand((self.batch_size,)) * 10
keys = replay_buffer.extend(self.data, priorities=priorities)
priorities = torch.rand((self.batch_size,)) * 10
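        # Prioritized sampling draws key i with probability proportional to
        # priority_i ** alpha (Schaul et al., 2015).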
expected_probs = priorities.pow(alpha)
expected_probs.div_(expected_probs.sum())
replay_buffer.update(keys, priorities)
num_samples = 100
keys, _, probs = replay_buffer.sample(num_samples, replacement=True)
self.assert_tensor_close(probs, expected_probs[keys], rtol=1e-6)
def test_reset(self) -> None:
replay_buffer = ReplayBuffer(TensorCircularBuffer(self.size),
PrioritizedSampler(priority_exponent=0.6))
replay_buffer.extend(self.data)
self.assertEqual(len(replay_buffer), len(self.data))
replay_buffer.reset()
self.assertEqual(len(replay_buffer), 0)
self.assertFalse(replay_buffer._storage._impl.initialized)
replay_buffer.extend(self.data)
self.assertEqual(len(replay_buffer), len(self.data))
def test_clear(self) -> None:
replay_buffer = ReplayBuffer(TensorCircularBuffer(self.size),
PrioritizedSampler(priority_exponent=0.6))
replay_buffer.extend(self.data)
self.assertEqual(len(replay_buffer), len(self.data))
replay_buffer.clear()
self.assertEqual(len(replay_buffer), 0)
self.assertTrue(replay_buffer._storage._impl.initialized)
replay_buffer.extend(self.data)
self.assertEqual(len(replay_buffer), len(self.data))
if __name__ == "__main__":
unittest.main()
| 10,383 | 42.087137 | 80 | py |
rlmeta | rlmeta-main/tests/core/remotable_test.py | # Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import numpy as np
import torch
import rlmeta.core.remote as remote
import rlmeta.utils.remote_utils as remote_utils
from rlmeta.core.server import Server
class RemotableAdder(remote.Remotable):
@remote.remote_method()
def add(self, a, b):
return a + b
class ReplayBufferTest(unittest.TestCase):
def test_add_multiple(self):
server = Server(name="adder_server", addr="127.0.0.1:4412")
adder1 = RemotableAdder('a')
adder2 = RemotableAdder('b')
self.assertEqual(adder1.identifier, 'a')
self.assertEqual(adder2.identifier, 'b')
server.add_service([adder1, adder2])
adder_client1 = remote_utils.make_remote(adder1, server)
adder_client2 = remote_utils.make_remote(adder2, server)
server.start()
adder_client1.connect()
c = adder_client1.add(1, 1)
self.assertEqual(c, 2)
adder_client2.connect()
c = adder_client2.add(1, 1)
self.assertEqual(c, 2)
server.terminate()
if __name__ == "__main__":
unittest.main()
| 1,261 | 26.434783 | 67 | py |
rlmeta | rlmeta-main/tests/core/rescalers_test.py | # Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import numpy as np
import torch
from rlmeta.core.rescalers import MomentsRescaler, RMSRescaler, SqrtRescaler
from tests.test_utils import TestCaseBase
class RescalerTest(TestCaseBase):
def setUp(self) -> None:
self.size = (4, 5)
self.rtol = 1e-5
self.atol = 1e-5
def test_rms_rescaler(self) -> None:
rms_rescaler = RMSRescaler(self.size)
batch_size = np.random.randint(low=1, high=10)
data = torch.rand(batch_size, *self.size)
for x in torch.unbind(data):
rms_rescaler.update(x)
x = torch.rand(*self.size)
y = rms_rescaler.rescale(x)
y_expected = x / data.square().mean(dim=0).sqrt()
self.assert_tensor_close(y, y_expected, rtol=self.rtol, atol=self.atol)
self.assert_tensor_close(rms_rescaler.recover(y),
x,
rtol=self.rtol,
atol=self.atol)
def test_norm_rescaler(self) -> None:
norm_rescaler = MomentsRescaler(self.size)
batch_size = np.random.randint(low=1, high=10)
data = torch.rand(batch_size, *self.size)
for x in torch.unbind(data):
norm_rescaler.update(x)
x = torch.rand(*self.size)
y = norm_rescaler.rescale(x)
if batch_size == 1:
y_expected = x
else:
y_expected = (x - data.mean(dim=0)) / data.std(dim=0,
unbiased=False)
self.assert_tensor_close(y, y_expected, rtol=self.rtol, atol=self.atol)
self.assert_tensor_close(norm_rescaler.recover(y),
x,
rtol=self.rtol,
atol=self.atol)
def test_sqrt_rescaler(self) -> None:
eps = np.random.choice([0.0, 1e-5, 1e-3, 2e-2, 0.5])
sqrt_rescaler = SqrtRescaler(eps)
x = torch.randn(*self.size, dtype=torch.float64)
y = sqrt_rescaler.rescale(x)
y_expected = x.sign() * ((x.abs() + 1).sqrt() - 1) + eps * x
self.assert_tensor_close(y, y_expected, rtol=self.rtol, atol=self.atol)
self.assert_tensor_close(sqrt_rescaler.recover(y),
x,
rtol=self.rtol,
atol=self.atol)
if __name__ == "__main__":
unittest.main()
| 2,621 | 33.5 | 79 | py |
rlmeta | rlmeta-main/tests/utils/running_stats_test.py | # Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import torch
from rlmeta.utils.running_stats import RunningMoments, RunningRMS
from tests.test_utils import TestCaseBase
class RunningRMSTest(TestCaseBase):
def setUp(self) -> None:
self.outer_size = 10
self.inner_size = (4, 5)
self.running_rms = RunningRMS(self.inner_size)
self.rtol = 1e-6
self.atol = 1e-6
def test_single_update(self) -> None:
input = torch.rand(self.outer_size, *self.inner_size)
self.running_rms.reset()
for x in torch.unbind(input):
self.running_rms.update(x)
self._verify_running_rms(input)
def test_batch_update(self) -> None:
input = torch.rand(self.outer_size, *self.inner_size)
split_size = [1, 2, 3, 4]
self.running_rms.reset()
for x in torch.split(input, split_size):
self.running_rms.update(x)
self._verify_running_rms(input)
def _verify_running_rms(self, input: torch.Tensor) -> None:
self.assert_tensor_equal(self.running_rms.count(),
torch.tensor([self.outer_size]))
self.assert_tensor_close(self.running_rms.mean_square(),
input.square().mean(dim=0),
rtol=self.rtol,
atol=self.atol)
self.assert_tensor_close(self.running_rms.rms(),
input.square().mean(dim=0).sqrt(),
rtol=self.rtol,
atol=self.atol)
self.assert_tensor_close(self.running_rms.rrms(),
input.square().mean(dim=0).rsqrt(),
rtol=self.rtol,
atol=self.atol)
class RunningMomentsTest(TestCaseBase):
def setUp(self) -> None:
self.outer_size = 10
self.inner_size = (4, 5)
self.running_moments = RunningMoments(self.inner_size)
self.rtol = 1e-6
self.atol = 1e-6
def test_single_update(self) -> None:
input = torch.rand(self.outer_size, *self.inner_size)
self.running_moments.reset()
for x in torch.unbind(input):
self.running_moments.update(x)
self._verify_running_moments(input)
def test_batch_update(self) -> None:
input = torch.rand(self.outer_size, *self.inner_size)
split_size = [1, 2, 3, 4]
self.running_moments.reset()
for x in torch.split(input, split_size):
self.running_moments.update(x)
self._verify_running_moments(input)
def _verify_running_moments(self, input: torch.Tensor) -> None:
self.assert_tensor_equal(self.running_moments.count(),
torch.tensor([self.outer_size]))
self.assert_tensor_close(self.running_moments.mean(),
input.mean(dim=0),
rtol=self.rtol,
atol=self.atol)
self.assert_tensor_close(self.running_moments.var(),
input.var(dim=0, unbiased=False),
rtol=self.rtol,
atol=self.atol)
self.assert_tensor_close(self.running_moments.var(ddof=1),
input.var(dim=0, unbiased=True),
rtol=self.rtol,
atol=self.atol)
self.assert_tensor_close(self.running_moments.std(),
input.std(dim=0, unbiased=False),
rtol=self.rtol,
atol=self.atol)
self.assert_tensor_close(self.running_moments.std(ddof=1),
input.std(dim=0, unbiased=True),
rtol=self.rtol,
atol=self.atol)
self.assert_tensor_close(self.running_moments.rstd(),
input.std(dim=0, unbiased=False).reciprocal(),
rtol=self.rtol,
atol=self.atol)
self.assert_tensor_close(self.running_moments.rstd(ddof=1),
input.std(dim=0, unbiased=True).reciprocal(),
rtol=self.rtol,
atol=self.atol)
if __name__ == "__main__":
unittest.main()
| 4,653 | 39.824561 | 79 | py |
rlmeta | rlmeta-main/tests/data/segment_tree_test.py | # Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import pickle
import unittest
from math import prod
import numpy as np
import torch
from rlmeta.data import SumSegmentTree
from tests.test_utils import TestCaseBase
class SumSegmentTreeTest(TestCaseBase):
def setUp(self) -> None:
self.size = 100
self.data = torch.randn(self.size)
self.segment_tree = SumSegmentTree(self.size, dtype=np.float32)
self.segment_tree[torch.arange(self.size)] = self.data
self.query_size = (2, 3, 4)
def test_at(self) -> None:
index = torch.randint(self.size, self.query_size)
value = self.segment_tree[index]
self.assert_tensor_equal(value, self.data[index])
value = self.segment_tree.at(index)
self.assert_tensor_equal(value, self.data[index])
value = self.segment_tree[index.numpy()]
self.assert_tensor_equal(value, self.data[index].numpy())
value = self.segment_tree.at(index.numpy())
self.assert_tensor_equal(value, self.data[index].numpy())
def test_update(self) -> None:
weights = torch.ones(self.size)
index = weights.multinomial(prod(self.query_size), replacement=False)
index = index.view(self.query_size)
origin_value = self.segment_tree[index]
value = np.random.randn()
self.segment_tree[index] = value
self.assert_tensor_equal(self.segment_tree[index],
torch.full(self.query_size, value))
self.segment_tree[index] = origin_value
value = np.random.randn()
self.segment_tree.update(index, value)
self.assert_tensor_equal(self.segment_tree[index],
torch.full(self.query_size, value))
self.segment_tree[index] = origin_value
value = torch.randn(self.query_size)
self.segment_tree[index] = value
self.assert_tensor_equal(self.segment_tree[index], value)
self.segment_tree[index] = origin_value
value = torch.randn(self.query_size)
self.segment_tree.update(index, value)
self.assert_tensor_equal(self.segment_tree[index], value)
self.segment_tree[index] = origin_value
def test_masked_update(self) -> None:
weights = torch.ones(self.size)
index = weights.multinomial(prod(self.query_size), replacement=False)
index = index.view(self.query_size)
origin_value = self.segment_tree[index]
mask = torch.randint(2, size=self.query_size, dtype=torch.bool)
value = torch.randn(self.query_size)
self.segment_tree.update(index, value, mask)
self.assert_tensor_equal(self.segment_tree[index],
torch.where(mask, value, origin_value))
self.segment_tree[index] = origin_value
def test_query(self) -> None:
a = torch.randint(self.size, self.query_size)
b = torch.randint(self.size, self.query_size)
l = torch.minimum(a, b)
r = torch.maximum(a, b)
value = self.segment_tree.query(l, r)
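        # query(l, r) sums over the half-open interval [l, r), mirroring the
        # data[l:r].sum() reference computed below.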
l_list = l.view(-1).tolist()
r_list = r.view(-1).tolist()
ret = []
for (x, y) in zip(l_list, r_list):
ret.append(self.data[x:y].sum())
ret = torch.tensor(ret).view(self.query_size)
self.assert_tensor_close(value, ret, rtol=1e-6, atol=1e-6)
def test_pickle(self) -> None:
s = pickle.dumps(self.segment_tree)
t = pickle.loads(s)
self.assert_tensor_equal(t[torch.arange(self.size)], self.data)
for _ in range(10):
l = np.random.randint(self.size)
r = np.random.randint(self.size)
if l > r:
l, r = r, l
ret = t.query(l, r)
ans = self.data[l:r].sum().item()
self.assertAlmostEqual(ret, ans, places=5)
if __name__ == "__main__":
unittest.main()
| 4,036 | 35.044643 | 77 | py |
rlmeta | rlmeta-main/tests/ops/discounted_return_test.py | # Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
from typing import Union
import torch
import rlmeta.ops as ops
from tests.test_utils import TestCaseBase
class DiscountReturnTest(TestCaseBase):
def setUp(self) -> None:
self.rtol = 1e-6
self.atol = 1e-6
def test_discounted_return_with_scalar_gamma(self) -> None:
n = 100
gamma = torch.rand(1).item()
reward = torch.randn(n)
g = ops.discounted_return(reward, gamma)
expected_g = self._discounted_return(reward, gamma)
self.assert_tensor_close(g, expected_g, rtol=self.rtol, atol=self.atol)
reward = torch.randn(n, 1)
g = ops.discounted_return(reward, gamma)
expected_g = self._discounted_return(reward, gamma)
self.assert_tensor_close(g, expected_g, rtol=self.rtol, atol=self.atol)
def test_discounted_return_with_tensor_gamma(self) -> None:
n = 200
reward = torch.randn(n)
gamma = torch.rand(1)
g = ops.discounted_return(reward, gamma)
expected_g = self._discounted_return(reward, gamma)
self.assert_tensor_close(g, expected_g, rtol=self.rtol, atol=self.atol)
reward = torch.randn(n, 1)
gamma = torch.rand(n, 1)
g = ops.discounted_return(reward, gamma)
expected_g = self._discounted_return(reward, gamma)
self.assert_tensor_close(g, expected_g, rtol=self.rtol, atol=self.atol)
def _discounted_return(self, reward: torch.Tensor,
gamma: Union[float, torch.Tensor]) -> torch.Tensor:
n = reward.size(0)
g = torch.zeros(1)
ret = []
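        # Backward recurrence for the discounted return:
        #   G_t = r_t + gamma_t * G_{t+1}, with G_n = 0.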
for i in range(n - 1, -1, -1):
if isinstance(gamma, float):
gamma_i = gamma
elif gamma.numel() == 1:
gamma_i = gamma.item()
else:
gamma_i = gamma[i].item()
g = reward[i] + gamma_i * g
ret.append(g)
ret = torch.stack(tuple(reversed(ret)))
if reward.dim() == 1:
ret.squeeze_(-1)
return ret
if __name__ == "__main__":
unittest.main()
| 2,278 | 29.797297 | 79 | py |
rlmeta | rlmeta-main/tests/ops/generalized_advantage_estimation_test.py | # Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
from typing import Optional, Union
import torch
import rlmeta.ops as ops
from tests.test_utils import TestCaseBase
class GeneralizedAdvantageEstimationTest(TestCaseBase):
def setUp(self) -> None:
self.rtol = 1e-6
self.atol = 1e-6
def test_gae_with_scalar_parameter(self) -> None:
n = 100
gamma = torch.rand(1).item()
gae_lambda = torch.rand(1).item()
reward = torch.randn(n)
value = torch.randn(n)
gae = ops.generalized_advantage_estimation(reward, value, gamma,
gae_lambda)
expected_gae = self._gae(reward, value, gamma, gae_lambda)
self.assert_tensor_close(gae,
expected_gae,
rtol=self.rtol,
atol=self.atol)
reward = torch.randn(n, 1)
value = torch.randn(n, 1)
last_v = torch.randn(1)
gae = ops.generalized_advantage_estimation(reward, value, gamma,
gae_lambda, last_v)
expected_gae = self._gae(reward, value, gamma, gae_lambda, last_v)
self.assert_tensor_close(gae,
expected_gae,
rtol=self.rtol,
atol=self.atol)
def test_gae_with_tensor_parameter(self) -> None:
n = 200
reward = torch.randn(n)
value = torch.randn(n)
gamma = torch.rand(1)
gae_lambda = torch.rand(1)
gae = ops.generalized_advantage_estimation(reward, value, gamma,
gae_lambda)
expected_gae = self._gae(reward, value, gamma, gae_lambda)
self.assert_tensor_close(gae,
expected_gae,
rtol=self.rtol,
atol=self.atol)
reward = torch.randn(n, 1)
value = torch.randn(n, 1)
gamma = torch.rand(n, 1)
gae_lambda = torch.rand(n, 1)
last_v = torch.randn(1)
gae = ops.generalized_advantage_estimation(reward, value, gamma,
gae_lambda, last_v)
expected_gae = self._gae(reward, value, gamma, gae_lambda, last_v)
self.assert_tensor_close(gae,
expected_gae,
rtol=self.rtol,
atol=self.atol)
def _gae(self,
reward: torch.Tensor,
value: torch.Tensor,
gamma: Union[float, torch.Tensor],
gae_lambda: Union[float, torch.Tensor],
last_v: Optional[torch.Tensor] = None) -> torch.Tensor:
n = reward.size(0)
v = torch.zeros(1) if last_v is None else last_v
adv = torch.zeros(1)
gae = []
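        # Backward recurrence for GAE (Schulman et al., 2015):
        #   delta_t = r_t + gamma_t * V(s_{t+1}) - V(s_t)
        #   A_t = delta_t + gamma_t * lambda_t * A_{t+1}
        # with V(s_n) taken from last_v (zero when not provided).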
for i in range(n - 1, -1, -1):
if isinstance(gamma, float):
gamma_i = gamma
elif gamma.numel() == 1:
gamma_i = gamma.item()
else:
gamma_i = gamma[i].item()
if isinstance(gae_lambda, float):
lambda_i = gae_lambda
elif gae_lambda.numel() == 1:
lambda_i = gae_lambda.item()
else:
lambda_i = gae_lambda[i].item()
delta = reward[i] + gamma_i * v - value[i]
v = value[i]
adv = delta + gamma_i * lambda_i * adv
gae.append(adv)
gae = torch.stack(tuple(reversed(gae)))
if reward.dim() == 1:
gae.squeeze_(-1)
return gae
if __name__ == "__main__":
unittest.main()
| 3,929 | 33.173913 | 74 | py |
rlmeta | rlmeta-main/rlmeta/core/loop.py | # Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
import abc
import asyncio
import copy
import logging
import time
from typing import Dict, List, NoReturn, Optional, Sequence, Union
from rich.console import Console
import torch
import torch.multiprocessing as mp
import moolib
import rlmeta.core.remote as remote
import rlmeta.utils.asyncio_utils as asyncio_utils
import rlmeta.utils.moolib_utils as moolib_utils
from rlmeta.agents.agent import Agent, AgentFactory
from rlmeta.core.callbacks import EpisodeCallbacks
from rlmeta.core.controller import Controller, ControllerLike, Phase
from rlmeta.core.launchable import Launchable
from rlmeta.envs.env import Env, EnvFactory
console = Console()
class Loop(abc.ABC):
@abc.abstractmethod
def run(self, num_episodes: Optional[int] = None) -> None:
"""
"""
class AsyncLoop(Loop, Launchable):
def __init__(self,
env_factory: EnvFactory,
agent_factory: AgentFactory,
controller: ControllerLike,
running_phase: Phase,
should_update: bool = False,
num_rollouts: int = 1,
index: int = 0,
index_offset: Optional[int] = None,
seed: Optional[int] = None,
episode_callbacks: Optional[EpisodeCallbacks] = None) -> None:
self._running_phase = running_phase
self._should_update = should_update
self._index = index
self._num_rollouts = num_rollouts
if index_offset is None:
self._index_offset = index * num_rollouts
else:
self._index_offset = index_offset
self._seed = seed
self._env_factory = env_factory
self._agent_factory = agent_factory
self._envs = []
self._agents = []
self._controller = controller
self._loop = None
self._tasks = []
self._running = False
self._episode_callbacks = episode_callbacks
@property
def running_phase(self) -> Phase:
return self._running_phase
@property
def should_update(self) -> bool:
return self._should_update
@property
def num_rollouts(self) -> int:
return self._num_rollouts
@property
def index(self) -> int:
return self._index
@property
def index_offset(self) -> int:
return self._index_offset
@property
def seed(self) -> Optional[int]:
return self._seed
@property
def running(self) -> bool:
return self._running
@running.setter
def running(self, running: bool) -> None:
self._running = running
def init_launching(self) -> None:
pass
def init_execution(self) -> None:
for i in range(self._num_rollouts):
env = self._env_factory(self.index_offset + i)
if self.seed is not None:
env.reset(seed=self.seed + self.index_offset + i)
self._envs.append(env)
for i in range(self._num_rollouts):
agent = self._agent_factory(self.index_offset + i)
agent.connect()
# if self.seed is not None:
# agent.seed(self.seed + self.index_offset + i)
self._agents.append(agent)
for obj_name in dir(self):
obj = getattr(self, obj_name)
if isinstance(obj, remote.Remote):
obj.name = moolib_utils.expend_name_by_index(
obj.name, self.index)
obj.connect()
for obj_name in dir(self):
obj = getattr(self, obj_name)
if isinstance(obj, Launchable):
obj.init_execution()
def run(self) -> NoReturn:
console.log(f"Starting async loop with: {self._controller}")
self._loop = asyncio.get_event_loop()
self._tasks.append(
asyncio_utils.create_task(self._loop, self._check_phase()))
for i, (env, agent) in enumerate(zip(self._envs, self._agents)):
index = self.index_offset + i
task = asyncio_utils.create_task(
self._loop,
self._run_loop(index, env, agent,
copy.deepcopy(self._episode_callbacks)))
self._tasks.append(task)
try:
self._loop.run_forever()
except Exception as e:
logging.error(e)
raise e
finally:
for task in self._tasks:
task.cancel()
self._loop.stop()
async def _check_phase(self) -> NoReturn:
while True:
cur_phase = await self._controller.async_phase()
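            # The bitwise test below treats Phase as a set of flags: the
            # loop is running only when running_phase is a subset of the
            # controller's current phase.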
self._running = ((cur_phase &
self._running_phase) == self._running_phase)
await asyncio.sleep(1)
async def _run_loop(
self,
index: int,
env: Env,
agent: Agent,
episode_callbacks: Optional[EpisodeCallbacks] = None) -> NoReturn:
while True:
while not self.running:
await asyncio.sleep(1)
stats = await self._run_episode(index, env, agent,
episode_callbacks)
if self.running and stats is not None:
await self._controller.async_add_episode(
self._running_phase, stats)
# The method _run_episode is adapted from Acme's Enviroment.run_episode:
# https://github.com/deepmind/acme/blob/df961057bcd2e1436d5f894ebced62d694225034/acme/environment_loop.py#L76
#
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
async def _run_episode(
self,
index: int,
env: Env,
agent: Agent,
episode_callbacks: Optional[EpisodeCallbacks] = None
) -> Optional[Dict[str, float]]:
episode_length = 0
episode_return = 0.0
start_time = time.perf_counter()
if episode_callbacks is not None:
episode_callbacks.reset()
episode_callbacks.on_episode_start(index)
timestep = env.reset()
await agent.async_observe_init(timestep)
if episode_callbacks is not None:
episode_callbacks.on_episode_init(index, timestep)
while not timestep.terminated and not timestep.truncated:
if not self.running:
return None
action = await agent.async_act(timestep)
timestep = env.step(action)
await agent.async_observe(action, timestep)
if self.should_update:
await agent.async_update()
episode_length += 1
episode_return += timestep.reward
if episode_callbacks is not None:
episode_callbacks.on_episode_step(index, episode_length - 1,
action, timestep)
episode_time = time.perf_counter() - start_time
steps_per_second = episode_length / episode_time
if episode_callbacks is not None:
episode_callbacks.on_episode_end(index)
metrics = {
"episode_length": float(episode_length),
"episode_return": episode_return,
"episode_time/s": episode_time,
"steps_per_second": steps_per_second,
}
if episode_callbacks is not None:
metrics.update(episode_callbacks.custom_metrics)
return metrics
class ParallelLoop(Loop):
def __init__(self,
env_factory: EnvFactory,
agent_factory: AgentFactory,
controller: Union[Controller, remote.Remote],
running_phase: Phase,
should_update: bool = False,
num_rollouts: int = 1,
num_workers: Optional[int] = None,
index: int = 0,
index_offset: Optional[int] = None,
seed: Optional[int] = None,
episode_callbacks: Optional[EpisodeCallbacks] = None) -> None:
self._running_phase = running_phase
self._should_update = should_update
self._index = index
self._num_rollouts = num_rollouts
self._num_workers = min(mp.cpu_count(), self._num_rollouts)
if num_workers is not None:
self._num_workers = min(self._num_workers, num_workers)
if index_offset is None:
self._index_offset = index * num_rollouts
else:
self._index_offset = index_offset
self._env_factory = env_factory
self._agent_factory = agent_factory
self._controller = controller
self._seed = seed
self._episode_callbacks = episode_callbacks
self._workloads = self._compute_workloads()
self._async_loops = []
self._processes = []
index_offset = self._index_offset
for i, workload in enumerate(self._workloads):
loop = AsyncLoop(self._env_factory, self._agent_factory,
self._controller, self._running_phase,
self._should_update, workload, i, index_offset,
self._seed, self._episode_callbacks)
self._async_loops.append(loop)
index_offset += workload
@property
def running_phase(self) -> Phase:
return self._running_phase
@property
def should_update(self) -> bool:
return self._should_update
@property
def num_rollouts(self) -> int:
return self._num_rollouts
@property
def num_workers(self) -> int:
return self._num_workers
@property
def index(self) -> int:
return self._index
@property
def index_offset(self) -> int:
return self._index_offset
@property
def seed(self) -> Optional[int]:
return self._seed
def run(self) -> NoReturn:
self.start()
self.join()
def start(self) -> None:
processes = []
for loop in self._async_loops:
loop.init_launching()
process = mp.Process(target=self._run_async_loop, args=(loop,))
processes.append(process)
for process in processes:
process.start()
self._processes = processes
def join(self) -> None:
for process in self._processes:
process.join()
def terminate(self) -> None:
for process in self._processes:
process.terminate()
def _compute_workloads(self) -> List[int]:
workload = self.num_rollouts // self.num_workers
r = self.num_rollouts % self.num_workers
workloads = [workload + 1] * r + [workload] * (self.num_workers - r)
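        # e.g. num_rollouts=10, num_workers=4 gives workloads [3, 3, 2, 2].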
return workloads
def _run_async_loop(self, loop: AsyncLoop) -> NoReturn:
if loop.seed is not None:
torch.manual_seed(loop.seed + loop.index_offset)
loop.init_execution()
loop.run()
class LoopList:
def __init__(self, loops: Optional[Sequence[Loop]] = None) -> None:
self._loops = []
if loops is not None:
self._loops.extend(loops)
@property
def loops(self) -> List[Loop]:
return self._loops
def append(self, loop: Loop) -> None:
self.loops.append(loop)
def extend(self, loops: Union[LoopList, Sequence[Loop]]) -> None:
if isinstance(loops, LoopList):
self.loops.extend(loops.loops)
else:
self.loops.extend(loops)
def start(self) -> None:
for loop in self.loops:
loop.start()
def join(self) -> None:
for loop in self.loops:
loop.join()
def terminate(self) -> None:
for loop in self.loops:
loop.terminate()
LoopLike = Union[Loop, LoopList]
| 12,555 | 30.949109 | 113 | py |
rlmeta | rlmeta-main/rlmeta/core/server.py | # Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
import asyncio
import logging
from typing import Any, Callable, List, NoReturn, Optional, Sequence, Union
import torch
import torch.multiprocessing as mp
from rich.console import Console
import moolib
import rlmeta.utils.asyncio_utils as asyncio_utils
from rlmeta.core.launchable import Launchable
from rlmeta.core.remote import Remotable
console = Console()
class Server(Launchable):
def __init__(self, name: str, addr: str, timeout: float = 60) -> None:
self._name = name
self._addr = addr
self._timeout = timeout
self._services = []
self._process = None
self._server = None
self._loop = None
self._tasks = None
def __repr__(self):
return f'Server(name={self._name} addr={self._addr})'
@property
def name(self) -> str:
return self._name
@property
def addr(self) -> str:
return self._addr
@property
def timeout(self) -> float:
return self._timeout
def add_service(self, service: Union[Remotable,
Sequence[Remotable]]) -> None:
if isinstance(service, (list, tuple)):
self._services.extend(service)
else:
self._services.append(service)
def start(self) -> None:
self.init_launching()
self._process = mp.Process(target=self.run)
self._process.start()
def join(self) -> None:
self._process.join()
def terminate(self) -> None:
if self._process is not None:
self._process.terminate()
def run(self) -> NoReturn:
self.init_execution()
self._start_services()
def init_launching(self) -> None:
for service in self._services:
if isinstance(service, Launchable):
service.init_launching()
def init_execution(self) -> None:
for service in self._services:
if isinstance(service, Launchable):
service.init_execution()
self._server = moolib.Rpc()
self._server.set_name(self._name)
self._server.set_timeout(self._timeout)
console.log(f"Server={self.name} listening to {self._addr}")
try:
self._server.listen(self._addr)
except:
console.log(f"ERROR on listen({self._addr}) from: server={self}")
raise
def _start_services(self) -> NoReturn:
self._loop = asyncio.get_event_loop()
self._tasks = []
console.log(f"Server={self.name} starting services: {self._services}")
for service in self._services:
for method in service.remote_methods:
method_impl = getattr(service, method)
batch_size = getattr(method_impl, "__batch_size__", None)
self._add_server_task(service.remote_method_name(method),
method_impl, batch_size)
try:
if not self._loop.is_running():
self._loop.run_forever()
except Exception as e:
logging.error(e)
raise
finally:
for task in self._tasks:
task.cancel()
self._loop.stop()
self._loop.close()
console.log(f"Server={self.name} services started")
def _add_server_task(self, func_name: str, func_impl: Callable[..., Any],
batch_size: Optional[int]) -> None:
if batch_size is None:
que = self._server.define_queue(func_name)
else:
que = self._server.define_queue(func_name,
batch_size=batch_size,
dynamic_batching=True)
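        # With dynamic_batching=True, moolib queues concurrent client calls
        # and hands them to the method as one batched call of at most
        # batch_size requests.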
task = asyncio_utils.create_task(self._loop,
self._async_process(que, func_impl))
self._tasks.append(task)
async def _async_process(self, que: moolib.Queue,
func: Callable[..., Any]) -> None:
try:
while True:
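                # Each queue item is (reply_callback, args, kwargs); calling
                # ret_cb sends the result back to the remote caller.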
ret_cb, args, kwargs = await que
ret = func(*args, **kwargs)
ret_cb(ret)
except asyncio.CancelledError:
pass
except Exception as e:
logging.error(e)
raise e
class ServerList:
def __init__(self, servers: Optional[Sequence[Server]] = None) -> None:
self._servers = []
if servers is not None:
self._servers.extend(servers)
def __getitem__(self, index: int) -> Server:
return self._servers[index]
@property
def servers(self) -> List[Server]:
return self._servers
def append(self, server: Server) -> None:
self.servers.append(server)
def extend(self, servers: Union[ServerList, Sequence[Server]]) -> None:
if isinstance(servers, ServerList):
self.servers.extend(servers.servers)
else:
self.servers.extend(servers)
def start(self) -> None:
for server in self.servers:
server.start()
def join(self) -> None:
for server in self.servers:
server.join()
def terminate(self) -> None:
for server in self.servers:
server.terminate()
ServerLike = Union[Server, ServerList]
| 5,518 | 28.994565 | 78 | py |
rlmeta | rlmeta-main/rlmeta/core/model.py | # Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import copy
import functools
import random
from enum import IntEnum
from typing import (Any, Awaitable, Callable, Dict, Optional, Sequence, Tuple,
Union)
from rich.console import Console
import numpy as np
import torch
import torch.nn as nn
import rlmeta.core.remote as remote
import rlmeta.ops as rlmeta_ops
import rlmeta.utils.data_utils as data_utils
import rlmeta.utils.nested_utils as nested_utils
import rlmeta.utils.random_utils as random_utils
from rlmeta.core.launchable import Launchable
from rlmeta.core.server import Server
from rlmeta.core.types import NestedTensor
from rlmeta.samplers import UniformSampler
from rlmeta.storage.circular_buffer import CircularBuffer
console = Console()
class ModelVersion(IntEnum):
# Use negative values for latest version flag to avoid conflict with real
# version.
LATEST = -0x7FFFFFFF
STABLE = -1
class RemotableModel(nn.Module, remote.Remotable):
def __init__(self, identifier: Optional[str] = None) -> None:
nn.Module.__init__(self)
remote.Remotable.__init__(self, identifier)
@property
def device(self) -> torch.device:
return next(self.parameters()).device
def init_model(self) -> None:
pass
class RemotableModelPool(remote.Remotable, Launchable):
def __init__(self,
model: RemotableModel,
capacity: int = 0,
seed: Optional[int] = None,
identifier: Optional[str] = None) -> None:
super().__init__(identifier)
self._model = model
self._capacity = capacity
self._seed = seed
if self._capacity > 0:
self._history = CircularBuffer(self._capacity)
@property
def capacity(self) -> int:
return self._capacity
@property
def seed(self) -> Optional[int]:
return self._seed
def init_launching(self) -> None:
self._model.share_memory()
def init_execution(self) -> None:
self._bind()
if self._seed is not None:
random_utils.manual_seed(self._seed)
self._model.init_model()
console.log(self._model)
def model(self, version: int = ModelVersion.LATEST) -> nn.Module:
return (self._model if version == ModelVersion.LATEST else
self._history[version][1])
@remote.remote_method(batch_size=None)
def pull(self,
version: int = ModelVersion.LATEST) -> Dict[str, torch.Tensor]:
state_dict = self.model(version).state_dict()
state_dict = nested_utils.map_nested(lambda x: x.cpu(), state_dict)
return state_dict
@remote.remote_method(batch_size=None)
def push(self, state_dict: Dict[str, torch.Tensor]) -> None:
# Move state_dict to device before loading.
# https://github.com/pytorch/pytorch/issues/34880
device = self._model.device
state_dict = nested_utils.map_nested(lambda x: x.to(device), state_dict)
self._model.load_state_dict(state_dict)
@remote.remote_method(batch_size=None)
def release(self) -> None:
if self._capacity > 0:
self._history.append(copy.deepcopy(self._model))
@remote.remote_method(batch_size=None)
def sample_model(self) -> int:
if self._capacity == 0:
return ModelVersion.LATEST
else:
return np.random.randint(len(self._history))
def _bind(self) -> None:
for method in self._model.remote_methods:
batch_size = getattr(getattr(self._model, method), "__batch_size__",
None)
method_name, method_impl = self._wrap_remote_method(
method, batch_size)
self.__remote_methods__.append(method_name)
setattr(self, method_name, method_impl)
for i in range(self._capacity):
method_name, method_impl = self._wrap_remote_method(
method, batch_size, i)
self.__remote_methods__.append(method_name)
setattr(self, method_name, method_impl)
method_name, method_impl = self._wrap_remote_method(
method, batch_size, -i - 1)
setattr(self, method_name, method_impl)
self.__remote_methods__.append(method_name)
def _wrap_remote_method(
self,
method: str,
batch_size: Optional[int] = None,
            version: int = ModelVersion.LATEST
    ) -> Tuple[str, Callable[..., Any]]:
method_name = method
if version != ModelVersion.LATEST:
method_name += f"[{version}]"
method_impl = functools.partial(self._dispatch_model_call, version,
method)
setattr(method_impl, "__remote__", True)
if batch_size is not None:
setattr(method_impl, "__batch_size__", batch_size)
return method_name, method_impl
def _dispatch_model_call(self, version: int, method: str, *args,
**kwargs) -> Any:
model = self.model(version)
device = model.device
args = nested_utils.map_nested(lambda x: x.to(device), args)
kwargs = nested_utils.map_nested(lambda x: x.to(device), kwargs)
ret = getattr(model, method)(*args, **kwargs)
ret = nested_utils.map_nested(lambda x: x.cpu(), ret)
return ret
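# Editor's note: _bind() re-exports each remote method of the wrapped model
# once per stored version, suffixing the method name with the version index
# (see _wrap_remote_method), e.g. "act", "act[0]", "act[-1]". RemoteModel
# below appends the same suffix, derived from its `version` attribute, when
# dispatching calls.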
class RemoteModel(remote.Remote):
def __init__(self,
target: RemotableModel,
server_name: str,
server_addr: str,
name: Optional[str] = None,
version: int = ModelVersion.LATEST,
timeout: float = 60) -> None:
super().__init__(target, server_name, server_addr, name, timeout)
self._version = version
@property
def version(self) -> int:
return self._version
@version.setter
def version(self, version: int) -> None:
self._version = version
def sample_model(self,
num_samples: int = 1,
replacement: bool = False) -> torch.Tensor:
return self.client.sync(self.server_name,
self.remote_method_name("sample_model"),
num_samples, replacement)
async def async_sample_model(self,
num_samples: int = 1,
replacement: bool = False) -> torch.Tensor:
return await self.client.async_(self.server_name,
self.remote_method_name("sample_model"),
num_samples, replacement)
def _bind(self) -> None:
for method in self._remote_methods:
method_name = self.remote_method_name(method)
self._client_methods[method] = functools.partial(
self._remote_model_call, method_name)
self._client_methods["async_" + method] = functools.partial(
self._async_remote_model_call, method_name)
def _remote_model_call(self, method: str, *args, **kwargs) -> Any:
method_name = method
if self._version != ModelVersion.LATEST:
method_name += f"[{self._version}]"
return self.client.sync(self.server_name, method_name, *args, **kwargs)
def _async_remote_model_call(self, method: str, *args,
**kwargs) -> Awaitable:
method_name = method
if self._version != ModelVersion.LATEST:
method_name += f"[{self._version}]"
return self.client.async_(self.server_name, method_name, *args,
**kwargs)
class DownstreamModel(remote.Remote):
def __init__(self,
model: nn.Module,
server_name: str,
server_addr: str,
name: Optional[str] = None,
timeout: float = 60) -> None:
self._wrapped = model
self._reset(server_name, server_addr, name, timeout)
# TODO: Find a better way to implement this
def __getattribute__(self, attr: str) -> Any:
try:
return object.__getattribute__(self, attr)
except AttributeError:
return getattr(object.__getattribute__(self, "_wrapped"), attr)
@property
def wrapped(self) -> nn.Module:
return self._wrapped
def __call__(self, *args, **kwargs) -> Any:
return self.wrapped(*args, **kwargs)
def pull(self, version: int = ModelVersion.LATEST) -> None:
state_dict = self.client.sync(self.server_name,
self.remote_method_name("pull"), version)
self.wrapped.load_state_dict(state_dict)
async def async_pull(self, version: int = ModelVersion.LATEST) -> None:
state_dict = await self.client.async_(self.server_name,
self.remote_method_name("pull"),
version)
self.wrapped.load_state_dict(state_dict)
def push(self) -> None:
state_dict = self.wrapped.state_dict()
state_dict = nested_utils.map_nested(lambda x: x.cpu(), state_dict)
self.client.sync(self.server_name, self.remote_method_name("push"),
state_dict)
async def async_push(self) -> None:
state_dict = self.wrapped.state_dict()
state_dict = nested_utils.map_nested(lambda x: x.cpu(), state_dict)
await self.client.async_(self.server_name,
self.remote_method_name("push"), state_dict)
def release(self) -> None:
self.client.sync(self.server_name, self.remote_method_name("release"))
async def async_release(self) -> None:
await self.client.async_(self.server_name,
self.remote_method_name("release"))
def sample_model(self,
num_samples: int = 1,
replacement: bool = False) -> torch.Tensor:
return self.client.sync(self.server_name,
self.remote_method_name("sample_model"),
num_samples, replacement)
async def async_sample_model(self,
num_samples: int = 1,
replacement: bool = False) -> torch.Tensor:
return await self.client.async_(self.server_name,
self.remote_method_name("sample_model"),
num_samples, replacement)
def _bind(self) -> None:
pass
ModelLike = Union[nn.Module, RemotableModel, RemoteModel, DownstreamModel,
remote.Remote]
def make_remote_model(model: Union[RemotableModel, RemotableModelPool],
server: Server,
name: Optional[str] = None,
version: int = ModelVersion.LATEST,
timeout: float = 60) -> RemoteModel:
if isinstance(model, RemotableModelPool):
model = model.model()
return RemoteModel(model, server.name, server.addr, name, version, timeout)
def wrap_downstream_model(model: RemotableModel,
server: Server,
name: Optional[str] = None,
timeout: float = 60) -> DownstreamModel:
return DownstreamModel(model, server.name, server.addr, name, timeout)
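# Editor's sketch (illustrative, not part of the original file): typical
# wiring, assuming `MyModel` is a concrete RemotableModel and the server
# exposes an `add_service`-style registration as used elsewhere in this repo.
#
#   pool = RemotableModelPool(MyModel(), capacity=10)
#   server.add_service(pool)                       # serve the weights
#   actor_model = make_remote_model(pool, server)  # read-only actor handle
#   learner_model = wrap_downstream_model(MyModel(), server)
#   learner_model.push()     # upload learner weights to the pool
#   learner_model.release()  # snapshot them into the version history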
| 11,670 | 35.358255 | 80 | py |
rlmeta | rlmeta-main/rlmeta/core/types.py | # Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import torch
from typing import Any, NamedTuple, Optional, Union
Tensor = Union[np.ndarray, torch.Tensor]
# NestedTensor is adapted from Acme's NestedTensor
# https://github.com/deepmind/acme/blob/df961057bcd2e1436d5f894ebced62d694225034/acme/types.py#L23
#
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
NestedTensor = Any
# TimeStep is Inspired from dm_env's TimeStep:
# https://github.com/deepmind/dm_env/blob/abee135a07cc8e684173586dc8a20e696bbd40fb/dm_env/_environment.py#L25
#
# Copyright 2019 The dm_env Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class TimeStep(NamedTuple):
observation: Any
reward: Optional[float] = None
terminated: bool = False
truncated: bool = False
info: Optional[Any] = None
class Action(NamedTuple):
action: Any
info: Optional[Any] = None
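if __name__ == "__main__":
    # Editor's sketch: constructing the two core message types. A freshly
    # reset environment would produce a TimeStep like this one.
    step = TimeStep(observation=torch.zeros(4))
    act = Action(action=torch.tensor([1]), info={"logpi": torch.zeros(1)})
    print(step.terminated, step.truncated, act.action)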
| 2,108 | 33.57377 | 109 | py |
rlmeta | rlmeta-main/rlmeta/core/rescalers.py | # Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import abc
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from rlmeta.utils.running_stats import RunningMoments, RunningRMS
class Rescaler(nn.Module, abc.ABC):
def forward(self, x: torch.Tensor) -> torch.Tensor:
return self.rescale(x)
def reset(self) -> None:
pass
def update(self, x: torch.Tensor) -> None:
pass
@abc.abstractmethod
def rescale(self, x: torch.Tensor) -> torch.Tensor:
"""
Do rescale for the input tensor.
"""
@abc.abstractmethod
def recover(self, x: torch.Tensor) -> torch.Tensor:
"""
Undo rescale for the input tensor.
"""
class IdentityRescaler(Rescaler):
def rescale(self, x: torch.Tensor) -> torch.Tensor:
return x
def recover(self, x: torch.Tensor) -> torch.Tensor:
return x
class RMSRescaler(Rescaler):
def __init__(self,
size: Union[int, Tuple[int]],
eps: float = 1e-8,
dtype: torch.dtype = torch.float64) -> None:
super().__init__()
self._size = size
self._eps = eps
self._running_rms = RunningRMS(size, dtype=dtype)
@property
def size(self) -> Union[int, Tuple[int]]:
return self._size
@property
def eps(self) -> float:
return self._eps
def reset(self) -> None:
self._running_rms.reset()
def update(self, x: torch.Tensor) -> None:
self._running_rms.update(x)
def rescale(self, x: torch.Tensor) -> torch.Tensor:
return (x * self._running_rms.rrms(self._eps)).to(x.dtype)
def recover(self, x: torch.Tensor) -> torch.Tensor:
return (x * self._running_rms.rms(self._eps)).to(x.dtype)
class MomentsRescaler(Rescaler):
def __init__(self,
size: Union[int, Tuple[int]],
ddof: int = 0,
eps: float = 1e-8,
dtype: torch.dtype = torch.float64) -> None:
super().__init__()
self._size = size
self._ddof = ddof
self._eps = eps
self._running_moments = RunningMoments(size, dtype=dtype)
@property
def size(self) -> Union[int, Tuple[int]]:
return self._size
@property
def ddof(self) -> int:
return self._ddof
@property
def eps(self) -> float:
return self._eps
def reset(self) -> None:
self._running_moments.reset()
def update(self, x: torch.Tensor) -> None:
self._running_moments.update(x)
def rescale(self, x: torch.Tensor) -> torch.Tensor:
return x if self._running_moments.count() <= 1 else (
(x - self._running_moments.mean()) *
self._running_moments.rstd(self._ddof, self._eps)).to(x.dtype)
def recover(self, x: torch.Tensor) -> torch.Tensor:
return x if self._running_moments.count() <= 1 else (
(x * self._running_moments.std(self._ddof, self._eps)) +
self._running_moments.mean()).to(x.dtype)
class StdRescaler(Rescaler):
def __init__(self,
size: Union[int, Tuple[int]],
ddof: int = 0,
eps: float = 1e-8,
dtype: torch.dtype = torch.float64) -> None:
super().__init__()
self._size = size
self._ddof = ddof
self._eps = eps
self._running_moments = RunningMoments(size, dtype=dtype)
@property
def size(self) -> Union[int, Tuple[int]]:
return self._size
@property
def ddof(self) -> int:
return self._ddof
@property
def eps(self) -> float:
return self._eps
def reset(self) -> None:
self._running_moments.reset()
def update(self, x: torch.Tensor) -> None:
self._running_moments.update(x)
def rescale(self, x: torch.Tensor) -> torch.Tensor:
return x if self._running_moments.count() <= 1 else (
x * self._running_moments.rstd(self._ddof, self._eps)).to(x.dtype)
def recover(self, x: torch.Tensor) -> torch.Tensor:
return x if self._running_moments.count() <= 1 else (
x * self._running_moments.std(self._ddof, self._eps)).to(x.dtype)
class SignedHyperbolicRescaler(Rescaler):
"""
Transformed Bellman Operator in https://arxiv.org/abs/1805.11593.
"""
def __init__(self, eps: float = 1e-3) -> None:
super().__init__()
self._eps = eps
@property
def eps(self) -> float:
return self._eps
def rescale(self, x: torch.Tensor) -> torch.Tensor:
return x.sign() * ((x.abs() + 1.0).sqrt() - 1.0) + self.eps * x
def recover(self, x: torch.Tensor) -> torch.Tensor:
if self._eps == 0.0:
return x.sign() * (x.square() + 2.0 * x.abs())
r = ((1.0 + 4.0 * self.eps *
(x.abs() + 1.0 + self.eps)).sqrt() - 1.0) / (2.0 * self.eps)
return x.sign() * (r.square() - 1.0)
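if __name__ == "__main__":
    # Editor's sketch: a round-trip sanity check for the transformed Bellman
    # rescaler, h(x) = sign(x) * (sqrt(|x| + 1) - 1) + eps * x, whose exact
    # analytic inverse is implemented by `recover`.
    rescaler = SignedHyperbolicRescaler()
    x = torch.linspace(-10.0, 10.0, steps=5, dtype=torch.float64)
    assert torch.allclose(rescaler.recover(rescaler.rescale(x)), x, atol=1e-6)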
| 5,106 | 26.605405 | 78 | py |
rlmeta | rlmeta-main/rlmeta/core/replay_buffer.py | # Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import collections
import time
import logging
from typing import Callable, Optional, Sequence, Tuple, Union
from rich.console import Console
import numpy as np
import torch
import rlmeta.core.remote as remote
import rlmeta.utils.data_utils as data_utils
import rlmeta.utils.nested_utils as nested_utils
from rlmeta.core.launchable import Launchable
from rlmeta.core.server import Server
from rlmeta.core.types import Tensor, NestedTensor
from rlmeta.storage import Storage
from rlmeta.samplers import Sampler
console = Console()
# The design of ReplayBuffer is inspired from DeepMind's Reverb project.
#
# https://github.com/deepmind/reverb
#
# Copyright 2019 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
IndexType = Union[int, Tensor]
KeyType = Union[int, Tensor]
ValueType = Union[NestedTensor, Sequence[NestedTensor]]
class ReplayBuffer(remote.Remotable, Launchable):
def __init__(self,
storage: Storage,
sampler: Sampler,
identifier: Optional[str] = None) -> None:
remote.Remotable.__init__(self, identifier)
self._storage = storage
self._sampler = sampler
def __len__(self) -> int:
return len(self._storage)
def __getitem__(self, index: IndexType) -> Tuple[KeyType, ValueType]:
return self._storage.at(index)
@property
def capacity(self) -> int:
return self._storage.capacity
@property
def size(self) -> int:
return self._storage.size
def init_launching(self) -> None:
pass
def init_execution(self) -> None:
pass
@remote.remote_method(batch_size=None)
def info(self) -> Tuple[int, int]:
return self.size, self.capacity
@remote.remote_method(batch_size=None)
def reset(self) -> None:
self._storage.reset()
self._sampler.reset()
@remote.remote_method(batch_size=None)
def clear(self) -> None:
self._storage.clear()
self._sampler.reset()
@remote.remote_method(batch_size=None)
def at(self, index: IndexType) -> Tuple[KeyType, ValueType]:
return self._storage.at(index)
@remote.remote_method(batch_size=None)
def get(self, key: KeyType) -> ValueType:
return self._storage.get(key)
@remote.remote_method(batch_size=None)
def append(self, data: NestedTensor, priority: float = 1.0) -> int:
new_key, old_key = self._storage.append(data)
self._sampler.insert(new_key, priority)
if old_key is not None:
self._sampler.delete(old_key)
return new_key
@remote.remote_method(batch_size=None)
def extend(self,
data: Sequence[NestedTensor],
priorities: Union[float, Tensor] = 1.0,
stacked: bool = False) -> torch.Tensor:
new_keys, old_keys = self._storage.extend(data, stacked)
if isinstance(priorities, torch.Tensor):
priorities = priorities.numpy().astype(np.float64)
elif isinstance(priorities, np.ndarray):
priorities = priorities.astype(np.float64)
self._sampler.insert(new_keys, priorities)
self._sampler.delete(old_keys)
return torch.from_numpy(new_keys)
@remote.remote_method(batch_size=None)
def sample(
self,
num_samples: int,
replacement: bool = False
) -> Tuple[torch.Tensor, NestedTensor, torch.Tensor]:
keys, probabilities = self._sampler.sample(num_samples, replacement)
values = self._storage.get(keys)
return torch.from_numpy(keys), values, torch.from_numpy(probabilities)
@remote.remote_method(batch_size=None)
def update(self, key: Union[int, Tensor], priority: Union[float,
Tensor]) -> None:
if isinstance(key, torch.Tensor):
key = key.numpy()
if isinstance(priority, torch.Tensor):
priority = priority.numpy().astype(np.float64)
elif isinstance(priority, np.ndarray):
priority = priority.astype(np.float64)
self._sampler.update(key, priority)
class RemoteReplayBuffer(remote.Remote):
def __init__(self,
target: ReplayBuffer,
server_name: str,
server_addr: str,
name: Optional[str] = None,
prefetch: int = 0,
timeout: float = 60) -> None:
super().__init__(target, server_name, server_addr, name, timeout)
self._prefetch = prefetch
self._futures = collections.deque()
self._server_name = server_name
self._server_addr = server_addr
def __repr__(self):
return (f"RemoteReplayBuffer(server_name={self._server_name}, " +
f"server_addr={self._server_addr})")
@property
def prefetch(self) -> Optional[int]:
return self._prefetch
def sample(
self,
num_samples: int,
replacement: bool = False
) -> Union[NestedTensor, Tuple[NestedTensor, torch.Tensor, torch.Tensor,
torch.Tensor]]:
if len(self._futures) > 0:
ret = self._futures.popleft().result()
else:
ret = self.client.sync(self.server_name,
self.remote_method_name("sample"),
num_samples, replacement)
while len(self._futures) < self.prefetch:
fut = self.client.async_(self.server_name,
self.remote_method_name("sample"),
num_samples, replacement)
self._futures.append(fut)
return ret
async def async_sample(
self,
num_samples: int,
replacement: bool = False
) -> Union[NestedTensor, Tuple[NestedTensor, torch.Tensor, torch.Tensor,
torch.Tensor]]:
if len(self._futures) > 0:
ret = await self._futures.popleft()
else:
ret = await self.client.async_(self.server_name,
self.remote_method_name("sample"),
num_samples, replacement)
while len(self._futures) < self.prefetch:
fut = self.client.async_(self.server_name,
self.remote_method_name("sample"),
num_samples, replacement)
self._futures.append(fut)
return ret
def warm_up(self, learning_starts: Optional[int] = None) -> None:
size, capacity = self.info()
target_size = capacity
if learning_starts is not None:
target_size = min(target_size, learning_starts)
width = len(str(capacity)) + 1
while size < target_size:
time.sleep(1)
size, capacity = self.info()
console.log("Warming up replay buffer: " +
f"[{size: {width}d} / {capacity} ]")
ReplayBufferLike = Union[ReplayBuffer, RemoteReplayBuffer]
def make_remote_replay_buffer(target: ReplayBuffer,
server: Server,
name: Optional[str] = None,
prefetch: int = 0,
timeout: float = 60):
return RemoteReplayBuffer(target, server.name, server.addr, name, prefetch,
timeout)
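# Editor's sketch (illustrative, not part of the original file): ReplayBuffer
# can be exercised locally before being put behind a server. The
# CircularBuffer/UniformSampler constructors below follow their use elsewhere
# in this repo.
#
#   from rlmeta.samplers import UniformSampler
#   from rlmeta.storage.circular_buffer import CircularBuffer
#
#   buffer = ReplayBuffer(CircularBuffer(128), UniformSampler())
#   buffer.extend([{"obs": torch.randn(4)} for _ in range(32)])
#   keys, batch, probs = buffer.sample(8)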
| 8,167 | 33.464135 | 79 | py |
rlmeta | rlmeta-main/rlmeta/envs/gym_wrapper.py | # Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Callable, Optional
import numpy as np
import gym
from gym.wrappers.frame_stack import LazyFrames
from gym.wrappers.step_api_compatibility import StepAPICompatibility
import rlmeta.utils.data_utils as data_utils
import rlmeta.utils.nested_utils as nested_utils
from rlmeta.core.types import Action, TimeStep
from rlmeta.core.types import Tensor, NestedTensor
from rlmeta.envs.env import Env
class GymWrapper(Env):
def __init__(self,
env: gym.Env,
observation_fn: Optional[Callable[..., Tensor]] = None,
old_step_api: bool = False) -> None:
super(GymWrapper, self).__init__()
self._env = StepAPICompatibility(
env, output_truncation_bool=True) if old_step_api else env
self._action_space = self._env.action_space
self._observation_space = self._env.observation_space
self._reward_range = self._env.reward_range
self._metadata = self._env.metadata
self._old_step_api = old_step_api
if observation_fn is not None:
self._observation_fn = observation_fn
else:
self._observation_fn = data_utils.to_torch
@property
def env(self):
return self._env
@property
def action_space(self):
return self._action_space
@property
def observation_space(self):
return self._observation_space
@property
def reward_range(self):
return self._reward_range
@property
def metadata(self):
return self._metadata
def reset(self, *args, seed: Optional[int] = None, **kwargs) -> TimeStep:
# TODO: Clean up this function when most envs are fully migrated to the
# new OpenAI Gym API.
if self._old_step_api:
if seed is not None:
self._env.seed(seed)
obs = self._env.reset(*args, **kwargs)
info = None
else:
obs, info = self._env.reset(*args, seed=seed, **kwargs)
obs = nested_utils.map_nested(
lambda x: self._observation_fn(
np.asarray(x) if isinstance(x, LazyFrames) else x), obs)
return TimeStep(obs, info=info)
def step(self, action: Action) -> TimeStep:
act = action.action
if not isinstance(act, int):
act = act.item()
obs, reward, terminated, truncated, info = self._env.step(act)
obs = nested_utils.map_nested(
lambda x: self._observation_fn(
np.asarray(x) if isinstance(x, LazyFrames) else x), obs)
return TimeStep(obs, reward, terminated, truncated, info)
def close(self) -> None:
self._env.close()
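if __name__ == "__main__":
    # Editor's sketch: wrapping a standard Gym environment. Assumes an
    # installed gym version with the new terminated/truncated step API; for
    # older versions pass old_step_api=True.
    env = GymWrapper(gym.make("CartPole-v1"))
    timestep = env.reset(seed=0)
    action = Action(env.action_space.sample())
    timestep = env.step(action)
    print(timestep.reward, timestep.terminated, timestep.truncated)
    env.close()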
| 2,873 | 30.582418 | 79 | py |
rlmeta | rlmeta-main/rlmeta/models/actor_critic.py | # Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Sequence, Tuple
import torch
import torch.nn as nn
import torch.nn.functional as F
from rlmeta.models.utils import MLP
class DiscreteActorCriticHead(nn.Module):
def __init__(self, input_size: int, hidden_sizes: Sequence[int],
num_actions: int) -> None:
super().__init__()
self._input_size = input_size
self._hidden_sizes = hidden_sizes
self._num_actions = num_actions
self._mlp_p = MLP(input_size, [*hidden_sizes, num_actions])
self._mlp_v = MLP(input_size, [*hidden_sizes, 1])
def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
p = self._mlp_p(x)
logpi = F.log_softmax(p, dim=-1)
v = self._mlp_v(x)
return logpi, v
class DiscreteActorCriticRNDHead(nn.Module):
def __init__(self, input_size: int, hidden_sizes: Sequence[int],
num_actions: int) -> None:
super().__init__()
self._input_size = input_size
self._hidden_sizes = hidden_sizes
self._num_actions = num_actions
self._mlp_p = MLP(input_size, [*hidden_sizes, num_actions])
self._mlp_ext_v = MLP(input_size, [*hidden_sizes, 1])
self._mlp_int_v = MLP(input_size, [*hidden_sizes, 1])
    def forward(
            self, x: torch.Tensor
    ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
p = self._mlp_p(x)
logpi = F.log_softmax(p, dim=-1)
ext_v = self._mlp_ext_v(x)
int_v = self._mlp_int_v(x)
return logpi, ext_v, int_v
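if __name__ == "__main__":
    # Editor's sketch: both heads map a flat feature vector to a log-policy
    # over actions plus one (or, for RND, two) value estimates.
    head = DiscreteActorCriticHead(input_size=64, hidden_sizes=[128],
                                   num_actions=4)
    logpi, v = head(torch.randn(8, 64))
    print(logpi.shape, v.shape)  # torch.Size([8, 4]) torch.Size([8, 1])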
| 1,699 | 32.333333 | 76 | py |
rlmeta | rlmeta-main/rlmeta/models/utils.py | # Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Sequence
import torch
import torch.nn as nn
# The MLP class is inspired from the MLP class in DeepMind's haiku lib.
# https://github.com/deepmind/dm-haiku/blob/6f2769e8c8dd35b3fc0e66905c877debea7d525f/haiku/_src/nets/mlp.py#L38
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class MLP(nn.Module):
def __init__(self,
input_size: int,
hidden_sizes: Sequence[int],
activate_last: bool = False) -> None:
super().__init__()
self._input_size = input_size
self._hidden_sizes = hidden_sizes
self._activate_final = activate_last
        prev_size = input_size
        # Copy so we do not mutate the caller's (possibly immutable) sequence.
        sizes = list(hidden_sizes)
        last_size = sizes.pop()
        layers = []
        for hidden_size in sizes:
layers.append(nn.Linear(prev_size, hidden_size))
layers.append(nn.ReLU())
prev_size = hidden_size
layers.append(nn.Linear(prev_size, last_size))
if activate_last:
layers.append(nn.ReLU())
self._layers = nn.Sequential(*layers)
def forward(self, x: torch.Tensor) -> torch.Tensor:
return self._layers(x)
class ResidualBlock(nn.Module):
def __init__(self,
in_channels: int,
out_channels: int,
kernel_size: int = 3) -> None:
super().__init__()
self._in_channels = in_channels
self._out_channels = out_channels
self._kernel_size = kernel_size
layers = []
layers.append(nn.ReLU())
layers.append(
nn.Conv2d(in_channels,
out_channels,
kernel_size=kernel_size,
padding="same"))
layers.append(nn.ReLU())
layers.append(
nn.Conv2d(out_channels,
out_channels,
kernel_size=kernel_size,
padding="same"))
self._layers = nn.Sequential(*layers)
def forward(self, x: torch.Tensor) -> torch.Tensor:
return x + self._layers(x)
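if __name__ == "__main__":
    # Editor's sketch: MLP stacks Linear+ReLU layers; ResidualBlock preserves
    # the spatial shape, so its input and output channel counts must match
    # for the skip connection to be valid.
    mlp = MLP(16, [32, 32, 4])
    print(mlp(torch.randn(8, 16)).shape)  # torch.Size([8, 4])
    block = ResidualBlock(32, 32)
    print(block(torch.randn(8, 32, 10, 10)).shape)  # torch.Size([8, 32, 10, 10])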
| 2,810 | 32.464286 | 111 | py |
rlmeta | rlmeta-main/rlmeta/models/dqn.py | # Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Sequence
import torch
import torch.nn as nn
from rlmeta.models.utils import MLP
class DQNHead(nn.Module):
def __init__(self, input_size: int, hidden_sizes: Sequence[int],
num_actions: int) -> None:
super().__init__()
self._input_size = input_size
self._hidden_sizes = hidden_sizes
self._num_actions = num_actions
self._mlp = MLP(input_size, [*hidden_sizes, num_actions])
def forward(self, x: torch.Tensor) -> torch.Tensor:
return self._mlp(x)
class DuelingDQNHead(nn.Module):
def __init__(self, input_size: int, hidden_sizes: Sequence[int],
num_actions: int) -> None:
super().__init__()
self._input_size = input_size
self._hidden_sizes = hidden_sizes
self._num_actions = num_actions
self._mlp_a = MLP(input_size, [*hidden_sizes, num_actions])
self._mlp_v = MLP(input_size, [*hidden_sizes, 1])
def forward(self, x: torch.Tensor) -> torch.Tensor:
a = self._mlp_a(x)
v = self._mlp_v(x)
return v + a - a.mean(dim=-1, keepdim=True)
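if __name__ == "__main__":
    # Editor's sketch: the dueling head recombines the advantage and value
    # streams as Q = V + A - mean(A), which leaves the greedy action of the
    # plain advantage stream unchanged.
    head = DuelingDQNHead(input_size=64, hidden_sizes=[128], num_actions=6)
    print(head(torch.randn(8, 64)).shape)  # torch.Size([8, 6])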
| 1,306 | 29.395349 | 68 | py |
rlmeta | rlmeta-main/rlmeta/models/atari.py | # Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.nn as nn
from rlmeta.models.utils import ResidualBlock
class NatureCNNBackbone(nn.Module):
def __init__(self) -> None:
super().__init__()
layers = []
layers.append(nn.Conv2d(4, 32, kernel_size=8, stride=4))
layers.append(nn.ReLU())
layers.append(nn.Conv2d(32, 64, kernel_size=4, stride=2))
layers.append(nn.ReLU())
layers.append(nn.Conv2d(64, 64, kernel_size=3, stride=1))
layers.append(nn.ReLU())
layers.append(nn.Flatten())
self._layers = nn.Sequential(*layers)
@property
def output_size(self) -> int:
return 3136
def forward(self, x: torch.Tensor) -> torch.Tensor:
return self._layers(x)
class ImpalaCNNBackbone(nn.Module):
def __init__(self) -> None:
super().__init__()
layers = []
layers.append(self._conv_block(4, 16))
layers.append(self._conv_block(16, 32))
layers.append(self._conv_block(32, 32))
layers.append(nn.ReLU())
layers.append(nn.Flatten())
self._layers = nn.Sequential(*layers)
@property
def output_size(self) -> int:
return 3872
def forward(self, x: torch.Tensor) -> torch.Tensor:
return self._layers(x)
def _conv_block(self, in_channels: int, out_channels: int) -> nn.Module:
layers = []
layers.append(
nn.Conv2d(in_channels,
out_channels,
kernel_size=3,
stride=1,
padding="same"))
layers.append(nn.MaxPool2d(kernel_size=3, stride=2, padding=1))
layers.append(ResidualBlock(out_channels, out_channels))
layers.append(ResidualBlock(out_channels, out_channels))
return nn.Sequential(*layers)
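if __name__ == "__main__":
    # Editor's sketch: both backbones expect a stack of four 84x84 Atari
    # frames; the flattened feature sizes match the output_size properties.
    x = torch.randn(2, 4, 84, 84)
    print(NatureCNNBackbone()(x).shape)  # torch.Size([2, 3136])
    print(ImpalaCNNBackbone()(x).shape)  # torch.Size([2, 3872])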
| 1,990 | 28.716418 | 76 | py |
rlmeta | rlmeta-main/rlmeta/agents/ppo/ppo_model.py | # Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import abc
from typing import Tuple
import torch
import torch.nn as nn
from rlmeta.core.model import RemotableModel
class PPOModel(RemotableModel):
@abc.abstractmethod
def forward(self, obs: torch.Tensor, *args,
**kwargs) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Forward function for PPO model.
Args:
obs: A torch.Tensor for observation.
Returns:
            A tuple of PyTorch tensors containing (logpi, v).
            logpi: The log probability of each action.
v: The value of the current state.
"""
@abc.abstractmethod
def act(self, obs: torch.Tensor, deterministic_policy: torch.Tensor, *args,
**kwargs) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
"""
Act function will be called remotely by the agent.
This function should upload the input to the device and download the
output to cpu.
Args:
obs: A torch.Tensor for observation.
deterministic_policy: A torch.Tensor for whether to use
deterministic_policy.
Returns:
            A tuple of PyTorch tensors containing (action, logpi, v).
            action: The final action selected by the model.
            logpi: The log probability of each action.
v: The value of the current state.
"""
| 1,555 | 28.358491 | 79 | py |
rlmeta | rlmeta-main/rlmeta/agents/ppo/ppo_rnd_agent.py | # Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Callable, Dict, List, Optional, Sequence
import torch
import torch.nn as nn
import rlmeta.utils.data_utils as data_utils
import rlmeta.utils.nested_utils as nested_utils
from rlmeta.agents.ppo.ppo_agent import PPOAgent
from rlmeta.core.controller import ControllerLike
from rlmeta.core.model import ModelLike
from rlmeta.core.replay_buffer import ReplayBufferLike
from rlmeta.core.rescalers import StdRescaler
from rlmeta.core.types import Action, TimeStep
from rlmeta.core.types import Tensor, NestedTensor
class PPORNDAgent(PPOAgent):
def __init__(
self,
model: ModelLike,
deterministic_policy: bool = False,
replay_buffer: Optional[ReplayBufferLike] = None,
controller: Optional[ControllerLike] = None,
optimizer: Optional[torch.optim.Optimizer] = None,
batch_size: int = 128,
max_grad_norm: float = 1.0,
gamma: float = 0.99,
gae_lambda: float = 0.95,
ratio_clipping_eps: float = 0.2,
value_clipping_eps: Optional[float] = 0.2,
intrinsic_advantage_coeff: float = 0.5,
vf_loss_coeff: float = 0.5,
entropy_coeff: float = 0.01,
rescale_reward: bool = True,
max_abs_reward: float = 10.0,
normalize_advantage: bool = True,
learning_starts: Optional[int] = None,
model_push_period: int = 10,
local_batch_size: int = 1024,
collate_fn: Optional[Callable[[Sequence[NestedTensor]],
NestedTensor]] = None
) -> None:
super().__init__(model, deterministic_policy, replay_buffer, controller,
optimizer, batch_size, max_grad_norm, gamma,
gae_lambda, ratio_clipping_eps, value_clipping_eps,
vf_loss_coeff, entropy_coeff, rescale_reward,
max_abs_reward, normalize_advantage, learning_starts,
model_push_period, local_batch_size)
self._intrinsic_advantage_coeff = intrinsic_advantage_coeff
self._reward_rescaler = None
self._ext_reward_rescaler = StdRescaler(
size=1) if rescale_reward else None
self._int_reward_rescaler = StdRescaler(
size=1) if rescale_reward else None
self._collate_fn = torch.stack if collate_fn is None else collate_fn
def act(self, timestep: TimeStep) -> Action:
obs = timestep.observation
action, logpi, ext_v, int_v = self._model.act(
obs, self._deterministic_policy)
return Action(action,
info={
"logpi": logpi,
"ext_v": ext_v,
"int_v": int_v,
})
async def async_act(self, timestep: TimeStep) -> Action:
obs = timestep.observation
action, logpi, ext_v, int_v = await self._model.async_act(
obs, self._deterministic_policy)
return Action(action,
info={
"logpi": logpi,
"ext_v": ext_v,
"int_v": int_v,
})
def observe(self, action: Action, next_timestep: TimeStep) -> None:
if self._replay_buffer is None:
return
act, info = action
obs, reward, terminated, truncated, _ = next_timestep
cur = self._trajectory[-1]
cur["reward"] = reward
cur["action"] = act
cur["logpi"] = info["logpi"]
cur["ext_v"] = info["ext_v"]
cur["int_v"] = info["int_v"]
self._trajectory.append({
"obs": obs,
"terminated": terminated,
"truncated": truncated,
})
async def async_observe(self, action: Action,
next_timestep: TimeStep) -> None:
self.observe(action, next_timestep)
def update(self) -> None:
if not self._trajectory:
return
last_step = self._trajectory[-1]
done = last_step["terminated"] or last_step["truncated"]
if self._replay_buffer is None or not done:
return
last_step["reward"] = 0.0
last_ext_v = torch.zeros(1)
last_int_v = torch.zeros(1)
if last_step["truncated"]:
# TODO: Find a better way to compute last_v.
_, _, last_ext_v, last_int_v = self._model.act(
last_step["obs"], self._deterministic_policy)
last_step["ext_v"] = last_ext_v
last_step["int_v"] = last_int_v
replay = self._make_replay()
self._send_replay(replay)
self._trajectory.clear()
async def async_update(self) -> None:
if not self._trajectory:
return
last_step = self._trajectory[-1]
done = last_step["terminated"] or last_step["truncated"]
if self._replay_buffer is None or not done:
return
last_step["reward"] = 0.0
last_ext_v = torch.zeros(1)
last_int_v = torch.zeros(1)
if last_step["truncated"]:
# TODO: Find a better way to compute last_v.
_, _, last_ext_v, last_int_v = await self._model.async_act(
last_step["obs"], self._deterministic_policy)
last_step["ext_v"] = last_ext_v
last_step["int_v"] = last_int_v
        replay = await self._async_make_replay()
await self._async_send_replay(replay)
self._trajectory.clear()
def _make_replay(self) -> List[NestedTensor]:
next_obs = [
self._trajectory[i]["obs"] for i in range(1, len(self._trajectory))
]
int_rewards = self._compute_intrinsic_reward(next_obs)
return self._make_replay_impl(int_rewards)
async def _async_make_replay(self) -> List[NestedTensor]:
next_obs = [
self._trajectory[i]["obs"] for i in range(1, len(self._trajectory))
]
int_rewards = await self._async_compute_intrinsic_reward(next_obs)
return self._make_replay_impl(int_rewards)
def _make_replay_impl(
self,
intrinsic_rewards: Sequence[NestedTensor]) -> List[NestedTensor]:
ext_adv, ext_ret = self._compute_gae_and_return(
[x["ext_v"] for x in self._trajectory],
[x["reward"] for x in self._trajectory], self._ext_reward_rescaler)
int_adv, int_ret = self._compute_gae_and_return(
[x["int_v"] for x in self._trajectory], intrinsic_rewards,
self._int_reward_rescaler)
self._trajectory.pop()
for cur, ext_a, ext_r, int_a, int_r in zip(self._trajectory, ext_adv,
ext_ret, int_adv, int_ret):
cur["ext_gae"] = ext_a
cur["ext_ret"] = ext_r
cur["int_gae"] = int_a
cur["int_ret"] = int_r
cur.pop("reward")
cur.pop("terminated")
cur.pop("truncated")
return self._trajectory
def _compute_intrinsic_reward(
self, next_obs: Sequence[NestedTensor]) -> List[torch.Tensor]:
int_rewards = []
n = len(next_obs)
next_obs = nested_utils.collate_nested(self._collate_fn, next_obs)
for i in range(0, n, self._local_batch_size):
batch = nested_utils.map_nested(
lambda x, i=i: x[i:i + self._local_batch_size], next_obs)
cur_rewards = self._model.intrinsic_reward(batch)
int_rewards.extend(torch.unbind(cur_rewards))
int_rewards.append(torch.zeros(1)) # Padding for last step.
return int_rewards
async def _async_compute_intrinsic_reward(
self, obs: Sequence[NestedTensor]) -> List[torch.Tensor]:
int_rewards = []
n = len(obs)
obs = nested_utils.collate_nested(self._collate_fn, obs)
for i in range(0, n, self._local_batch_size):
batch = nested_utils.map_nested(
lambda x, i=i: x[i:i + self._local_batch_size], obs)
cur_rewards = await self._model.async_intrinsic_reward(batch)
int_rewards.extend(torch.unbind(cur_rewards))
int_rewards.append(torch.zeros(1)) # Padding for last step
return int_rewards
def _train_step(self, batch: NestedTensor) -> Dict[str, float]:
batch = nested_utils.map_nested(lambda x: x.to(self._model.device),
batch)
self._optimizer.zero_grad()
obs = batch["obs"]
act = batch["action"]
ext_adv = batch["ext_gae"]
ext_ret = batch["ext_ret"]
int_adv = batch["int_gae"]
int_ret = batch["int_ret"]
behavior_logpi = batch["logpi"]
behavior_ext_v = batch["ext_v"]
behavior_int_v = batch["int_v"]
logpi, ext_v, int_v = self._model_forward(obs)
adv = ext_adv + self._intrinsic_advantage_coeff * int_adv
policy_loss, ratio = self._policy_loss(logpi.gather(dim=-1, index=act),
behavior_logpi, adv)
ext_value_loss = self._value_loss(ext_ret, ext_v, behavior_ext_v)
int_value_loss = self._value_loss(int_ret, int_v, behavior_int_v)
value_loss = ext_value_loss + int_value_loss
entropy = self._entropy(logpi)
rnd_loss = self._rnd_loss(obs)
loss = policy_loss + (self._vf_loss_coeff * value_loss) - (
self._entropy_coeff * entropy) + rnd_loss
loss.backward()
grad_norm = nn.utils.clip_grad_norm_(self._model.parameters(),
self._max_grad_norm)
self._optimizer.step()
return {
"ext_return": ext_ret.detach().mean().item(),
"int_return": int_ret.detach().mean().item(),
"policy_ratio": ratio.detach().mean().item(),
"policy_loss": policy_loss.detach().mean().item(),
"ext_value_loss": ext_value_loss.detach().mean().item(),
"int_value_loss": int_value_loss.detach().mean().item(),
"value_loss": value_loss.detach().mean().item(),
"entropy": entropy.detach().mean().item(),
"rnd_loss": rnd_loss.detach().mean().item(),
"loss": loss.detach().mean().item(),
"grad_norm": grad_norm.detach().mean().item(),
}
def _rnd_loss(self, next_obs: torch.Tensor) -> torch.Tensor:
return self._model.rnd_loss(next_obs)
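# Editor's note: following "Exploration by Random Network Distillation"
# (Burda et al., 2018), the model is expected to own a fixed, randomly
# initialized target network and a trained predictor; `intrinsic_reward` is
# their prediction error and `rnd_loss` trains the predictor. Extrinsic and
# intrinsic streams keep separate value heads and GAE estimates and are mixed
# in _train_step as
#     adv = ext_gae + intrinsic_advantage_coeff * int_gae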
| 10,632 | 39.276515 | 80 | py |
rlmeta | rlmeta-main/rlmeta/agents/ppo/ppo_rnd_model.py | # Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import abc
from typing import Tuple
import torch
import torch.nn as nn
from rlmeta.core.model import RemotableModel
class PPORNDModel(RemotableModel):
@abc.abstractmethod
def forward(self, obs: torch.Tensor, *args,
**kwargs) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
"""
        Forward function for PPO RND model.
        Args:
            obs: A torch.Tensor for observation.
        Returns:
            A tuple of PyTorch tensors containing (logpi, ext_v, int_v).
            logpi: The log probability of each action.
            ext_v: The extrinsic value of the current state.
            int_v: The intrinsic value of the current state.
"""
@abc.abstractmethod
def act(
self, obs: torch.Tensor, deterministic_policy: torch.Tensor, *args,
**kwargs
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
"""
Act function will be called remotely by the agent.
This function should upload the input to the device and download the
output to cpu.
Args:
obs: A torch.Tensor for observation.
deterministic_policy: A torch.Tensor for whether to use
deterministic_policy.
Returns:
            A tuple of PyTorch tensors containing (action, logpi, ext_v, int_v).
            action: The final action selected by the model.
            logpi: The log probability of each action.
ext_v: The extrinsic value of the current state.
int_v: The intrinsic value of the current state.
"""
@abc.abstractmethod
def intrinsic_reward(self, obs: torch.Tensor) -> torch.Tensor:
"""
"""
@abc.abstractmethod
def rnd_loss(self, obs: torch.Tensor) -> torch.Tensor:
"""
"""
| 1,906 | 27.893939 | 78 | py |
rlmeta | rlmeta-main/rlmeta/agents/ppo/ppo_agent.py | # Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import time
from concurrent.futures import Future, ThreadPoolExecutor
from typing import Dict, Iterable, List, Optional, Sequence, Tuple, Union
import torch
import torch.nn as nn
import torch.nn.functional as F
from rich.console import Console
from rich.progress import track
import rlmeta.utils.data_utils as data_utils
import rlmeta.utils.nested_utils as nested_utils
from rlmeta.agents.agent import Agent
from rlmeta.core.controller import Controller, ControllerLike, Phase
from rlmeta.core.model import ModelLike
from rlmeta.core.replay_buffer import ReplayBufferLike
from rlmeta.core.rescalers import Rescaler, StdRescaler
from rlmeta.core.types import Action, TimeStep
from rlmeta.core.types import Tensor, NestedTensor
from rlmeta.utils.stats_dict import StatsDict
console = Console()
class PPOAgent(Agent):
def __init__(self,
model: ModelLike,
deterministic_policy: bool = False,
replay_buffer: Optional[ReplayBufferLike] = None,
controller: Optional[ControllerLike] = None,
optimizer: Optional[torch.optim.Optimizer] = None,
batch_size: int = 512,
max_grad_norm: float = 1.0,
gamma: float = 0.99,
gae_lambda: float = 0.95,
ratio_clipping_eps: float = 0.2,
value_clipping_eps: Optional[float] = 0.2,
vf_loss_coeff: float = 0.5,
entropy_coeff: float = 0.01,
rescale_reward: bool = True,
max_abs_reward: float = 10.0,
normalize_advantage: bool = True,
learning_starts: Optional[int] = None,
model_push_period: int = 10,
local_batch_size: int = 1024) -> None:
super().__init__()
self._model = model
self._deterministic_policy = torch.tensor([deterministic_policy])
self._replay_buffer = replay_buffer
self._controller = controller
self._optimizer = optimizer
self._batch_size = batch_size
self._max_grad_norm = max_grad_norm
self._gamma = gamma
self._gae_lambda = gae_lambda
self._ratio_clipping_eps = ratio_clipping_eps
self._value_clipping_eps = value_clipping_eps
self._vf_loss_coeff = vf_loss_coeff
self._entropy_coeff = entropy_coeff
self._rescale_reward = rescale_reward
self._max_abs_reward = max_abs_reward
self._reward_rescaler = StdRescaler(size=1) if rescale_reward else None
self._normalize_advantage = normalize_advantage
self._learning_starts = learning_starts
self._model_push_period = model_push_period
self._local_batch_size = local_batch_size
self._trajectory = []
self._step_counter = 0
self._eval_executor = None
def reset(self) -> None:
self._step_counter = 0
def act(self, timestep: TimeStep) -> Action:
obs = timestep.observation
action, logpi, v = self._model.act(obs, self._deterministic_policy)
return Action(action, info={"logpi": logpi, "v": v})
async def async_act(self, timestep: TimeStep) -> Action:
obs = timestep.observation
action, logpi, v = await self._model.async_act(
obs, self._deterministic_policy)
return Action(action, info={"logpi": logpi, "v": v})
def observe_init(self, timestep: TimeStep) -> None:
if self._replay_buffer is None:
return
obs, _, terminated, truncated, _ = timestep
if terminated or truncated:
self._trajectory.clear()
else:
self._trajectory = [{
"obs": obs,
"terminated": terminated,
"truncated": truncated,
}]
async def async_observe_init(self, timestep: TimeStep) -> None:
self.observe_init(timestep)
def observe(self, action: Action, next_timestep: TimeStep) -> None:
if self._replay_buffer is None:
return
act, info = action
obs, reward, terminated, truncated, _ = next_timestep
cur = self._trajectory[-1]
cur["action"] = act
cur["logpi"] = info["logpi"]
cur["v"] = info["v"]
cur["reward"] = reward
self._trajectory.append({
"obs": obs,
"terminated": terminated,
"truncated": truncated,
})
async def async_observe(self, action: Action,
next_timestep: TimeStep) -> None:
self.observe(action, next_timestep)
def update(self) -> None:
if not self._trajectory:
return
last_step = self._trajectory[-1]
done = last_step["terminated"] or last_step["truncated"]
if self._replay_buffer is None or not done:
return
last_step["reward"] = 0.0
last_v = torch.zeros(1)
if last_step["truncated"]:
# TODO: Find a better way to compute last_v.
_, _, last_v = self._model.act(last_step["obs"],
self._deterministic_policy)
last_step["v"] = last_v
replay = self._make_replay()
self._send_replay(replay)
self._trajectory.clear()
async def async_update(self) -> None:
if not self._trajectory:
return
last_step = self._trajectory[-1]
done = last_step["terminated"] or last_step["truncated"]
if self._replay_buffer is None or not done:
return
last_step["reward"] = 0.0
last_v = torch.zeros(1)
if last_step["truncated"]:
# TODO: Find a better way to compute last_v.
_, _, last_v = await self._model.async_act(
last_step["obs"], self._deterministic_policy)
last_step["v"] = last_v
replay = self._make_replay()
await self._async_send_replay(replay)
self._trajectory.clear()
def train(self,
num_steps: int,
keep_evaluation_loops: bool = False) -> StatsDict:
phase = self._controller.phase()
if keep_evaluation_loops:
self._controller.set_phase(Phase.TRAIN | phase)
else:
self._controller.set_phase(Phase.TRAIN)
self._replay_buffer.warm_up(self._learning_starts)
stats = StatsDict()
console.log(f"Training for num_steps = {num_steps}")
for _ in track(range(num_steps), description="Training..."):
t0 = time.perf_counter()
_, batch, _ = self._replay_buffer.sample(self._batch_size)
t1 = time.perf_counter()
step_stats = self._train_step(batch)
t2 = time.perf_counter()
time_stats = {
"sample_data_time/ms": (t1 - t0) * 1000.0,
"batch_learn_time/ms": (t2 - t1) * 1000.0,
}
stats.extend(step_stats)
stats.extend(time_stats)
self._step_counter += 1
if self._step_counter % self._model_push_period == 0:
self._model.push()
# Release current model to stable.
self._model.push()
self._model.release()
episode_stats = self._controller.stats(Phase.TRAIN)
stats.update(episode_stats)
self._controller.reset_phase(Phase.TRAIN)
return stats
def eval(self,
num_episodes: Optional[int] = None,
keep_training_loops: bool = False,
non_blocking: bool = False) -> Union[StatsDict, Future]:
if not non_blocking:
return self._eval(num_episodes, keep_training_loops)
if self._eval_executor is None:
self._eval_executor = ThreadPoolExecutor(max_workers=1)
return self._eval_executor.submit(self._eval, num_episodes,
keep_training_loops)
def _make_replay(self) -> List[NestedTensor]:
adv, ret = self._compute_gae_and_return(
[x["v"] for x in self._trajectory],
[x["reward"] for x in self._trajectory], self._reward_rescaler)
self._trajectory.pop()
for cur, a, r in zip(self._trajectory, adv, ret):
cur["gae"] = a
cur["ret"] = r
cur.pop("reward")
cur.pop("terminated")
cur.pop("truncated")
return self._trajectory
def _send_replay(self, replay: List[NestedTensor]) -> None:
batch = []
while replay:
batch.append(replay.pop())
if len(batch) >= self._local_batch_size:
self._replay_buffer.extend(batch)
batch.clear()
if batch:
self._replay_buffer.extend(batch)
batch.clear()
async def _async_send_replay(self, replay: List[NestedTensor]) -> None:
batch = []
while replay:
batch.append(replay.pop())
if len(batch) >= self._local_batch_size:
await self._replay_buffer.async_extend(batch)
batch.clear()
if batch:
await self._replay_buffer.async_extend(batch)
batch.clear()
def _compute_gae_and_return(
self,
val: Sequence[Union[float, torch.Tensor]],
rew: Sequence[Union[float, torch.Tensor]],
reward_rescaler: Optional[Rescaler] = None
) -> Tuple[Iterable[torch.Tensor], Iterable[torch.Tensor]]:
n = len(val)
v = val[-1]
g = torch.zeros(1)
gae = torch.zeros(1)
adv = []
ret = []
for i in range(n - 2, -1, -1):
value, reward = val[i], rew[i]
if not isinstance(reward, torch.Tensor):
reward = torch.tensor([reward], dtype=torch.float32)
if reward_rescaler is not None:
g = reward + self._gamma * g
reward_rescaler.update(g)
reward = reward_rescaler.rescale(reward)
if self._max_abs_reward is not None:
reward.clamp_(-self._max_abs_reward, self._max_abs_reward)
delta = reward + self._gamma * v - value
v = value
gae = delta + self._gamma * self._gae_lambda * gae
adv.append(gae)
ret.append(gae + v)
return reversed(adv), reversed(ret)
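    # Editor's note: the backward pass above implements the standard GAE
    # recursion (Schulman et al., 2016):
    #     delta_t = r_t + gamma * V(s_{t+1}) - V(s_t)
    #     A_t     = delta_t + gamma * lambda * A_{t+1}
    #     R_t     = A_t + V(s_t)
    # with rewards optionally rescaled by the running std of the discounted
    # return and clamped to [-max_abs_reward, max_abs_reward].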
def _train_step(self, batch: NestedTensor) -> Dict[str, float]:
device = self._model.device
batch = nested_utils.map_nested(lambda x: x.to(device), batch)
self._optimizer.zero_grad()
obs = batch["obs"]
act = batch["action"]
adv = batch["gae"]
ret = batch["ret"]
behavior_logpi = batch["logpi"]
behavior_v = batch["v"]
logpi, v = self._model_forward(obs)
policy_loss, ratio = self._policy_loss(logpi.gather(dim=-1, index=act),
behavior_logpi, adv)
value_loss = self._value_loss(ret, v, behavior_v)
entropy = self._entropy(logpi)
loss = policy_loss + (self._vf_loss_coeff *
value_loss) - (self._entropy_coeff * entropy)
loss.backward()
grad_norm = nn.utils.clip_grad_norm_(self._model.parameters(),
self._max_grad_norm)
self._optimizer.step()
return {
"return": ret.detach().mean().item(),
"policy_ratio": ratio.detach().mean().item(),
"policy_loss": policy_loss.detach().mean().item(),
"value_loss": value_loss.detach().mean().item(),
"entropy": entropy.detach().mean().item(),
"loss": loss.detach().mean().item(),
"grad_norm": grad_norm.detach().mean().item(),
}
def _model_forward(self, obs: torch.Tensor) -> Tuple[torch.Tensor, ...]:
return self._model(obs)
def _policy_loss(self, logpi: torch.Tensor, behavior_logpi: torch.Tensor,
adv: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
if self._normalize_advantage:
std, mean = torch.std_mean(adv, unbiased=False)
adv = (adv - mean) / std
ratio = (logpi - behavior_logpi).exp()
clipped_ratio = ratio.clamp(1.0 - self._ratio_clipping_eps,
1.0 + self._ratio_clipping_eps)
surr1 = ratio * adv
surr2 = clipped_ratio * adv
policy_loss = -torch.min(surr1, surr2).mean()
return policy_loss, ratio
def _value_loss(self,
ret: torch.Tensor,
v: torch.Tensor,
behavior_v: Optional[torch.Tensor] = None) -> torch.Tensor:
if self._value_clipping_eps is None:
return F.mse_loss(v, ret)
clipped_v = behavior_v + torch.clamp(
v - behavior_v, -self._value_clipping_eps, self._value_clipping_eps)
vf1 = F.mse_loss(v, ret, reduction="none")
vf2 = F.mse_loss(clipped_v, ret, reduction="none")
return torch.max(vf1, vf2).mean()
def _entropy(self, logpi: torch.Tensor) -> torch.Tensor:
return -(logpi.exp() * logpi).sum(dim=-1).mean()
def _eval(self,
num_episodes: int,
keep_training_loops: bool = False) -> StatsDict:
phase = self._controller.phase()
if keep_training_loops:
self._controller.set_phase(Phase.EVAL | phase)
else:
self._controller.set_phase(Phase.EVAL)
self._controller.reset_phase(Phase.EVAL, limit=num_episodes)
while self._controller.count(Phase.EVAL) < num_episodes:
time.sleep(1)
stats = self._controller.stats(Phase.EVAL)
self._controller.set_phase(phase)
return stats
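# Editor's sketch (illustrative, not part of the original file): learner-side
# construction. The optimizer choice and hyperparameters are placeholders.
#
#   agent = PPOAgent(model,
#                    replay_buffer=replay_buffer,
#                    controller=controller,
#                    optimizer=torch.optim.Adam(model.parameters(), lr=3e-4))
#   stats = agent.train(num_steps=1000)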
| 13,953 | 36.210667 | 80 | py |
rlmeta | rlmeta-main/rlmeta/agents/dqn/dqn_model.py | # Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import abc
from typing import Optional, Tuple
import torch
import torch.nn as nn
from rlmeta.core.model import RemotableModel
from rlmeta.core.types import NestedTensor
class DQNModel(RemotableModel):
@abc.abstractmethod
def forward(self, observation: torch.Tensor, *args,
**kwargs) -> torch.Tensor:
"""
Forward function for DQN model.
Args:
observation: A torch.Tensor for observation.
Returns:
q: The Q(s, a) value for each action in the current state.
"""
@abc.abstractmethod
def q(self, s: torch.Tensor, a: torch.Tensor) -> torch.Tensor:
"""
Q function for DQN model.
Args:
s: A torch.Tensor for observation.
a: A torch.Tensor for action.
Returns:
            q: The Q(s, a) values for the given state-action pairs.
"""
@abc.abstractmethod
def act(self, observation: NestedTensor, eps: torch.Tensor, *args,
**kwargs) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
"""
Act function will be called remotely by the agent.
This function should upload the input to the device and download the
output to cpu.
Args:
observation: A torch.Tensor for observation.
eps: A torch.Tensor for eps value in epsilon-greedy policy.
Returns:
action: The final action selected by the model.
q: The Q(s, a) value of the current state and action.
v: The value estimation of current state by max(Q(s, a)).
"""
@abc.abstractmethod
def sync_target_net(self) -> None:
"""
"""
def td_error(self, observation: NestedTensor, action: torch.Tensor,
target: torch.Tensor) -> torch.Tensor:
return target - self.q(observation, action)
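# Editor's note: with n-step bootstrapping the regression target passed to
# `td_error` is typically
#     target = sum_{k=0}^{n-1} gamma^k * r_{t+k} + gamma^n * max_a Q'(s_{t+n}, a)
# (optionally value-rescaled), so `td_error` returns the signed residual
# target - Q(s_t, a_t), which Ape-X style agents also use as the replay
# priority.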
| 2,058 | 28 | 76 | py |
rlmeta | rlmeta-main/rlmeta/agents/dqn/apex_dqn_agent.py | # Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import time
from concurrent.futures import Future, ThreadPoolExecutor
from typing import Callable, Dict, List, Optional, Sequence, Union
import torch
import torch.nn as nn
import torch.nn.functional as F
from rich.console import Console
from rich.progress import track
import rlmeta.utils.data_utils as data_utils
import rlmeta.utils.nested_utils as nested_utils
from rlmeta.agents.agent import Agent, AgentFactory
from rlmeta.core.controller import Controller, ControllerLike, Phase
from rlmeta.core.model import ModelLike
from rlmeta.core.replay_buffer import ReplayBufferLike
from rlmeta.core.rescalers import SignedHyperbolicRescaler
from rlmeta.core.types import Action, TimeStep
from rlmeta.core.types import NestedTensor
from rlmeta.utils.stats_dict import StatsDict
from rlmeta.utils.running_stats import RunningMoments
console = Console()
class ApexDQNAgent(Agent):
def __init__(
self,
model: ModelLike,
eps: float = 0.1,
replay_buffer: Optional[ReplayBufferLike] = None,
controller: Optional[ControllerLike] = None,
optimizer: Optional[torch.optim.Optimizer] = None,
batch_size: int = 512,
max_grad_norm: float = 40.0,
n_step: int = 1,
gamma: float = 0.99,
importance_sampling_exponent: float = 0.4,
max_abs_reward: Optional[int] = None,
rescale_value: bool = False,
value_clipping_eps: Optional[float] = 0.2,
fr_kappa: Optional[float] = 1.0,
target_sync_period: Optional[int] = None,
learning_starts: Optional[int] = None,
model_push_period: int = 10,
additional_models_to_update: Optional[List[ModelLike]] = None
) -> None:
super().__init__()
self._model = model
self._eps = torch.tensor([eps], dtype=torch.float32)
self._replay_buffer = replay_buffer
self._controller = controller
self._optimizer = optimizer
self._batch_size = batch_size
self._max_grad_norm = max_grad_norm
self._n_step = n_step
self._gamma = gamma
self._gamma_pow = tuple(gamma**i for i in range(n_step + 1))
self._importance_sampling_exponent = importance_sampling_exponent
self._max_abs_reward = max_abs_reward
self._value_clipping_eps = value_clipping_eps
self._fr_kappa = fr_kappa
self._rescale_value = rescale_value
self._rescaler = SignedHyperbolicRescaler() if rescale_value else None
self._target_sync_period = target_sync_period
self._learning_starts = learning_starts
self._model_push_period = model_push_period
self._additional_models_to_update = additional_models_to_update
self._step_counter = 0
self._trajectory = []
self._update_priorities_future = None
self._eval_executor = None
def reset(self) -> None:
self._step_counter = 0
def act(self, timestep: TimeStep) -> Action:
obs = timestep.observation
action, q, v = self._model.act(obs, self._eps)
return Action(action, info={"q": q, "v": v})
async def async_act(self, timestep: TimeStep) -> Action:
obs = timestep.observation
action, q, v = await self._model.async_act(obs, self._eps)
return Action(action, info={"q": q, "v": v})
def observe_init(self, timestep: TimeStep) -> None:
if self._replay_buffer is None:
return
obs, _, terminated, truncated, _ = timestep
if terminated or truncated:
self._trajectory.clear()
else:
self._trajectory = [{
"obs": obs,
"terminated": terminated,
"truncated": truncated,
}]
async def async_observe_init(self, timestep: TimeStep) -> None:
self.observe_init(timestep)
def observe(self, action: Action, next_timestep: TimeStep) -> None:
if self._replay_buffer is None:
return
act, info = action
obs, reward, terminated, truncated, _ = next_timestep
cur = self._trajectory[-1]
cur["reward"] = reward
cur["action"] = act
cur["q"] = info["q"]
cur["v"] = info["v"]
self._trajectory.append({
"obs": obs,
"terminated": terminated,
"truncated": truncated,
})
async def async_observe(self, action: Action,
next_timestep: TimeStep) -> None:
self.observe(action, next_timestep)
def update(self) -> None:
if not self._trajectory:
return
last_step = self._trajectory[-1]
done = last_step["terminated"] or last_step["truncated"]
if self._replay_buffer is None or not done:
return
last_step["reward"] = 0.0
last_v = torch.zeros(1)
if last_step["truncated"]:
# TODO: Find a better way to compute last_v.
_, _, last_v = self._model.act(last_step["obs"], self._eps)
last_step["v"] = last_v
replay = self._make_replay()
self._send_replay(replay)
self._trajectory.clear()
async def async_update(self) -> None:
if not self._trajectory:
return
last_step = self._trajectory[-1]
done = last_step["terminated"] or last_step["truncated"]
if self._replay_buffer is None or not done:
return
last_step["reward"] = 0.0
last_v = torch.zeros(1)
if last_step["truncated"]:
# TODO: Find a better way to compute last_v.
_, _, last_v = await self._model.async_act(last_step["obs"],
self._eps)
last_step["v"] = last_v
replay = self._make_replay()
await self._async_send_replay(replay)
self._trajectory.clear()
def connect(self) -> None:
super().connect()
if self._additional_models_to_update is not None:
for m in self._additional_models_to_update:
m.connect()
def train(self,
num_steps: int,
keep_evaluation_loops: bool = False) -> StatsDict:
phase = self._controller.phase()
if keep_evaluation_loops:
self._controller.set_phase(Phase.TRAIN | phase)
else:
self._controller.set_phase(Phase.TRAIN)
self._replay_buffer.warm_up(self._learning_starts)
stats = StatsDict()
console.log(f"Training for num_steps = {num_steps}")
for _ in track(range(num_steps), description="Training..."):
t0 = time.perf_counter()
keys, batch, probabilities = self._replay_buffer.sample(
self._batch_size)
t1 = time.perf_counter()
step_stats = self._train_step(keys, batch, probabilities)
t2 = time.perf_counter()
time_stats = {
"sample_data_time/ms": (t1 - t0) * 1000.0,
"batch_learn_time/ms": (t2 - t1) * 1000.0,
}
stats.extend(step_stats)
stats.extend(time_stats)
self._step_counter += 1
if (self._target_sync_period is not None and
self._step_counter % self._target_sync_period == 0):
self._model.sync_target_net()
if self._additional_models_to_update is not None:
for m in self._additional_models_to_update:
m.sync_target_net()
if self._step_counter % self._model_push_period == 0:
self._model.push()
if self._additional_models_to_update is not None:
for m in self._additional_models_to_update:
m.push()
# Release current model to stable.
self._model.push()
self._model.release()
episode_stats = self._controller.stats(Phase.TRAIN)
stats.update(episode_stats)
self._controller.reset_phase(Phase.TRAIN)
return stats
def eval(self,
num_episodes: Optional[int] = None,
keep_training_loops: bool = False,
non_blocking: bool = False) -> Union[StatsDict, Future]:
if not non_blocking:
return self._eval(num_episodes, keep_training_loops)
if self._eval_executor is None:
self._eval_executor = ThreadPoolExecutor(max_workers=1)
return self._eval_executor.submit(self._eval, num_episodes,
keep_training_loops)
def _make_replay(self) -> List[NestedTensor]:
replay = []
n = len(self._trajectory)
r = torch.zeros(1)
for i in range(n - 2, -1, -1):
k = min(self._n_step, n - 1 - i)
cur = self._trajectory[i]
nxt = self._trajectory[i + k]
obs = cur["obs"]
act = cur["action"]
q = cur["q"]
cur_reward = cur["reward"]
nxt_reward = nxt["reward"]
nxt_v = nxt["v"]
if not isinstance(cur_reward, torch.Tensor):
cur_reward = torch.tensor([cur_reward], dtype=torch.float32)
if not isinstance(nxt_reward, torch.Tensor):
nxt_reward = torch.tensor([nxt_reward], dtype=torch.float32)
if self._max_abs_reward is not None:
cur_reward.clamp_(-self._max_abs_reward, self._max_abs_reward)
nxt_reward.clamp_(-self._max_abs_reward, self._max_abs_reward)
gamma = 0.0 if nxt["terminated"] else self._gamma_pow[k]
r = cur_reward + self._gamma * r - gamma * nxt_reward
if self._rescaler is not None:
nxt_v = self._rescaler.recover(nxt_v)
target = r + gamma * nxt_v
if self._rescaler is not None:
target = self._rescaler.rescale(target)
replay.append({"obs": obs, "action": act, "q": q, "target": target})
return replay
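# Note on the backward recursion in _make_replay: with gamma_k = gamma ** k,
# the update r = reward_i + gamma * r_prev - gamma_k * reward_{i+k} keeps the
# invariant r = sum_{j<k} gamma**j * reward_{i+j}, i.e. the discounted n-step
# return of the window starting at i; the subtracted term drops the reward
# that falls off the end of the window. The target is then r + gamma_k *
# V(s_{i+k}), with gamma_k forced to 0 when the trajectory terminated.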
def _send_replay(self, replay: List[NestedTensor]) -> None:
batch = data_utils.stack_fields(replay)
priorities = (batch["target"] - batch["q"]).abs_().squeeze_(-1)
self._replay_buffer.extend(batch, priorities, stacked=True)
async def _async_send_replay(self, replay: List[NestedTensor]) -> None:
batch = data_utils.stack_fields(replay)
priorities = (batch["target"] - batch["q"]).abs_().squeeze_(-1)
await self._replay_buffer.async_extend(batch, priorities, stacked=True)
def _train_step(self, keys: torch.Tensor, batch: NestedTensor,
probabilities: torch.Tensor) -> Dict[str, float]:
device = next(self._model.parameters()).device
batch = nested_utils.map_nested(lambda x: x.to(device), batch)
self._optimizer.zero_grad()
obs = batch["obs"]
action = batch["action"]
target = batch["target"]
behavior_q = batch["q"]
probabilities = probabilities.to(dtype=target.dtype, device=device)
weight = probabilities.pow(-self._importance_sampling_exponent)
weight.div_(weight.max())
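# Prioritized-replay correction (arXiv:1511.05952): w_i = (1 / P(i)) ** beta,
# normalized by the batch max so the weights only ever scale the loss down.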
q = self._model.q(obs, action)
loss = self._loss(target, q, behavior_q, weight)
loss.backward()
grad_norm = torch.nn.utils.clip_grad_norm_(self._model.parameters(),
self._max_grad_norm)
self._optimizer.step()
with torch.no_grad():
td_err = self._model.td_error(obs, action, target)
priorities = td_err.detach().squeeze(-1).abs().cpu()
# Wait for previous update request
if self._update_priorities_future is not None:
self._update_priorities_future.wait()
# Async update to start next training step when waiting for updating
# priorities.
self._update_priorities_future = self._replay_buffer.async_update(
keys, priorities)
return {
"target": target.detach().mean().item(),
"td_error": td_err.detach().mean().item(),
"loss": loss.detach().mean().item(),
"grad_norm": grad_norm.detach().mean().item(),
}
def _loss(self, target: torch.Tensor, q: torch.Tensor,
behavior_q: torch.Tensor, weight: torch.Tensor) -> torch.Tensor:
if self._value_clipping_eps is None:
loss = F.mse_loss(q, target, reduction="none")
if self._fr_kappa is not None:
# Apply functional regularization.
# https://arxiv.org/abs/2106.02613
loss += (self._fr_kappa *
F.mse_loss(q, behavior_q, reduction="none"))
return (loss.squeeze(-1) * weight).mean()
# Apply approximate trust region value update.
# https://arxiv.org/abs/2209.07550
clipped_q = behavior_q + torch.clamp(
q - behavior_q, -self._value_clipping_eps, self._value_clipping_eps)
err1 = F.mse_loss(q, target, reduction="none")
err2 = F.mse_loss(clipped_q, target, reduction="none")
loss = torch.maximum(err1, err2)
if self._fr_kappa is not None:
# Apply functional regularization.
# https://arxiv.org/abs/2106.02613
loss += (self._fr_kappa *
F.mse_loss(q, behavior_q, reduction="none"))
return (loss.squeeze(-1) * weight).mean()
def _eval(self,
num_episodes: int,
keep_training_loops: bool = False) -> StatsDict:
phase = self._controller.phase()
if keep_training_loops:
self._controller.set_phase(Phase.EVAL | phase)
else:
self._controller.set_phase(Phase.EVAL)
self._controller.reset_phase(Phase.EVAL, limit=num_episodes)
while self._controller.count(Phase.EVAL) < num_episodes:
time.sleep(1)
stats = self._controller.stats(Phase.EVAL)
self._controller.set_phase(phase)
return stats
class ApexDQNAgentFactory(AgentFactory):
def __init__(
self,
model: ModelLike,
eps_func: Callable[[int], float],
replay_buffer: Optional[ReplayBufferLike] = None,
controller: Optional[ControllerLike] = None,
optimizer: Optional[torch.optim.Optimizer] = None,
batch_size: int = 512,
max_grad_norm: float = 40.0,
n_step: int = 1,
gamma: float = 0.99,
importance_sampling_exponent: float = 0.4,
max_abs_reward: Optional[int] = None,
rescale_value: bool = False,
value_clipping_eps: Optional[float] = 0.2,
fr_kappa: Optional[float] = 1.0,
target_sync_period: Optional[int] = None,
learning_starts: Optional[int] = None,
model_push_period: int = 10,
additional_models_to_update: Optional[List[ModelLike]] = None
) -> None:
self._model = model
self._eps_func = eps_func
self._replay_buffer = replay_buffer
self._controller = controller
self._optimizer = optimizer
self._batch_size = batch_size
self._max_grad_norm = max_grad_norm
self._n_step = n_step
self._gamma = gamma
self._importance_sampling_exponent = importance_sampling_exponent
self._max_abs_reward = max_abs_reward
self._rescale_value = rescale_value
self._value_clipping_eps = value_clipping_eps
self._fr_kappa = fr_kappa
self._target_sync_period = target_sync_period
self._learning_starts = learning_starts
self._model_push_period = model_push_period
self._additional_models_to_update = additional_models_to_update
def __call__(self, index: int) -> ApexDQNAgent:
model = self._make_arg(self._model, index)
eps = self._eps_func(index)
replay_buffer = self._make_arg(self._replay_buffer, index)
controller = self._make_arg(self._controller, index)
return ApexDQNAgent(
model,
eps,
replay_buffer,
controller,
self._optimizer,
self._batch_size,
self._max_grad_norm,
self._n_step,
self._gamma,
self._importance_sampling_exponent,
self._max_abs_reward,
self._rescale_value,
self._value_clipping_eps,
self._fr_kappa,
self._target_sync_period,
self._learning_starts,
self._model_push_period,
additional_models_to_update=self._additional_models_to_update)
class ConstantEpsFunc:
def __init__(self, eps: float) -> None:
self._eps = eps
def __call__(self, index: int) -> float:
return self._eps
class FlexibleEpsFunc:
"""
Eps function following https://arxiv.org/abs/1803.00933
"""
def __init__(self, eps: float, num: int, alpha: float = 7.0) -> None:
self._eps = eps
self._num = num
self._alpha = alpha
def __call__(self, index: int) -> float:
if self._num == 1:
return self._eps
return self._eps**(1.0 + self._alpha * (index / (self._num - 1)))
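# Usage sketch (illustrative values, not part of the original file): with the
# Ape-X style schedule, per-actor epsilons decay geometrically across actors.
if __name__ == "__main__":
    eps_fn = FlexibleEpsFunc(eps=0.4, num=8)
    print([round(eps_fn(i), 4) for i in range(8)])  # 0.4 down to 0.4 ** 8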
| 17,509 | 36.255319 | 80 | py |
rlmeta | rlmeta-main/rlmeta/utils/loss_utils.py | # Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Any, Dict, Optional
import torch
import torch.nn as nn
_NAME_TO_LOSS = {
"huber": nn.HuberLoss,
"huber_loss": nn.HuberLoss,
"huberloss": nn.HuberLoss,
"l1": nn.L1Loss,
"l1_loss": nn.L1Loss,
"l1loss": nn.L1Loss,
"mse": nn.MSELoss,
"mse_loss": nn.MSELoss,
"mseloss": nn.MSELoss,
"smooth_l1": nn.SmoothL1Loss,
"smooth_l1_loss": nn.SmoothL1Loss,
"smoothl1": nn.SmoothL1Loss,
"smoothl1loss": nn.SmoothL1Loss,
}
def get_loss(name: str, args: Optional[Dict[str, Any]] = None) -> nn.Module:
loss = _NAME_TO_LOSS[name.lower()]
return loss(
reduction="none") if args is None else loss(reduction="none", **args)
| 872 | 26.28125 | 77 | py |
rlmeta | rlmeta-main/rlmeta/utils/optimizer_utils.py | # Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Any, Iterable, Dict, Optional, Union
import torch
_NAME_TO_OPTIMIZER = {
"adadelta": torch.optim.Adadelta,
"adagrad": torch.optim.Adagrad,
"adam": torch.optim.Adam,
"adamw": torch.optim.AdamW,
"sparseadam": torch.optim.SparseAdam,
"adamax": torch.optim.Adamax,
"asgd": torch.optim.ASGD,
"lbfgs": torch.optim.LBFGS,
"nadam": torch.optim.NAdam,
"radam": torch.optim.RAdam,
"rmsprop": torch.optim.RMSprop,
"rprop": torch.optim.Rprop,
"sgd": torch.optim.SGD,
}
def make_optimizer(params: Union[Iterable[torch.Tensor],
Dict[str, torch.Tensor]], name: str,
**kwargs) -> torch.optim.Optimizer:
optimizer_cls = _NAME_TO_OPTIMIZER[name.lower()]
return optimizer_cls(params, **kwargs)
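# Usage sketch (assumed hyperparameters, not part of the original file): extra
# kwargs are forwarded verbatim to the underlying torch optimizer constructor.
if __name__ == "__main__":
    net = torch.nn.Linear(4, 2)
    opt = make_optimizer(net.parameters(), "adamw", lr=1e-3, weight_decay=1e-2)
    print(type(opt).__name__)  # AdamW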
| 990 | 29.96875 | 69 | py |
rlmeta | rlmeta-main/rlmeta/utils/random_utils.py | # Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import random
import numpy as np
import torch
def manual_seed(seed: int) -> None:
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
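# Usage sketch: seed all four generators once at process start, before models
# or data pipelines are built, so runs are reproducible.
if __name__ == "__main__":
    manual_seed(20220510)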
| 377 | 21.235294 | 65 | py |
rlmeta | rlmeta-main/rlmeta/utils/data_utils.py | # Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import io
import os
from typing import Any, Dict, Sequence, Tuple, Union
import numpy as np
import torch
import rlmeta.utils.nested_utils as nested_utils
from rlmeta.core.types import Tensor, NestedTensor
_NUMPY_DTYPE_TO_TORCH_MAP = {
bool: torch.bool,
np.uint8: torch.uint8,
np.int8: torch.int8,
np.int16: torch.int16,
np.int32: torch.int32,
np.int64: torch.int64,
np.float16: torch.float16,
np.float32: torch.float32,
np.float64: torch.float64,
np.complex64: torch.complex64,
np.complex128: torch.complex128,
}
_TORCH_DTYPE_TO_NUMPY_MAP = {
torch.bool: bool,
torch.uint8: np.uint8,
torch.int8: np.int8,
torch.int16: np.int16,
torch.int32: np.int32,
torch.int64: np.int64,
torch.float16: np.float16,
torch.float32: np.float32,
torch.float64: np.float64,
torch.complex64: np.complex64,
torch.complex128: np.complex128,
}
def numpy_dtype_to_torch(dtype: np.dtype) -> torch.dtype:
return _NUMPY_DTYPE_TO_TORCH_MAP[dtype]
def torch_dtype_to_numpy(dtype: torch.dtype) -> np.dtype:
return _TORCH_DTYPE_TO_NUMPY_MAP[dtype]
def size(data: Tensor) -> Sequence[int]:
if isinstance(data, np.ndarray):
return data.shape
elif isinstance(data, torch.Tensor):
return data.size()
return ()
def to_numpy(data: Tensor) -> np.ndarray:
return data.detach().cpu().numpy() if isinstance(data,
torch.Tensor) else data
def to_torch(data: Tensor) -> torch.Tensor:
if isinstance(data, np.generic):
return torch.tensor(data)
if isinstance(data, np.ndarray):
data = torch.from_numpy(data)
return data
def stack_tensors(input: Sequence[Tensor]) -> Tensor:
size = input[0].size()
# torch.cat is much faster than torch.stack
# https://github.com/pytorch/pytorch/issues/22462
return torch.stack(input) if len(size) == 0 else torch.cat(input).view(
-1, *size)
def cat_fields(input: Sequence[NestedTensor]) -> NestedTensor:
assert len(input) > 0
return nested_utils.collate_nested(lambda x: torch.cat(x), input)
def stack_fields(input: Sequence[NestedTensor]) -> NestedTensor:
assert len(input) > 0
return nested_utils.collate_nested(stack_tensors, input)
def unstack_fields(input: NestedTensor,
batch_size: int) -> Tuple[NestedTensor, ...]:
if batch_size == 1:
return (nested_utils.map_nested(lambda x: x.squeeze(0), input),)
else:
return nested_utils.unbatch_nested(lambda x: torch.unbind(x), input,
batch_size)
def serialize_to_bytes(data: Any) -> bytes:
buffer = io.BytesIO()
torch.save(data, buffer)
return buffer.getvalue()
def parse_from_bytes(bytes: bytes) -> Any:
buffer = io.BytesIO(bytes)
return torch.load(buffer)
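# Round-trip sketch (assumed payload, not part of the original file): since
# serialization goes through torch.save on an in-memory buffer, any nested
# structure torch can save survives the round trip intact.
if __name__ == "__main__":
    payload = {"obs": torch.zeros(2, 3), "step": torch.tensor(7)}
    restored = parse_from_bytes(serialize_to_bytes(payload))
    print(restored["obs"].shape, restored["step"])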
| 3,056 | 26.294643 | 76 | py |
rlmeta | rlmeta-main/rlmeta/utils/running_stats.py | # Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
class RunningRMS(nn.Module):
def __init__(self,
size: Union[int, Tuple[int]],
dtype: Optional[torch.dtype] = None) -> None:
super().__init__()
self._size = (size,) if isinstance(size, int) else size
self.register_buffer("_count", torch.zeros(1, dtype=torch.int64))
self.register_buffer("_mean_square", torch.zeros(self._size,
dtype=dtype))
def reset(self) -> None:
self._count.zero_()
self._mean_square.zero_()
def count(self) -> torch.Tensor:
return self._count
def mean_square(self) -> torch.Tensor:
return self._mean_square
def rms(self, eps: Optional[float] = None) -> torch.Tensor:
return self._mean_square.sqrt() if eps is None else ((
self._mean_square + eps).sqrt())
def rrms(self, eps: Optional[float] = None) -> torch.Tensor:
return self._mean_square.rsqrt() if eps is None else ((
self._mean_square + eps).rsqrt())
def update(self, x: torch.Tensor) -> None:
size = x.size()
if size == self._size:
self._count += 1
self._mean_square += (x.square() - self._mean_square) / self._count
else:
assert size[1:] == self._size
cnt = size[0]
self._count += cnt
c = 0.0 if self._count == 0 else cnt / self._count
delta = x.square().mean(dim=0) - self._mean_square
self._mean_square += c * delta
class RunningMoments(nn.Module):
def __init__(self,
size: Union[int, Tuple[int]],
dtype: Optional[torch.dtype] = None) -> None:
super().__init__()
self._size = (size,) if isinstance(size, int) else size
self.register_buffer("_m0", torch.zeros(1, dtype=torch.int64))
self.register_buffer("_m1", torch.zeros(self._size, dtype=dtype))
self.register_buffer("_m2", torch.zeros(self._size, dtype=dtype))
def reset(self) -> None:
self._m0.zero_()
self._m1.zero_()
self._m2.zero_()
def count(self) -> torch.Tensor:
return self._m0
def mean(self) -> torch.Tensor:
return self._m1
def var(self, ddof: int = 0) -> torch.Tensor:
return self._m2 / (self._m0 - ddof)
def std(self, ddof: int = 0, eps: Optional[float] = None) -> torch.Tensor:
return self.var(ddof).sqrt() if eps is None else (self.var(ddof) +
eps).sqrt()
def rstd(self, ddof: int = 0, eps: Optional[float] = None) -> torch.Tensor:
return self.var(ddof).rsqrt() if eps is None else (self.var(ddof) +
eps).rsqrt()
def update(self, x: torch.Tensor) -> None:
size = x.size()
if size == self._size:
self._m0 += 1
delta = x - self._m1
self._m1 += delta / self._m0
self._m2 += delta * (x - self._m1)
else:
assert size[1:] == self._size
m0 = size[0]
m2, m1 = torch.var_mean(x, dim=0, unbiased=False)
n = self._m0 + m0
c = 0.0 if n == 0 else m0 / n
delta = m1 - self._m1
self._m1 += c * delta
self._m2 += m0 * m2 + delta.square() * (c * self._m0)
self._m0 = n
class RunningTDError(nn.Module):
"""
Running TD Error estimation introduced by https://arxiv.org/abs/2105.05347
"""
def __init__(self,
size: Union[int, Tuple[int]],
dtype: Optional[torch.dtype] = None) -> None:
super().__init__()
self._running_gamma = RunningMoments(size, dtype)
self._running_r = RunningMoments(size, dtype)
self._running_g = RunningRMS(size, dtype)
def reset(self) -> None:
self._running_gamma.reset()
self._running_r.reset()
self._running_g.reset()
def var(self) -> torch.Tensor:
return (self._running_r.var() +
self._running_gamma.var() * self._running_g.mean_square())
def std(self) -> torch.Tensor:
return self.var().sqrt()
def update(self, gamma: torch.Tensor, r: torch.Tensor,
g: torch.Tensor) -> None:
self._running_gamma.update(gamma)
self._running_r.update(r)
self._running_g.update(g)
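# Self-check sketch (illustrative data, not part of the original file): the
# batched branch of RunningMoments.update uses Chan's parallel merge, so the
# running moments should match a direct computation over the same data.
if __name__ == "__main__":
    rm = RunningMoments(3)
    x = torch.randn(100, 3)
    rm.update(x)
    print(torch.allclose(rm.mean(), x.mean(dim=0), atol=1e-5))
    print(torch.allclose(rm.var(), x.var(dim=0, unbiased=False), atol=1e-5))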
| 4,699 | 33.306569 | 79 | py |
rlmeta | rlmeta-main/rlmeta/data/segment_tree.py | # Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Optional, Union
import numpy as np
import torch
from _rlmeta_extension import SumSegmentTreeFp32, SumSegmentTreeFp64
from rlmeta.core.types import Tensor
SumSegmentTreeImpl = Union[SumSegmentTreeFp32, SumSegmentTreeFp64]
Index = Union[int, np.ndarray, torch.Tensor]
Value = Union[float, np.ndarray, torch.Tensor]
class SumSegmentTree:
def __init__(self, size: int, dtype: np.dtype = np.float64) -> None:
self._dtype = dtype
if dtype == np.float32:
self._impl = SumSegmentTreeFp32(size)
elif dtype == np.float64:
self._impl = SumSegmentTreeFp64(size)
else:
assert False, "Unsupported data type " + str(dtype)
@property
def dtype(self) -> np.dtype:
return self._dtype
@property
def size(self) -> int:
return self._impl.size
@property
def capacity(self) -> int:
return self._impl.capacity
def __len__(self) -> int:
return len(self._impl)
def __getitem__(self, index: Index) -> Value:
return self._impl[index]
def at(self, index: Index) -> Value:
return self._impl.at(index)
def __setitem__(self, index: Index, value: Value) -> None:
self._impl[index] = value
def update(self,
index: Index,
value: Value,
mask: Optional[Tensor] = None) -> None:
if mask is None:
self._impl.update(index, value)
else:
self._impl.update(index, value, mask)
def query(self, l: Index, r: Index) -> Value:
return self._impl.query(l, r)
def scan_lower_bound(self, value: Value) -> Index:
return self._impl.scan_lower_bound(value)
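# Usage sketch for prioritized sampling (illustrative values; assumes
# scan_lower_bound returns the first index whose inclusive prefix sum exceeds
# the queried value, as in a standard sum tree):
if __name__ == "__main__":
    tree = SumSegmentTree(4)
    tree[np.arange(4)] = np.array([0.1, 0.2, 0.3, 0.4])
    print(tree.query(0, 4))             # total mass, expected 1.0
    print(tree.scan_lower_bound(0.55))  # expected to land on index 2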
| 1,899 | 26.941176 | 72 | py |
JOLLE | JOLLE-main/label_embedding_python/run_nn_ce_lshtc1.py | import torch
import numpy as np
from time import time
from sklearn.datasets import load_svmlight_files
import math
from nn_utils import *
import sys
seed = 20220510  # base seed used to derive random seeds for the functions below
max_int = np.iinfo(np.int32).max
rng = np.random.default_rng(seed)
train_path = ""  # TODO: fill the data path
val_path = ""  # TODO: fill the data path
test_path = ""  # TODO: fill the data path
X_train, y_train, X_val, y_val, X_test, y_test = load_svmlight_files((train_path, val_path, test_path), dtype=np.float32, multilabel=False)
print("Neural neworks baseline on LSHTC1")
print("num of training data: ", X_train.shape[0])
print("num of validation data: ", X_val.shape[0])
print("num of test data: ", X_test.shape[0])
print("num of features: ", X_train.shape[1])
num_classes = len(set(y_train.tolist() + y_val.tolist() + y_test.tolist()))
print("num of classes: ", num_classes)
y_train = y_train.astype(np.int32)
y_val = y_val.astype(np.int32)
y_test = y_test.astype(np.int32)
num_features = X_train.shape[1]
class Net(torch.nn.Module):
def __init__(self, num_features, hidden_dim, num_classes):
super(Net, self).__init__()
self.model = torch.nn.Sequential(
torch.nn.Linear(num_features, hidden_dim),
torch.nn.ReLU(),
torch.nn.Linear(hidden_dim, num_classes),
)
def forward(self, x):
return self.model(x)
train_loader = torch.utils.data.DataLoader(sparse_dataset(X_train, y_train), batch_size=2048, shuffle=True, num_workers=4, pin_memory=True, collate_fn=sparse_collate_coo)
test_loader = torch.utils.data.DataLoader(sparse_dataset(X_test, y_test), batch_size=1024, shuffle=True, num_workers=4, pin_memory=True, collate_fn=sparse_collate_coo)
val_loader = torch.utils.data.DataLoader(sparse_dataset(X_val, y_val), batch_size=1024, shuffle=True, num_workers=4, pin_memory=True, collate_fn=sparse_collate_coo)
device = torch.device("cuda")
epochs = 10
loss_f = torch.nn.CrossEntropyLoss(reduction='sum')
model = Net(num_features, 4096, num_classes).to(device)
optimizer = torch.optim.Adamax(model.parameters(), lr=1e-2, weight_decay=0e-6)
scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=[5, ], gamma=0.1)
epoch_time_hist = [0 ,]
train_time = 0
val_loss_hist = []
val_acc_hist = []
test_loss_hist = []
test_acc_hist = []
val_loss, val_acc = test_ce(model, loss_f, device, val_loader)
print("before training. validation results. l2_loss: {:.6f}, accuracy: {:.4f}".format(val_loss, val_acc))
test_loss, test_acc = test_ce(model, loss_f, device, test_loader)
print("before training. test results. l2_loss: {:.6f}, accuracy: {:.4f}".format(test_loss, test_acc))
for epoch in range(1, epochs+1):
start = time()
train_ce(model, loss_f, device, train_loader, optimizer, epoch)
scheduler.step()
train_time += time() - start
val_loss, val_acc = test_ce(model, loss_f, device, val_loader)
print("validation results. l2_loss: {:.6f}, accuracy: {:.4f}".format(val_loss, val_acc))
test_loss, test_acc = test_ce(model, loss_f, device, test_loader)
print("test results. l2_loss: {:.6f}, accuracy: {:.4f}".format(test_loss, test_acc))
val_loss_hist.append(val_loss)
val_acc_hist.append(val_acc)
test_loss_hist.append(test_loss)
test_acc_hist.append(test_acc)
epoch_time_hist.append(train_time)
# measure prediction time:
prediction_start = time()
test_loss, test_acc = test_ce(model, loss_f, device, test_loader)
prediction_time = time() - prediction_start
print("validation loss: ", val_loss_hist)
print("validation accuracy: ", val_acc_hist)
print("test loss: ", test_loss_hist)
print("test accuracy: ", test_acc_hist)
print("training time by epoch = ", epoch_time_hist)
print("prediction time = ", prediction_time) | 3,797 | 40.282609 | 170 | py |
JOLLE | JOLLE-main/label_embedding_python/run_nn_dmoz.py | import torch
import numpy as np
from time import time
from sklearn.datasets import load_svmlight_files
import math
from nn_utils import *
import sys
seed = 20230508
max_int = np.iinfo(np.int32).max
rng = np.random.default_rng(seed)
train_path = ""  # TODO: fill the data path
val_path = ""  # TODO: fill the data path
test_path = ""  # TODO: fill the data path
X_train, y_train, X_val, y_val, X_test, y_test = load_svmlight_files((train_path, val_path, test_path), dtype=np.float32, multilabel=False)
print("Experiments on Dmoz")
print("num of training data: ", X_train.shape[0])
print("num of validation data: ", X_val.shape[0])
print("num of test data: ", X_test.shape[0])
print("num of features: ", X_train.shape[1])
num_classes = len(set(y_train.tolist() + y_val.tolist() + y_test.tolist()))
y_train = y_train.astype(np.int32)
y_val = y_val.astype(np.int32)
y_test = y_test.astype(np.int32)
num_features = X_train.shape[1]
class Net(torch.nn.Module):
def __init__(self, num_features, hidden_dim, embed_dim):
super(Net, self).__init__()
self.model = torch.nn.Sequential(
torch.nn.Linear(num_features, hidden_dim),
torch.nn.ReLU(),
torch.nn.Linear(hidden_dim, embed_dim),
)
def forward(self, x):
x = self.model(x)
row_norms = torch.norm(x, dim=1, p=2).unsqueeze(1)
return x/row_norms
train_loader = torch.utils.data.DataLoader(sparse_dataset(X_train, y_train), batch_size=256, shuffle=True, num_workers=4, pin_memory=True, collate_fn=sparse_collate_coo)
test_loader = torch.utils.data.DataLoader(sparse_dataset(X_test, y_test), batch_size=1024, shuffle=True, num_workers=4, pin_memory=True, collate_fn=sparse_collate_coo)
val_loader = torch.utils.data.DataLoader(sparse_dataset(X_val, y_val), batch_size=1024, shuffle=True, num_workers=4, pin_memory=True, collate_fn=sparse_collate_coo)
device = torch.device("cuda")
def run_exp(embed_dim):
label_embed = (rng.integers(low=0, high=2, size=(num_classes, embed_dim)) * 2 - 1) / math.sqrt(embed_dim)
epsilon = np.max(np.abs(label_embed @ label_embed.T - np.eye(num_classes)))
print("epsilon = ", epsilon)
label_embed = torch.tensor(np.float32(label_embed)).to(device)
epochs = 5
loss_f = torch.nn.MSELoss(reduction='sum')
model = Net(num_features=num_features, hidden_dim=2500, embed_dim=embed_dim).to(device)
optimizer = torch.optim.Adamax(model.parameters(), lr=1e-3, weight_decay=0e-5)
scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=[2, 4], gamma=0.1)
epoch_time_hist = []
train_time = 0
val_loss_hist = []
val_acc_hist = []
test_loss_hist = []
test_acc_hist = []
for epoch in range(1, epochs+1):
start = time()
train_le(model, label_embed, loss_f, device, train_loader, optimizer, epoch)
scheduler.step()
train_time += time() - start
val_loss, val_acc = test_le(model, label_embed, loss_f, device, val_loader)
print("validation results. l2_loss: {:.6f}, accuracy: {:.4f}".format(val_loss, val_acc))
test_loss, test_acc = test_le(model, label_embed, loss_f, device, test_loader)
print("test results. l2_loss: {:.6f}, accuracy: {:.4f}".format(test_loss, test_acc))
val_loss_hist.append(val_loss)
val_acc_hist.append(val_acc)
test_loss_hist.append(test_loss)
test_acc_hist.append(test_acc)
epoch_time_hist.append(train_time)
# measure prediction time:
prediction_start = time()
test_loss, test_acc = test_le(model, label_embed, loss_f, device, test_loader)
prediction_time = time() - prediction_start
return val_loss_hist, val_acc_hist, test_loss_hist, test_acc_hist, epoch_time_hist, prediction_time
val_loss_hist, val_acc_hist, test_loss_hist, test_acc_hist, epoch_time_hist, prediction_time = run_exp(int(sys.argv[1]))
print("validation loss: ", val_loss_hist)
print("validation accuracy: ", val_acc_hist)
print("test loss: ", test_loss_hist)
print("test accuracy: ", test_acc_hist)
print("training time by epoch = ", epoch_time_hist)
print("prediction time = ", prediction_time) | 4,139 | 42.125 | 169 | py |
JOLLE | JOLLE-main/label_embedding_python/run_nn_odp.py | import torch
import numpy as np
from time import time
from sklearn.datasets import load_svmlight_files
import math
from nn_utils import *
import sys
seed = 20230508
max_int = np.iinfo(np.int32).max
rng = np.random.default_rng(seed)
train_path = ""  # TODO: fill the data path
val_path = ""  # TODO: fill the data path
test_path = ""  # TODO: fill the data path
X_train, y_train, X_val, y_val, X_test, y_test = load_svmlight_files((train_path, val_path, test_path), dtype=np.float32, multilabel=False)
print("Experiments on Dmoz")
print("num of training data: ", X_train.shape[0])
print("num of validation data: ", X_val.shape[0])
print("num of test data: ", X_test.shape[0])
print("num of features: ", X_train.shape[1])
num_classes = len(set(y_train.tolist() + y_val.tolist() + y_test.tolist()))
y_train = y_train.astype(np.int32)
y_val = y_val.astype(np.int32)
y_test = y_test.astype(np.int32)
num_features = X_train.shape[1]
class Net(torch.nn.Module):
def __init__(self, num_features, hidden_dim, embed_dim):
super(Net, self).__init__()
self.model = torch.nn.Sequential(
torch.nn.Linear(num_features, hidden_dim),
torch.nn.ReLU(),
torch.nn.BatchNorm1d(hidden_dim),
torch.nn.Linear(hidden_dim, 16*hidden_dim),
torch.nn.ReLU(),
torch.nn.BatchNorm1d(16*hidden_dim),
torch.nn.Linear(16*hidden_dim, 8*hidden_dim),
torch.nn.ReLU(),
torch.nn.BatchNorm1d(8*hidden_dim),
torch.nn.Linear(8*hidden_dim, embed_dim),
)
def forward(self, x):
x = self.model(x)
row_norms = torch.norm(x, dim=1, p=2).unsqueeze(1)
return x/row_norms
train_loader = torch.utils.data.DataLoader(sparse_dataset(X_train, y_train), batch_size=2048, shuffle=True, num_workers=4, pin_memory=True, collate_fn=sparse_collate_coo)
test_loader = torch.utils.data.DataLoader(sparse_dataset(X_test, y_test), batch_size=1024, shuffle=True, num_workers=4, pin_memory=True, collate_fn=sparse_collate_coo)
val_loader = torch.utils.data.DataLoader(sparse_dataset(X_val, y_val), batch_size=1024, shuffle=True, num_workers=4, pin_memory=True, collate_fn=sparse_collate_coo)
device = torch.device("cuda")
def run_exp(embed_dim):
label_embed = (rng.integers(low=0, high=2, size=(num_classes, embed_dim)) * 2 - 1) / math.sqrt(embed_dim)
label_embed = torch.tensor(np.float32(label_embed)).to(device)
epochs = 20
loss_f = torch.nn.MSELoss(reduction='sum')
model = Net(num_features=num_features, hidden_dim=2048, embed_dim=embed_dim).to(device)
optimizer = torch.optim.Adamax(model.parameters(), lr=1e-3, weight_decay=0e-5)
scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=[10, ], gamma=0.1)
epoch_time_hist = []
train_time = 0
val_loss_hist = []
val_acc_hist = []
test_loss_hist = []
test_acc_hist = []
for epoch in range(1, epochs+1):
start = time()
train_le(model, label_embed, loss_f, device, train_loader, optimizer, epoch)
scheduler.step()
train_time += time() - start
val_loss, val_acc = test_le(model, label_embed, loss_f, device, val_loader)
print("validation results. l2_loss: {:.6f}, accuracy: {:.4f}".format(val_loss, val_acc))
test_loss, test_acc = test_le(model, label_embed, loss_f, device, test_loader)
print("test results. l2_loss: {:.6f}, accuracy: {:.4f}".format(test_loss, test_acc))
val_loss_hist.append(val_loss)
val_acc_hist.append(val_acc)
test_loss_hist.append(test_loss)
test_acc_hist.append(test_acc)
epoch_time_hist.append(train_time)
# measure prediction time:
prediction_start = time()
test_loss, test_acc = test_le(model, label_embed, loss_f, device, test_loader)
prediction_time = time() - prediction_start
return val_loss_hist, val_acc_hist, test_loss_hist, test_acc_hist, epoch_time_hist, prediction_time
val_loss_hist, val_acc_hist, test_loss_hist, test_acc_hist, epoch_time_hist, prediction_time = run_exp(int(sys.argv[1]))
print("validation loss: ", val_loss_hist)
print("validation accuracy: ", val_acc_hist)
print("test loss: ", test_loss_hist)
print("test accuracy: ", test_acc_hist)
print("training time by epoch = ", epoch_time_hist)
print("prediction time = ", prediction_time) | 4,345 | 42.029703 | 170 | py |
JOLLE | JOLLE-main/label_embedding_python/run_nn_lshtc1.py | import torch
import numpy as np
from time import time
from sklearn.datasets import load_svmlight_files
import math
from nn_utils import *
import sys
seed = 20230508
max_int = np.iinfo(np.int32).max
rng = np.random.default_rng(seed)
train_path = ""  # TODO: fill the data path
val_path = ""  # TODO: fill the data path
test_path = ""  # TODO: fill the data path
X_train, y_train, X_val, y_val, X_test, y_test = load_svmlight_files((train_path, val_path, test_path), dtype=np.float32, multilabel=False)
print("Experiments on LSHTC1")
print("num of training data: ", X_train.shape[0])
print("num of validation data: ", X_val.shape[0])
print("num of test data: ", X_test.shape[0])
print("num of features: ", X_train.shape[1])
num_classes = len(set(y_train.tolist() + y_val.tolist() + y_test.tolist()))
y_train = y_train.astype(np.int32)
y_val = y_val.astype(np.int32)
y_test = y_test.astype(np.int32)
num_features = X_train.shape[1]
class Net(torch.nn.Module):
def __init__(self, num_features, hidden_dim, embed_dim):
super(Net, self).__init__()
self.model = torch.nn.Sequential(
torch.nn.Linear(num_features, hidden_dim),
torch.nn.ReLU(),
torch.nn.Linear(hidden_dim, embed_dim),
)
def forward(self, x):
x = self.model(x)
row_norms = torch.norm(x, dim=1, p=2).unsqueeze(1)
return x/row_norms
train_loader = torch.utils.data.DataLoader(sparse_dataset(X_train, y_train), batch_size=128, shuffle=True, num_workers=4, pin_memory=True, collate_fn=sparse_collate_coo)
test_loader = torch.utils.data.DataLoader(sparse_dataset(X_test, y_test), batch_size=1024, shuffle=True, num_workers=4, pin_memory=True, collate_fn=sparse_collate_coo)
val_loader = torch.utils.data.DataLoader(sparse_dataset(X_val, y_val), batch_size=1024, shuffle=True, num_workers=4, pin_memory=True, collate_fn=sparse_collate_coo)
device = torch.device("cuda")
def run_exp(embed_dim):
label_embed = (rng.integers(low=0, high=2, size=(num_classes, embed_dim)) * 2 - 1) / math.sqrt(embed_dim)
epsilon = np.max(np.abs(label_embed @ label_embed.T - np.eye(num_classes)))
print("epsilon = ", epsilon)
label_embed = torch.tensor(np.float32(label_embed)).to(device)
epochs = 5
loss_f = torch.nn.MSELoss(reduction='sum')
model = Net(num_features=num_features, hidden_dim=4096, embed_dim=embed_dim).to(device)
optimizer = torch.optim.Adamax(model.parameters(), lr=1e-3, weight_decay=0e-5)
scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=[2 ], gamma=0.1)
epoch_time_hist = [0 ,]
train_time = 0
val_loss_hist = []
val_acc_hist = []
test_loss_hist = []
test_acc_hist = []
val_loss, val_acc = test_le(model, label_embed, loss_f, device, val_loader)
print("before training. validation results. l2_loss: {:.6f}, accuracy: {:.4f}".format(val_loss, val_acc))
test_loss, test_acc = test_le(model, label_embed, loss_f, device, test_loader)
print("before training. test results. l2_loss: {:.6f}, accuracy: {:.4f}".format(test_loss, test_acc))
for epoch in range(1, epochs+1):
start = time()
train_le(model, label_embed, loss_f, device, train_loader, optimizer, epoch)
scheduler.step()
train_time += time() - start
val_loss, val_acc = test_le(model, label_embed, loss_f, device, val_loader)
print("validation results. l2_loss: {:.6f}, accuracy: {:.4f}".format(val_loss, val_acc))
test_loss, test_acc = test_le(model, label_embed, loss_f, device, test_loader)
print("test results. l2_loss: {:.6f}, accuracy: {:.4f}".format(test_loss, test_acc))
val_loss_hist.append(val_loss)
val_acc_hist.append(val_acc)
test_loss_hist.append(test_loss)
test_acc_hist.append(test_acc)
epoch_time_hist.append(train_time)
# measure prediction time:
prediction_start = time()
test_loss, test_acc = test_le(model, label_embed, loss_f, device, test_loader)
prediction_time = time() - prediction_start
return val_loss_hist, val_acc_hist, test_loss_hist, test_acc_hist, epoch_time_hist, prediction_time
val_loss_hist, val_acc_hist, test_loss_hist, test_acc_hist, epoch_time_hist, prediction_time = run_exp(int(sys.argv[1]))
print("validation loss: ", val_loss_hist)
print("validation accuracy: ", val_acc_hist)
print("test loss: ", test_loss_hist)
print("test accuracy: ", test_acc_hist)
print("training time by epoch = ", epoch_time_hist)
print("prediction time = ", prediction_time) | 4,522 | 44.23 | 169 | py |
JOLLE | JOLLE-main/label_embedding_python/run_nn_sq_dmoz.py | import torch
import numpy as np
from time import time
from sklearn.datasets import load_svmlight_files
import math
from nn_utils import *
import sys
seed = 20220510  # base seed used to derive random seeds for the functions below
max_int = np.iinfo(np.int32).max
rng = np.random.default_rng(seed)
train_path = ""  # TODO: fill the data path
val_path = ""  # TODO: fill the data path
test_path = ""  # TODO: fill the data path
X_train, y_train, X_val, y_val, X_test, y_test = load_svmlight_files((train_path, val_path, test_path), dtype=np.float32, multilabel=False)
print("Neural neworks baseline on Dmoz")
print("num of training data: ", X_train.shape[0])
print("num of validation data: ", X_val.shape[0])
print("num of test data: ", X_test.shape[0])
print("num of features: ", X_train.shape[1])
num_classes = len(set(y_train.tolist() + y_val.tolist() + y_test.tolist()))
print("num of classes: ", num_classes)
y_train = y_train.astype(np.int32)
y_val = y_val.astype(np.int32)
y_test = y_test.astype(np.int32)
num_features = X_train.shape[1]
class Net(torch.nn.Module):
def __init__(self, num_features, hidden_dim, num_classes):
super(Net, self).__init__()
self.model = torch.nn.Sequential(
torch.nn.Linear(num_features, hidden_dim),
torch.nn.ReLU(),
torch.nn.Linear(hidden_dim, num_classes),
)
def forward(self, x):
return self.model(x)
train_loader = torch.utils.data.DataLoader(sparse_dataset(X_train, y_train), batch_size=256, shuffle=True, num_workers=4, pin_memory=True, collate_fn=sparse_collate_coo)
test_loader = torch.utils.data.DataLoader(sparse_dataset(X_test, y_test), batch_size=1024, shuffle=True, num_workers=4, pin_memory=True, collate_fn=sparse_collate_coo)
val_loader = torch.utils.data.DataLoader(sparse_dataset(X_val, y_val), batch_size=1024, shuffle=True, num_workers=4, pin_memory=True, collate_fn=sparse_collate_coo)
device = torch.device("cuda")
epochs = 5
loss_f = SquaredLoss()
model = Net(num_features, 2500, num_classes).to(device)
optimizer = torch.optim.Adamax(model.parameters(), lr=1e-3, weight_decay=0e-6)
scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=[3, ], gamma=0.1)
epoch_time_hist = [0 ,]
train_time = 0
val_loss_hist = []
val_acc_hist = []
test_loss_hist = []
test_acc_hist = []
val_loss, val_acc = test_ce(model, loss_f, device, val_loader)
print("before training. validation results. l2_loss: {:.6f}, accuracy: {:.4f}".format(val_loss, val_acc))
test_loss, test_acc = test_ce(model, loss_f, device, test_loader)
print("before training. test results. l2_loss: {:.6f}, accuracy: {:.4f}".format(test_loss, test_acc))
for epoch in range(1, epochs+1):
start = time()
train_ce(model, loss_f, device, train_loader, optimizer, epoch)
scheduler.step()
train_time += time() - start
val_loss, val_acc = test_ce(model, loss_f, device, val_loader)
print("validation results. l2_loss: {:.6f}, accuracy: {:.4f}".format(val_loss, val_acc))
test_loss, test_acc = test_ce(model, loss_f, device, test_loader)
print("test results. l2_loss: {:.6f}, accuracy: {:.4f}".format(test_loss, test_acc))
val_loss_hist.append(val_loss)
val_acc_hist.append(val_acc)
test_loss_hist.append(test_loss)
test_acc_hist.append(test_acc)
epoch_time_hist.append(train_time)
# measure prediction time:
prediction_start = time()
test_loss, test_acc = test_ce(model, loss_f, device, test_loader)
prediction_time = time() - prediction_start
print("validation loss: ", val_loss_hist)
print("validation accuracy: ", val_acc_hist)
print("test loss: ", test_loss_hist)
print("test accuracy: ", test_acc_hist)
print("training time by epoch = ", epoch_time_hist)
print("prediction time = ", prediction_time) | 3,764 | 39.923913 | 169 | py |
JOLLE | JOLLE-main/label_embedding_python/run_nn_ce_dmoz.py | import torch
import numpy as np
from time import time
from sklearn.datasets import load_svmlight_files
import math
from nn_utils import *
import sys
seed = 20220510  # base seed used to derive random seeds for the functions below
max_int = np.iinfo(np.int32).max
rng = np.random.default_rng(seed)
train_path = ""  # TODO: fill the data path
val_path = ""  # TODO: fill the data path
test_path = ""  # TODO: fill the data path
X_train, y_train, X_val, y_val, X_test, y_test = load_svmlight_files((train_path, val_path, test_path), dtype=np.float32, multilabel=False)
print("Neural neworks baseline on Dmoz")
print("num of training data: ", X_train.shape[0])
print("num of validation data: ", X_val.shape[0])
print("num of test data: ", X_test.shape[0])
print("num of features: ", X_train.shape[1])
num_classes = len(set(y_train.tolist() + y_val.tolist() + y_test.tolist()))
print("num of classes: ", num_classes)
y_train = y_train.astype(np.int32)
y_val = y_val.astype(np.int32)
y_test = y_test.astype(np.int32)
num_features = X_train.shape[1]
class Net(torch.nn.Module):
def __init__(self, num_features, hidden_dim, num_classes):
super(Net, self).__init__()
self.model = torch.nn.Sequential(
torch.nn.Linear(num_features, hidden_dim),
torch.nn.ReLU(),
torch.nn.Linear(hidden_dim, num_classes),
)
def forward(self, x):
return self.model(x)
train_loader = torch.utils.data.DataLoader(sparse_dataset(X_train, y_train), batch_size=256, shuffle=True, num_workers=4, pin_memory=True, collate_fn=sparse_collate_coo)
test_loader = torch.utils.data.DataLoader(sparse_dataset(X_test, y_test), batch_size=1024, shuffle=True, num_workers=4, pin_memory=True, collate_fn=sparse_collate_coo)
val_loader = torch.utils.data.DataLoader(sparse_dataset(X_val, y_val), batch_size=1024, shuffle=True, num_workers=4, pin_memory=True, collate_fn=sparse_collate_coo)
device = torch.device("cuda")
epochs = 5
loss_f = torch.nn.CrossEntropyLoss(reduction='sum')
model = Net(num_features, 2500, num_classes).to(device)
optimizer = torch.optim.Adamax(model.parameters(), lr=1e-2, weight_decay=0e-6)
scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=[3, ], gamma=0.1)
epoch_time_hist = [0 ,]
train_time = 0
val_loss_hist = []
val_acc_hist = []
test_loss_hist = []
test_acc_hist = []
val_loss, val_acc = test_ce(model, loss_f, device, val_loader)
print("before training. validation results. l2_loss: {:.6f}, accuracy: {:.4f}".format(val_loss, val_acc))
test_loss, test_acc = test_ce(model, loss_f, device, test_loader)
print("before training. test results. l2_loss: {:.6f}, accuracy: {:.4f}".format(test_loss, test_acc))
for epoch in range(1, epochs+1):
start = time()
train_ce(model, loss_f, device, train_loader, optimizer, epoch)
scheduler.step()
train_time += time() - start
val_loss, val_acc = test_ce(model, loss_f, device, val_loader)
print("validation results. l2_loss: {:.6f}, accuracy: {:.4f}".format(val_loss, val_acc))
test_loss, test_acc = test_ce(model, loss_f, device, test_loader)
print("test results. l2_loss: {:.6f}, accuracy: {:.4f}".format(test_loss, test_acc))
val_loss_hist.append(val_loss)
val_acc_hist.append(val_acc)
test_loss_hist.append(test_loss)
test_acc_hist.append(test_acc)
epoch_time_hist.append(train_time)
# measure prediction time:
prediction_start = time()
test_loss, test_acc = test_ce(model, loss_f, device, test_loader)
prediction_time = time() - prediction_start
print("validation loss: ", val_loss_hist)
print("validation accuracy: ", val_acc_hist)
print("test loss: ", test_loss_hist)
print("test accuracy: ", test_acc_hist)
print("training time by epoch = ", epoch_time_hist)
print("prediction time = ", prediction_time) | 3,793 | 40.23913 | 169 | py |
JOLLE | JOLLE-main/label_embedding_python/run_nn_sq_odp.py | import torch
import numpy as np
from time import time
from sklearn.datasets import load_svmlight_files
import math
from nn_utils import *
seed = 20230508
max_int = np.iinfo(np.int32).max
rng = np.random.default_rng(seed)
train_path = ""  # TODO: fill the data path
val_path = ""  # TODO: fill the data path
test_path = ""  # TODO: fill the data path
X_train, y_train, X_val, y_val, X_test, y_test = load_svmlight_files((train_path, val_path, test_path), dtype=np.float32, multilabel=False)
print("Experiments on odp")
print("num of training data: ", X_train.shape[0])
print("num of validation data: ", X_val.shape[0])
print("num of test data: ", X_test.shape[0])
print("num of features: ", X_train.shape[1])
num_classes = len(set(y_train.tolist() + y_val.tolist() + y_test.tolist()))
y_train = y_train.astype(np.int32)
y_val = y_val.astype(np.int32)
y_test = y_test.astype(np.int32)
num_features = X_train.shape[1]
class Net(torch.nn.Module):
def __init__(self, num_features, hidden_dim, embed_dim):
super(Net, self).__init__()
self.model = torch.nn.Sequential(
torch.nn.Linear(num_features, hidden_dim),
torch.nn.ReLU(),
torch.nn.BatchNorm1d(hidden_dim),
torch.nn.Linear(hidden_dim, 16*hidden_dim),
torch.nn.ReLU(),
torch.nn.BatchNorm1d(16*hidden_dim),
torch.nn.Linear(16*hidden_dim, 4*hidden_dim),
torch.nn.ReLU(),
torch.nn.BatchNorm1d(4*hidden_dim),
torch.nn.Linear(4*hidden_dim, embed_dim),
)
def forward(self, x):
x = self.model(x)
row_norms = torch.norm(x, dim=1, p=2).unsqueeze(1)
return x/row_norms
train_loader = torch.utils.data.DataLoader(sparse_dataset(X_train, y_train), batch_size=2048, shuffle=True, num_workers=4, pin_memory=True, collate_fn=sparse_collate_coo)
test_loader = torch.utils.data.DataLoader(sparse_dataset(X_test, y_test), batch_size=1024, shuffle=True, num_workers=4, pin_memory=True, collate_fn=sparse_collate_coo)
val_loader = torch.utils.data.DataLoader(sparse_dataset(X_val, y_val), batch_size=1024, shuffle=True, num_workers=4, pin_memory=True, collate_fn=sparse_collate_coo)
device = torch.device("cuda")
epochs = 20
loss_f = SquaredLoss()
model = Net(num_features, 2048, num_classes).to(device)
optimizer = torch.optim.Adamax(model.parameters(), lr=1e-3, weight_decay=0e-6)
scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=[10, ], gamma=0.1)
epoch_time_hist = [0 ,]
train_time = 0
val_loss_hist = []
val_acc_hist = []
test_loss_hist = []
test_acc_hist = []
val_loss, val_acc = test_ce(model, loss_f, device, val_loader)
print("before training. validation results. sq_loss: {:.6f}, accuracy: {:.4f}".format(val_loss, val_acc))
test_loss, test_acc = test_ce(model, loss_f, device, test_loader)
print("before training. test results. sq_loss: {:.6f}, accuracy: {:.4f}".format(test_loss, test_acc))
for epoch in range(1, epochs+1):
start = time()
train_ce(model, loss_f, device, train_loader, optimizer, epoch)
scheduler.step()
train_time += time() - start
val_loss, val_acc = test_ce(model, loss_f, device, val_loader)
print("validation results. sq_loss: {:.6f}, accuracy: {:.4f}".format(val_loss, val_acc))
test_loss, test_acc = test_ce(model, loss_f, device, test_loader)
print("test results. sq_loss: {:.6f}, accuracy: {:.4f}".format(test_loss, test_acc))
val_loss_hist.append(val_loss)
val_acc_hist.append(val_acc)
test_loss_hist.append(test_loss)
test_acc_hist.append(test_acc)
epoch_time_hist.append(train_time)
# measure prediction time:
prediction_start = time()
test_loss, test_acc = test_ce(model, loss_f, device, test_loader)
prediction_time = time() - prediction_start
print("validation loss: ", val_loss_hist)
print("validation accuracy: ", val_acc_hist)
print("test loss: ", test_loss_hist)
print("test accuracy: ", test_acc_hist)
print("training time by epoch = ", epoch_time_hist)
print("prediction time = ", prediction_time) | 4,026 | 40.091837 | 170 | py |
JOLLE | JOLLE-main/label_embedding_python/run_nn_sq_lshtc1.py | import torch
import numpy as np
from time import time
from sklearn.datasets import load_svmlight_files
import math
from nn_utils import *
import sys
seed = 20220510  # base seed used to derive random seeds for the functions below
max_int = np.iinfo(np.int32).max
rng = np.random.default_rng(seed)
train_path = ""  # TODO: fill the data path
val_path = ""  # TODO: fill the data path
test_path = ""  # TODO: fill the data path
X_train, y_train, X_val, y_val, X_test, y_test = load_svmlight_files((train_path, val_path, test_path), dtype=np.float32, multilabel=False)
print("Neural neworks baseline on LSHTC1")
print("num of training data: ", X_train.shape[0])
print("num of validation data: ", X_val.shape[0])
print("num of test data: ", X_test.shape[0])
print("num of features: ", X_train.shape[1])
num_classes = len(set(y_train.tolist() + y_val.tolist() + y_test.tolist()))
print("num of classes: ", num_classes)
y_train = y_train.astype(np.int32)
y_val = y_val.astype(np.int32)
y_test = y_test.astype(np.int32)
num_features = X_train.shape[1]
class Net(torch.nn.Module):
def __init__(self, num_features, hidden_dim, num_classes):
super(Net, self).__init__()
self.model = torch.nn.Sequential(
torch.nn.Linear(num_features, hidden_dim),
torch.nn.ReLU(),
torch.nn.Linear(hidden_dim, num_classes),
)
def forward(self, x):
x = self.model(x)
row_norms = torch.norm(x, dim=1, p=2).unsqueeze(1)
return x/row_norms
train_loader = torch.utils.data.DataLoader(sparse_dataset(X_train, y_train), batch_size=128, shuffle=True, num_workers=4, pin_memory=True, collate_fn=sparse_collate_coo)
test_loader = torch.utils.data.DataLoader(sparse_dataset(X_test, y_test), batch_size=1024, shuffle=True, num_workers=4, pin_memory=True, collate_fn=sparse_collate_coo)
val_loader = torch.utils.data.DataLoader(sparse_dataset(X_val, y_val), batch_size=1024, shuffle=True, num_workers=4, pin_memory=True, collate_fn=sparse_collate_coo)
device = torch.device("cuda")
epochs = 5
loss_f = SquaredLoss()
model = Net(num_features, 4096, num_classes).to(device)
optimizer = torch.optim.Adamax(model.parameters(), lr=1e-3, weight_decay=0e-6)
scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=[3, ], gamma=0.1)
epoch_time_hist = [0 ,]
train_time = 0
val_loss_hist = []
val_acc_hist = []
test_loss_hist = []
test_acc_hist = []
val_loss, val_acc = test_ce(model, loss_f, device, val_loader)
print("before training. validation results. l2_loss: {:.6f}, accuracy: {:.4f}".format(val_loss, val_acc))
test_loss, test_acc = test_ce(model, loss_f, device, test_loader)
print("before training. test results. l2_loss: {:.6f}, accuracy: {:.4f}".format(test_loss, test_acc))
for epoch in range(1, epochs+1):
start = time()
train_ce(model, loss_f, device, train_loader, optimizer, epoch)
scheduler.step()
train_time += time() - start
val_loss, val_acc = test_ce(model, loss_f, device, val_loader)
print("validation results. l2_loss: {:.6f}, accuracy: {:.4f}".format(val_loss, val_acc))
test_loss, test_acc = test_ce(model, loss_f, device, test_loader)
print("test results. l2_loss: {:.6f}, accuracy: {:.4f}".format(test_loss, test_acc))
val_loss_hist.append(val_loss)
val_acc_hist.append(val_acc)
test_loss_hist.append(test_loss)
test_acc_hist.append(test_acc)
epoch_time_hist.append(train_time)
# measure prediction time:
prediction_start = time()
test_loss, test_acc = test_ce(model, loss_f, device, test_loader)
prediction_time = time() - prediction_start
print("validation loss: ", val_loss_hist)
print("validation accuracy: ", val_acc_hist)
print("test loss: ", test_loss_hist)
print("test accuracy: ", test_acc_hist)
print("training time by epoch = ", epoch_time_hist)
print("prediction time = ", prediction_time) | 3,849 | 39.957447 | 169 | py |
JOLLE | JOLLE-main/label_embedding_python/run_nn_ce_odp.py | import torch
import numpy as np
from time import time
from sklearn.datasets import load_svmlight_files
import math
from nn_utils import *
seed = 20230508
max_int = np.iinfo(np.int32).max
rng = np.random.default_rng(seed)
train_path = ""  # TODO: fill the data path
val_path = ""  # TODO: fill the data path
test_path = ""  # TODO: fill the data path
X_train, y_train, X_val, y_val, X_test, y_test = load_svmlight_files((train_path, val_path, test_path), dtype=np.float32, multilabel=False)
print("Experiments on odp")
print("num of training data: ", X_train.shape[0])
print("num of validation data: ", X_val.shape[0])
print("num of test data: ", X_test.shape[0])
print("num of features: ", X_train.shape[1])
num_classes = len(set(y_train.tolist() + y_val.tolist() + y_test.tolist()))
y_train = y_train.astype(np.int32)
y_val = y_val.astype(np.int32)
y_test = y_test.astype(np.int32)
num_features = X_train.shape[1]
class Net(torch.nn.Module):
def __init__(self, num_features, hidden_dim, embed_dim):
super(Net, self).__init__()
self.model = torch.nn.Sequential(
torch.nn.Linear(num_features, hidden_dim),
torch.nn.ReLU(),
torch.nn.BatchNorm1d(hidden_dim),
torch.nn.Linear(hidden_dim, 16*hidden_dim),
torch.nn.ReLU(),
torch.nn.BatchNorm1d(16*hidden_dim),
torch.nn.Linear(16*hidden_dim, 4*hidden_dim),
torch.nn.ReLU(),
torch.nn.BatchNorm1d(4*hidden_dim),
torch.nn.Linear(4*hidden_dim, embed_dim),
)
def forward(self, x):
return self.model(x)
train_loader = torch.utils.data.DataLoader(sparse_dataset(X_train, y_train), batch_size=2048, shuffle=True, num_workers=4, pin_memory=True, collate_fn=sparse_collate_coo)
test_loader = torch.utils.data.DataLoader(sparse_dataset(X_test, y_test), batch_size=1024, shuffle=True, num_workers=4, pin_memory=True, collate_fn=sparse_collate_coo)
val_loader = torch.utils.data.DataLoader(sparse_dataset(X_val, y_val), batch_size=1024, shuffle=True, num_workers=4, pin_memory=True, collate_fn=sparse_collate_coo)
device = torch.device("cuda")
epochs = 20
loss_f = torch.nn.CrossEntropyLoss(reduction='sum')
model = Net(num_features, 2048, num_classes).to(device)
optimizer = torch.optim.Adamax(model.parameters(), lr=1e-2, weight_decay=0e-6)
scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=[10, ], gamma=0.1)
epoch_time_hist = [0 ,]
train_time = 0
val_loss_hist = []
val_acc_hist = []
test_loss_hist = []
test_acc_hist = []
val_loss, val_acc = test_ce(model, loss_f, device, val_loader)
print("before training. validation results. ce_loss: {:.6f}, accuracy: {:.4f}".format(val_loss, val_acc))
test_loss, test_acc = test_ce(model, loss_f, device, test_loader)
print("before training. test results. ce_loss: {:.6f}, accuracy: {:.4f}".format(test_loss, test_acc))
for epoch in range(1, epochs+1):
start = time()
train_ce(model, loss_f, device, train_loader, optimizer, epoch)
scheduler.step()
train_time += time() - start
val_loss, val_acc = test_ce(model, loss_f, device, val_loader)
print("validation results. ce_loss: {:.6f}, accuracy: {:.4f}".format(val_loss, val_acc))
test_loss, test_acc = test_ce(model, loss_f, device, test_loader)
print("test results. ce_loss: {:.6f}, accuracy: {:.4f}".format(test_loss, test_acc))
val_loss_hist.append(val_loss)
val_acc_hist.append(val_acc)
test_loss_hist.append(test_loss)
test_acc_hist.append(test_acc)
epoch_time_hist.append(train_time)
# measure prediction time:
prediction_start = time()
test_loss, test_acc = test_ce(model, loss_f, device, test_loader)
prediction_time = time() - prediction_start
print("validation loss: ", val_loss_hist)
print("validation accuracy: ", val_acc_hist)
print("test loss: ", test_loss_hist)
print("test accuracy: ", test_acc_hist)
print("training time by epoch = ", epoch_time_hist)
print("prediction time = ", prediction_time) | 3,972 | 40.385417 | 170 | py |
JOLLE | JOLLE-main/label_embedding_python/nn_utils.py | import torch
from sklearn.metrics import pairwise_distances
import numpy as np
class sparse_dataset(torch.utils.data.Dataset):
def __init__(self, x, y):
self.x = x
self.y = y
self.n_features = x.shape[1]
def __len__(self):
return self.x.shape[0]
def __getitem__(self, i):
return self.x.indices[self.x.indptr[i]:self.x.indptr[i+1]], self.x.data[self.x.indptr[i]:self.x.indptr[i+1]], self.y[i], self.n_features
def sparse_collate_coo(batch):
r = []
c = []
vals = []
y = []
n_features = batch[0][-1]
for i, (indices, data, yi, _) in enumerate(batch):
r.extend([i] * indices.shape[0])
c.extend(indices)
vals.extend(data)
y.append(yi)
return ([r, c], vals, (len(batch), n_features)), y
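# Minimal usage sketch for the sparse pipeline (names X and y are illustrative;
# X is assumed to be a SciPy CSR matrix and y an integer label array):
#
#   loader = torch.utils.data.DataLoader(sparse_dataset(X, y), batch_size=2,
#                                        collate_fn=sparse_collate_coo)
#   (locs, vals, size), labels = next(iter(loader))
#   batch = torch.sparse_coo_tensor(locs, vals, size=size, dtype=torch.float32)
#
# `locs` holds [row_indices, col_indices], `vals` the matching nonzero values,
# and `size` the dense shape (len(batch), n_features).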
class SquaredLoss(torch.nn.Module):
def __init__(self):
super(SquaredLoss, self).__init__()
def forward(self, outputs, targets):
one_hot_approx = torch.zeros_like(outputs)
one_hot_approx.scatter_(1, targets.unsqueeze(1), 1)
return torch.sum((outputs - one_hot_approx) ** 2)
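# SquaredLoss is the squared Euclidean distance to the one-hot encoding of the
# integer targets, i.e. equivalent (shown for illustration only) to
#   torch.nn.functional.mse_loss(outputs,
#       torch.nn.functional.one_hot(targets, outputs.shape[1]).float(),
#       reduction='sum')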
def train_le(model, label_embed, loss_f, device, train_loader, optimizer, epoch, log_interval=50):
model.train()
for idx, ((locs, vals, size), y) in enumerate(train_loader):
x = torch.sparse_coo_tensor(locs, vals, size=size, dtype=torch.float32, device=device)
y_embed = torch.index_select(label_embed, 0, torch.tensor(y, dtype=torch.int32).to(device))
optimizer.zero_grad()
embed_out = model(x)
loss = loss_f(embed_out, y_embed) / len(y)
loss.backward()
optimizer.step()
if (idx + 1) % log_interval == 0:
print("train epoch: {}, batch: {}/{}, loss: {:.6f}".format(epoch, idx+1, len(train_loader), loss.item()))
def find1NN_cuda(out_cuda, label_embed_cuda):
#dist_m = torch.cdist(out_cuda.reshape(1, out_cuda.shape[0], -1), label_embed_cuda.reshape(1, label_embed_cuda.shape[0], -1))
#dist_m = dist_m.reshape(dist_m.shape[1], -1)
#oneNNs = torch.argmin(dist_m, dim=1)
gram_m = torch.matmul(out_cuda, torch.transpose(label_embed_cuda, 0, 1))
return torch.argmax(gram_m, dim=1)
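# Note: argmax over the Gram matrix coincides with the true (Euclidean)
# 1-nearest-neighbour only when the label-embedding rows have equal norm
# (e.g. rows of a scaled orthogonal matrix); the commented cdist variant
# above is the general form. Illustrative sanity check for unit-norm rows:
#   # find1NN_cuda(out, E) is expected to agree with
#   # torch.cdist(out, E).argmin(dim=1)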
def test_le(model, label_embed, loss_f, device, test_loader):
model.eval()
mean_loss = 0.0
correct = 0
total = 0
with torch.no_grad():
for idx, ((locs, vals, size), y) in enumerate(test_loader):
x = torch.sparse_coo_tensor(locs, vals, size=size, dtype=torch.float32, device=device)
y_embed = torch.index_select(label_embed, 0, torch.tensor(y, dtype=torch.int32).to(device))
embed_out = model(x)
mean_loss += loss_f(embed_out, y_embed).item()
embed_out_detached = embed_out.detach()
preds = find1NN_cuda(embed_out_detached, label_embed).cpu().numpy()
correct += np.sum(preds==y)
total += preds.shape[0]
del x, y_embed, embed_out
return mean_loss / len(test_loader.dataset), correct/total
def train_ce(model, loss_f, device, train_loader, optimizer, epoch, log_interval=50):
model.train()
for idx, ((locs, vals, size), y) in enumerate(train_loader):
x = torch.sparse_coo_tensor(locs, vals, size=size, dtype=torch.float32, device=device)
optimizer.zero_grad()
out = model(x)
loss = loss_f(out, torch.tensor(y, dtype=torch.int64).to(device)) / len(y)
loss.backward()
optimizer.step()
        if (idx + 1) % log_interval == 0:
print("train epoch: {}, batch: {}/{}, loss: {:.6f}".format(epoch, idx+1, len(train_loader), loss.item()))
def test_ce(model, loss_f, device, test_loader):
model.eval()
mean_loss = 0.0
correct = 0
total = 0
with torch.no_grad():
for idx, ((locs, vals, size), y) in enumerate(test_loader):
x = torch.sparse_coo_tensor(locs, vals, size=size, dtype=torch.float32, device=device)
out = model(x)
mean_loss += loss_f(out, torch.tensor(y, dtype=torch.int64).to(device)).item()
preds = out.detach().cpu().argmax(dim=1, keepdim=False).numpy()
correct += np.sum(preds==np.array(y))
total += preds.shape[0]
return mean_loss / len(test_loader.dataset), correct/total | 4,296 | 41.127451 | 145 | py |
apicarver | apicarver-main/testCarver/pythonCode/runEvoMaster.py | import glob
import os
import shutil
from datetime import datetime
import constants
from constants import RUN_SCHEMATHESIS_COMMAND, APPS, STATUS_SUCCESSFUL, STATUS_SKIPPED, STATUS_ERRORED, CASETTE_YAML, \
SCHEMATHESIS_OUTPUT
from utilsRun import monitorProcess, cleanup, startProcess, restartDocker, MODE
def runAllApps(RUNTIME=30):
succesful = []
unsuccesful = []
skipped = []
for app in APPS:
if app in excludeApps:
continue
results = runAlgo(app, RUNTIME=RUNTIME)
for result in results:
status = result["status"]
command = result["command"]
if status == STATUS_SUCCESSFUL:
succesful.append(command)
elif status == STATUS_SKIPPED:
skipped.append(command)
elif status == STATUS_ERRORED:
unsuccesful.append(command)
print("succesful : {0}".format(str(len(succesful))))
print(succesful)
print("skipped : {0}".format(str(len(skipped))))
print(skipped)
print("unsuccesful : {0}".format(str(len(unsuccesful))))
print(unsuccesful)
if DRY_RUN:
print("Predicted run time : " + str(RUNTIME * len(succesful)))
def getExistingRuns(appName, ALL_CRAWLS=os.path.join(os.path.abspath(".."), "out")):
gtYaml = []
crawljaxOutputPath = os.path.abspath(os.path.join(ALL_CRAWLS, appName))
if os.path.exists(crawljaxOutputPath):
gtYaml = glob.glob(crawljaxOutputPath + "/" + constants.EVOMASTER_OUTPUT + "/" + CASETTE_YAML)
return {"path": crawljaxOutputPath, "existingValidCrawls": gtYaml}
return {"path": None, "gtYaml": gtYaml}
def getSwaggerUrl(appName):
if appName == "petclinic":
return "http://localhost:9966/petclinic/v3/api-docs"
elif appName == "parabank":
return "http://localhost:8080/parabank-3.0.0-SNAPSHOT/services/bank/swagger.yaml"
elif appName == "realworld":
return "http://localhost:3000/api"
elif appName == "booker":
return {"booking": "http://localhost:3000/booking/v3/api-docs/booking-api",
"branding" : "http://localhost:3002/branding/v3/api-docs/branding-api",
"message": "http://localhost:3006/message/v3/api-docs/message-api",
"report": "http://localhost:3005/report/v3/api-docs/report-api",
"room": "http://localhost:3001/room/v3/api-docs/room-api",
"auth": "http://localhost:3004/auth/v3/api-docs/auth-api"
}
elif appName == "jawa":
return "http://localhost:8080/v2/api-docs"
elif appName == "ecomm":
return "http://localhost:8080/api/v2/api-docs"
elif appName == "medical":
return "http://localhost:8080/v2/api-docs"
elif appName == "shopizer":
return "http://localhost:8080/v2/api-docs"
RUN_EVOMASTER_COMMAND = ['java', '-jar', './libs/evomaster.jar', '--blackBox', 'true']
def buildEvoMasterCommand(outputDir, baseURL, maxTime, targetURL=None):
command = RUN_EVOMASTER_COMMAND.copy()
command.append("--bbSwaggerUrl")
command.append(baseURL)
command.append('--outputFormat')
command.append('JAVA_JUNIT_4')
if not os.path.exists(outputDir):
os.makedirs(outputDir)
command.append('--outputFolder')
command.append(outputDir)
command.append('--maxTime')
command.append(maxTime)
if targetURL is not None:
command.append('--bbTargetUrl')
command.append(targetURL)
return command
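# Illustrative command assembled by buildEvoMasterCommand (URL and paths are
# hypothetical placeholders):
#   java -jar ./libs/evomaster.jar --blackBox true \
#       --bbSwaggerUrl http://localhost:9966/petclinic/v3/api-docs \
#       --outputFormat JAVA_JUNIT_4 \
#       --outputFolder ../out/petclinic/evomaster/0 \
#       --maxTime 30m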
def runAlgo(appName, RUNTIME=60,
logFile=os.path.join("../logs", "evomaster_" + str(datetime.now().strftime("%Y%m%d-%H%M%S")) + ".log"),
rerun=False, EVOMASTER_OUTPUT="evomaster"):
maxTime= str(RUNTIME) + "m"
results = []
commands = []
# For GroundTruth OpenAPI
srcPath = os.path.join("..", "src", "main", "resources", "webapps", appName)
openApiPath = os.path.join(srcPath, "openapi.yml")
for runIndex in range(1):
curr_commands = []
outputDir = os.path.join("..", "out", appName, EVOMASTER_OUTPUT, str(runIndex))
if appName == "parabank":
# No online swagger available
command = buildEvoMasterCommand(outputDir=outputDir, baseURL=getSwaggerUrl(appName), maxTime=maxTime, targetURL=constants.getHostURL(appName))
curr_commands.append(command)
elif appName == "booker":
baseURLs=getSwaggerUrl(appName)
for key in baseURLs.keys():
curr_commands.append(buildEvoMasterCommand(outputDir=os.path.join(outputDir, key), baseURL=baseURLs[key], maxTime=str(round(RUNTIME/6)+1)+'m'))
else:
command = buildEvoMasterCommand(outputDir=outputDir, baseURL=getSwaggerUrl(appName), maxTime=maxTime)
curr_commands.append(command)
if (not rerun) and os.path.exists(os.path.join(outputDir, constants.CASETTE_YAML)):
# There is a previous execution and rerun is disabled
results.append({"command": curr_commands, "status": STATUS_SKIPPED, "message": "previous execution data exists"})
else:
commands.append({"command":curr_commands, "outputDir": outputDir})
if not DRY_RUN:
SLEEPTIME = 30
                if appName == "shopizer":
                    SLEEPTIME = 120
restartDocker(appName, SLEEPTIME)
for command in curr_commands:
if DRY_RUN:
results.append({"command": command, "status": STATUS_SUCCESSFUL, "message": "DRYRUN"})
continue
print("sending command {0}".format(command))
proc = startProcess(command, logFile, changeDir=None)
if proc == None:
print("Ignoring error command.")
results.append({"command": command, "status": STATUS_ERRORED, "message": "Command could not be executed"})
continue
status = monitorProcess(proc, timeStep=30)
print("Done : {0}".format(command))
results.append({"command": command, "status": STATUS_SUCCESSFUL, "message": "Succesful"})
if not DRY_RUN:
cleanup(MODE.ST, appName, os.path.join(outputDir, "cov"))
# if DRY_RUN:
# status = STATUS_SUCCESSFUL
# return results
#
# if isDockerized(appName):
# restartDocker(getDockerName(appName))
return results
def getExistingTest():
for app in APPS:
print(getExistingRuns(app))
DRY_RUN = False
excludeApps = ['tmf', 'mdh']
if __name__ == "__main__":
print("hello")
# getExistingTest()
runAllApps(RUNTIME=2)
| 6,598 | 35.865922 | 160 | py |
apicarver | apicarver-main/testCarver/pythonCode/runGeneratedTests.py | import glob
import os
from datetime import datetime, timedelta
import constants
from constants import APPS, STATUS_SUCCESSFUL, STATUS_ERRORED
from utilsRun import restartDocker, startProcess, monitorProcess, getDockerName, cleanup, MODE, exportJson
# BASE_COMMAND_HYBRID = ['sh', 'runTests.sh']
BASE_COMMAND = ['sh', 'runTests.sh']
# BASE_COMMAND=['java', '-jar', '/art-fork_icseBranch/crawljax/examples/target/crawljax-examples-3.7-SNAPSHOT-jar-with-dependencies.jar']
def executeTestsDummy(appName, algo, crawl, url=None,
logFile=os.path.join("logs", "testRunLog_" + str(datetime.now().strftime("%Y%m%d-%H%M%S")) + ".log"),
testResultsFolder=None):
try:
status = saveTestRunInfo(crawl=crawl, url=url,
dockerName=getDockerName(appName),
testResultsFolder=testResultsFolder,
version=APP_VERSION)
except Exception as ex:
print(ex)
print("Exception saving test run info")
status = False
def executeTests(appName, algo, crawl, url=None,
logFile=os.path.join("logs", "testRunLog_" + str(datetime.now().strftime("%Y%m%d-%H%M%S")) + ".log"),
testResultsFolder=None):
command = BASE_COMMAND.copy()
command.append(crawl)
# if url is not None:
# command.append(url)
if appName in ["petclinic", "booker", "medical", "ecomm"]:
command.append(appName)
if DRY_RUN:
status = STATUS_SUCCESSFUL
return status, command
restartDocker(appName)
startTime = datetime.now()
proc = startProcess(command, logFile, changeDir=None, DEBUG=False)
if proc == None:
print("Ignoring error command.")
status = STATUS_ERRORED
return status, command
    status = monitorProcess(proc, 6 * 60, timeStep=5, timeout=200)
print("Done : {0}".format(command))
endTime = datetime.now()
testDuration = (endTime - startTime)/timedelta(milliseconds=1)
try:
status = saveTestRunInfo(crawl=crawl, url=url,
dockerName=getDockerName(appName),
testResultsFolder=testResultsFolder,
version=APP_VERSION, duration = testDuration)
except Exception as ex:
print(ex)
print("Exception saving test run info")
status = False
cleanup(MODE.CARVER, appName=appName, outputDir=testResultsFolder)
return status, command
def saveTestRunInfo(crawl,url, dockerName=None, testResultsFolder=None, version=None, duration = None):
if version is None:
version=APP_VERSION
    testRunInfo = {'version': version, 'url': url, 'docker': dockerName, 'duration': duration}
    if testResultsFolder is None:
        testResultsFolder = os.path.join(crawl, 'test-results', '0')
        print("Assuming test results folder {0}".format(testResultsFolder))
    testRunInfoFile = os.path.join(testResultsFolder, 'testRunInfo.json')
if not os.path.exists(testResultsFolder):
print("Test results folder not found {0}".format(testResultsFolder))
print("Error: Test Run not successful!!")
return False
if os.path.exists(testRunInfoFile):
print("Error: Test run file already exists at {0}".format(testRunInfo))
return False
else:
print(testRunInfo)
if not DRY_RUN:
exportJson(testRunInfoFile, testRunInfo)
return True
def getTestRun(crawl):
returnList = []
testResultsFolder = os.path.join(crawl, "test-results")
if os.path.exists(testResultsFolder):
testRunList = os.listdir(testResultsFolder)
print("Found test runs {0}".format(testRunList))
for testRun in testRunList:
if testRun == '.DS_Store':
continue
returnList.append(os.path.join(testResultsFolder, testRun))
return returnList
return []
def runTests(crawl, rerun=False):
split = os.path.split(os.path.split(os.path.split(crawl)[0])[0])
appName = os.path.split(split[0])[1]
runInfo = split[1]
print(appName)
print(runInfo)
testRuns = getTestRun(crawl)
if len(testRuns) > 0:
if not rerun:
return False
else:
status, command = executeTests(
appName, "HYBRID", crawl,
url=None,
logFile=os.path.join(crawl, "testRun_" + str(datetime.now().strftime("%Y%m%d-%H%M%S")) + ".log"),
testResultsFolder=os.path.join(crawl,'test-results', str(len(testRuns))))
print(command)
print(status)
return True
def runAllTests(crawls, rerun=False):
success = []
skipped = []
for crawl in crawls:
status = runTests(crawl, rerun)
if status:
success.append(crawl)
else:
skipped.append(crawl)
print("succeeded {0}: {1}".format(len(success), success))
print("skipped {0}: {1}".format(len(skipped), skipped))
return success, skipped
def getHostNames():
return ["localhost"]
def getExistingCrawl(appName, algo, threshold, runtime, ALL_CRAWLS = os.path.join(os.path.abspath(".."), "out")):
existingValidCrawls = []
hostNames = getHostNames()
for host in hostNames:
crawlFolderName = appName + "_" + algo + "_" + str(float(threshold))+ "_" + str(runtime) + "mins"
crawljaxOutputPath = os.path.abspath(os.path.join(ALL_CRAWLS, appName, crawlFolderName, host))
if os.path.exists(crawljaxOutputPath):
existingValidCrawls = glob.glob(crawljaxOutputPath + "/crawl*/result.json")
return {"path": crawljaxOutputPath, "existingValidCrawls": existingValidCrawls}
return {"path": None, "existingValidCrawls": existingValidCrawls}
def getCrawlsToAnalyze(crawlPath=None,app=None, host=None, runtime = 5, bestCrawls = False):
if crawlPath==None:
crawlPath = os.path.join(".","out")
crawlMap = {}
returnCrawls = []
missingCrawls = []
for appName in APPS:
if app!=None and app!=appName:
continue
algoStr = "HYBRID"
threshold = "-1.0"
existingCrawlData = getExistingCrawl(appName, algoStr, threshold, runtime, ALL_CRAWLS = crawlPath)
existingValidCrawls = existingCrawlData['existingValidCrawls']
crawljaxOutputPath = existingCrawlData['path']
print(existingCrawlData)
if crawljaxOutputPath is None or len(existingValidCrawls) == 0:
crawlFolderName = appName + "_" + algoStr + "_" + str(float(threshold))+ "_" + str(runtime) + "mins"
crawljaxOutputPath = os.path.abspath(os.path.join(crawlPath, appName, crawlFolderName))
missingCrawls.append(crawljaxOutputPath)
for validCrawl in existingValidCrawls:
if validCrawl not in returnCrawls:
path,file = os.path.split(validCrawl)
returnCrawls.append(path)
crawlMap[path] = appName
print(len(returnCrawls))
return returnCrawls, crawlMap, missingCrawls
# APPS=["medical"]
DRY_RUN = False
APP_VERSION = -1
if __name__ == "__main__":
# testCleanup()
# testGetThresholds()
# testRestartDocker()
# testChangeDir()
# testGetBestThresholds()
returnCrawls, crawlMap, missingCrawls = getCrawlsToAnalyze(crawlPath="../crawlOut", app=None, host="localhost",
runtime=30, bestCrawls=True)
print(returnCrawls)
print(crawlMap)
print("Missing")
print(missingCrawls)
# executeTestsDummy("petclinic", "HYBRID", "/TestCarving/crawlOut/petclinic/petclinic_HYBRID_-1.0_30mins/localhost/crawl0",
# None)
runAllTests(returnCrawls, rerun=False)
# addTestRunInfos(returnCrawls, app_version=APP_VERSION)
| 6,899 | 29 | 137 | py |
apicarver | apicarver-main/testCarver/pythonCode/utilsRun.py | import csv
import json
import os
import subprocess
from datetime import datetime
from enum import Enum
from subprocess import check_call, CalledProcessError, Popen
from time import sleep
import psutil
from constants import DOCKER_LOCATION, STATUS_SUCCESSFUL
def getDockerName(appName):
return appName
def restartDockerVersion(appName):
# if version == None:
# restartDocker(getDockerName(appName))
# return
# dockerList = getDockerList(version)
# print(dockerList[appName])
restartDocker(getDockerName(appName=appName))
def restartDocker(dockerName, SLEEPTIME=30):
stopDocker = [os.path.join(DOCKER_LOCATION, dockerName, 'stop-docker.sh')]
try:
check_call(stopDocker)
except CalledProcessError as ex:
print("Could not stop docker docker? ")
print(ex)
startDocker = [os.path.join(DOCKER_LOCATION, dockerName, 'run-docker.sh')]
try:
check_call(startDocker)
sleep(SLEEPTIME)
except CalledProcessError as ex:
print("No matching processes Found for docker? ")
print(ex)
class MODE(Enum):
CARVER = "carver"
ST = "schemathesis"
def cleanup(mode, appName=None, outputDir = None):
if mode is MODE.CARVER:
killChromeDriverCommand = ['killall', 'chromedriver']
try:
check_call(killChromeDriverCommand)
except CalledProcessError as ex:
print("No matching processes Found for chromedriver? ")
print(ex)
killGoogleChromeCommand = ['killall', 'chrome']
try:
check_call(killGoogleChromeCommand)
except CalledProcessError as ex:
print("No matching processes Found for Google Chrome? ")
print(ex)
if appName is None:
print("No appName provided. Not resetting Docker")
return
dockerName = getDockerName(appName)
if not dockerName is None:
stopDocker = [os.path.join(DOCKER_LOCATION, dockerName, 'stop-docker.sh')]
if outputDir is not None:
stopDocker.append(outputDir)
try:
check_call(stopDocker)
except CalledProcessError as ex:
print("Could not stop docker docker? ")
print(ex)
def kill_process(pid):
try:
proc = psutil.Process(pid)
print("Killing", proc.name())
proc.kill()
except psutil.NoSuchProcess as ex:
print("No Such Process : {0}".format(pid))
def monitorProcess(proc, runtime=30, timeStep=30, timeout=200, crawljaxOutputPath=None, existing=-1):
done = False
timeDone = 0
graceTime = 60
status = None
while not done:
poll = proc.poll()
if poll == None:
print("process still running {0}/{1}".format(str(timeDone), str(runtime * 60)))
sleep(timeStep)
timeDone += timeStep
else:
done = True
status = STATUS_SUCCESSFUL
break
return status
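# Typical usage sketch (command and log path are illustrative):
#   proc = startProcess(['sleep', '5'], outputPath='sleep.log')
#   if proc is not None:
#       status = monitorProcess(proc, runtime=1, timeStep=1)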
def changeDirectory(path):
try:
os.chdir(path)
return True
except OSError as ex:
print("Could not change director")
print(ex)
return False
def startProcess(command, outputPath="output_crawljax_" + str(datetime.now().strftime("%Y%m%d-%H%M%S")) + ".log",
changeDir=None,
DEBUG=False):
changed = False
current = os.getcwd()
try:
if changeDir is not None:
changed = changeDirectory(changeDir)
if DEBUG:
process = Popen(command)
return process
else:
print("outputtting log to {0}".format(outputPath))
with open(outputPath, 'w') as outputFile:
proc = Popen(command, stderr=subprocess.STDOUT, stdout=outputFile)
print("Started {0} with PID {1}".format(command, proc.pid))
return proc
except Exception as ex:
print(ex)
print("Exception try to run {0} : ".format(command))
finally:
if changed:
changeDirectory(current)
def exportJson(file, jsonData):
with open(file, "w") as write_file:
json.dump(jsonData, write_file)
def writeCSV_Dict(csvFields, csvRows, dst):
# print(csvRows)
with open(dst, 'w') as csvfile:
writer = csv.DictWriter(csvfile, fieldnames=csvFields)
writer.writeheader()
for row in csvRows:
writer.writerow(row)
def writeCSV(rows, dest):
    with open(dest, 'w') as csvFile:
        writer = csv.writer(csvFile)
        for row in rows:
            writer.writerow(row)
def importJson(jsonFile):
try:
with open(jsonFile, encoding='utf-8') as data_file:
data = json.loads(data_file.read())
return data
except Exception as ex:
print("Exception occured while importing json from : " + jsonFile)
print(ex)
return None
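# Round-trip sketch for the JSON helpers (file path is illustrative):
#   exportJson('/tmp/testRunInfo.json', {'status': 'ok'})
#   assert importJson('/tmp/testRunInfo.json') == {'status': 'ok'}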
if __name__=="__main__":
cleanup(MODE.ST, appName="realworld", outputDir="../out/testProbe/cov") | 4,316 | 22.983333 | 113 | py |
apicarver | apicarver-main/testCarver/pythonCode/runSchemathesis.py | import glob
import os
import shutil
from datetime import datetime
import constants
from constants import RUN_SCHEMATHESIS_COMMAND, APPS, STATUS_SUCCESSFUL, STATUS_SKIPPED, STATUS_ERRORED, CASETTE_YAML, \
SCHEMATHESIS_OUTPUT
from utilsRun import monitorProcess, cleanup, startProcess, restartDocker, MODE
def runAllApps(RUNTIME=30):
succesful = []
unsuccesful = []
skipped = []
for app in APPS:
if app in excludeApps:
continue
baseURL = constants.getHostURL(app)
if baseURL is None:
skipped.append(app)
continue
results = runAlgo(app, baseURL)
for result in results:
status = result["status"]
command = result["command"]
if status == STATUS_SUCCESSFUL:
succesful.append(command)
elif status == STATUS_SKIPPED:
skipped.append(command)
elif status == STATUS_ERRORED:
unsuccesful.append(command)
print("succesful : {0}".format(str(len(succesful))))
print(succesful)
print("skipped : {0}".format(str(len(skipped))))
print(skipped)
print("unsuccesful : {0}".format(str(len(unsuccesful))))
print(unsuccesful)
if DRY_RUN:
print("Predicted run time : " + str(RUNTIME * len(succesful)))
def getExistingRuns(appName, ALL_CRAWLS=os.path.join(os.path.abspath(".."), "out")):
    gtYaml = []
    carverYaml = []
    proberYaml = []
    crawljaxOutputPath = os.path.abspath(os.path.join(ALL_CRAWLS, appName))
    if os.path.exists(crawljaxOutputPath):
        gtYaml = glob.glob(crawljaxOutputPath + "/" + SCHEMATHESIS_OUTPUT + "/" + CASETTE_YAML)
        carverYaml = glob.glob(crawljaxOutputPath + "/" + constants.SCHEMATHESIS_CARVER + "/" + CASETTE_YAML)
        proberYaml = glob.glob(crawljaxOutputPath + "/" + constants.SCHEMATHESIS_PROBER + "/" + CASETTE_YAML)
        return {"path": crawljaxOutputPath, "gtYaml": gtYaml, "carverYaml": carverYaml, "proberYaml": proberYaml}
    return {"path": None, "gtYaml": gtYaml, "carverYaml": carverYaml, "proberYaml": proberYaml}
def buildSchemathesisCommand(outputDir, openApiPath, baseURL):
command = RUN_SCHEMATHESIS_COMMAND.copy()
command.append("--cassette-path")
if not os.path.exists(outputDir):
os.makedirs(outputDir)
command.append(os.path.join(outputDir, CASETTE_YAML))
command.append(openApiPath)
command.append("--base-url")
command.append(baseURL)
command.append('--hypothesis-max-examples')
command.append('1000')
return command
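# Sketch of the assembled invocation; the base command comes from
# constants.RUN_SCHEMATHESIS_COMMAND and the cassette file name from
# constants.CASETTE_YAML (placeholders in angle brackets):
#   <RUN_SCHEMATHESIS_COMMAND> --cassette-path <outputDir>/<CASETTE_YAML> \
#       <openApiPath> --base-url <baseURL> --hypothesis-max-examples 1000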
def getEnhancedYaml(appName):
appOutput = os.path.abspath(os.path.join("../out", appName))
if not os.path.exists(appOutput):
print("no output folder for {}".format(appName))
return None
carverYaml = glob.glob(appOutput + "/*/run/*/" + constants.ENHANCED_YAML)
proberYaml = glob.glob(appOutput + "/*/oas/*/" + constants.ENHANCED_YAML)
return {"carverYaml": carverYaml, "proberYaml": proberYaml}
def runAlgo(appName, baseURL,
logFile=os.path.join("../logs", "schemaThesis_" + str(datetime.now().strftime("%Y%m%d-%H%M%S")) + ".log"),
rerun=False):
results = []
commands = []
# For GroundTruth OpenAPI
srcPath = os.path.join("..", "src", "main", "resources", "webapps", appName)
openApiPath = os.path.join(srcPath, "openapi.yml")
enhancedYaml = getEnhancedYaml(appName)
for runIndex in range(1):
outputDir = os.path.join("..", "out", appName, SCHEMATHESIS_OUTPUT, str(runIndex))
command_gtYaml = buildSchemathesisCommand(outputDir=outputDir, openApiPath=openApiPath, baseURL=baseURL)
if (not rerun) and os.path.exists(os.path.join(outputDir, constants.CASETTE_YAML)):
# There is a previous execution and rerun is disabled
results.append({"command": command_gtYaml, "status": STATUS_SKIPPED, "message": "previous execution data exists"})
else:
commands.append({"command":command_gtYaml, "outputDir":outputDir})
if (enhancedYaml is not None) and len(enhancedYaml['carverYaml']) > 0:
# For Carver Enhanced OpenAPI
outputDir = os.path.join("..", "out", appName, constants.SCHEMATHESIS_CARVER, str(runIndex))
openApiPath = enhancedYaml['carverYaml'][0]
command_carverYaml = buildSchemathesisCommand(outputDir=outputDir, openApiPath=openApiPath, baseURL=baseURL)
if (not rerun) and os.path.exists(os.path.join(outputDir, constants.CASETTE_YAML)):
# There is a previous execution and rerun is disabled
results.append({"command": command_carverYaml, "status": STATUS_SKIPPED, "message": "previous execution data exists"})
else:
commands.append({"command":command_carverYaml, "outputDir":outputDir})
if (enhancedYaml is not None) and len(enhancedYaml['proberYaml']) > 0:
# For Carver Enhanced OpenAPI
outputDir = os.path.join("..", "out", appName, constants.SCHEMATHESIS_PROBER, str(runIndex))
openApiPath = enhancedYaml['proberYaml'][0]
command_proberYaml = buildSchemathesisCommand(outputDir=outputDir, openApiPath=openApiPath, baseURL=baseURL)
if (not rerun) and os.path.exists(os.path.join(outputDir, constants.CASETTE_YAML)):
# There is a previous execution and rerun is disabled
results.append({"command": command_proberYaml, "status": STATUS_SKIPPED, "message": "previous execution data exists"})
else:
commands.append({"command":command_proberYaml, "outputDir":outputDir})
for command in commands:
if DRY_RUN:
results.append({"command": command["command"], "status": STATUS_SUCCESSFUL, "message": "DRYRUN"})
continue
SLEEPTIME = 30
if appName == "shopizer":
SLEEPTIME = 120
restartDocker(appName, SLEEPTIME)
print("sending command {0}".format(command["command"]))
proc = startProcess(command["command"], logFile, changeDir=None)
if proc == None:
print("Ignoring error command.")
results.append({"command": command["command"], "status": STATUS_ERRORED, "message": "Command could not be executed"})
continue
status = monitorProcess(proc, timeStep=30)
print("Done : {0}".format(command["command"]))
cleanup(MODE.ST, appName, os.path.join(command["outputDir"], "cov"))
results.append({"command": command["command"], "status": STATUS_SUCCESSFUL, "message": "Succesful"})
# if DRY_RUN:
# status = STATUS_SUCCESSFUL
# return results
#
# if isDockerized(appName):
# restartDocker(getDockerName(appName))
return results
def getExistingTest():
for app in APPS:
print(getExistingRuns(app))
DRY_RUN = False
excludeApps = ['tmf', 'mdh', 'shopizer']
if __name__ == "__main__":
print("hello")
# getExistingTest()
runAllApps()
| 6,970 | 39.063218 | 134 | py |
apicarver | apicarver-main/testCarver/pythonCode/rq1_executionTime.py | import glob
import os.path
from datetime import datetime
import utilsRun
from constants import APPS
from coverageStats import getCovFiles
from runCarver import getExistingCarverRun
from runGeneratedTests import getCrawlsToAnalyze, getExistingCrawl
from utilsRun import importJson
def findAllOutputs(ALL_CRAWLS="../crawlOut"):
allOutputs = {}
for appName in APPS:
try:
# print(appName)
algoStr = "HYBRID"
threshold = "-1.0"
existingCrawlData = getExistingCrawl(appName, algoStr, threshold, 30, ALL_CRAWLS = ALL_CRAWLS)
existingValidCrawls = existingCrawlData['existingValidCrawls']
crawljaxOutputPath = existingCrawlData['path']
# print(existingValidCrawls[0])
existingCarverData = getExistingCarverRun(appName)
existingValidCarverOutputs = existingCarverData['existingValidCrawls']
carverOutputPath = existingCarverData['path']
# print(existingValidCarverOutputs[0])
outputs = {"carver": existingValidCarverOutputs[0], "crawler": existingValidCrawls[0], "success": True}
except Exception as ex:
outputs = {"success": False, "message": "error finding outputs"}
print(ex)
allOutputs[appName] = outputs
return allOutputs
def getExecutionTime(outputs):
duration = {}
validCrawl = outputs['crawler']
validCarve = outputs['carver']
try:
crawlPath, file = os.path.split(validCrawl)
testExecutionResultFile = glob.glob(crawlPath + "/test-results/0/testRunInfo.json")[0]
executionData = importJson(jsonFile=testExecutionResultFile)
executionTime = executionData['duration']
# print("Crawler time {}".format(executionTime))
# print("Crawler time {}".format(int(executionTime)))
duration['crawler'] = int(executionTime)
except Exception as ex:
print(ex)
print("Exception getting UI test execution data")
duration['crawler'] = None
try:
carvePath, file = os.path.split(validCarve)
carveResultFile = glob.glob(carvePath + "/run/*/resultResponses.json")[0]
carverResults = importJson(carveResultFile)
executionTime = 0
for apiResult in carverResults:
executionTime += apiResult['duration']
# print("Carver time {}".format(executionTime))
duration['carver'] = executionTime
except Exception as ex:
print(ex)
print("Unable to find carver execution time")
duration['carver'] = None
return duration
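# Unit note (assumption): the crawler duration is read from testRunInfo.json,
# where runGeneratedTests stores it in milliseconds (division by
# timedelta(milliseconds=1)); the carver figure sums the per-request
# `duration` fields of resultResponses.json as-is, so the two columns are
# comparable only if those fields use the same unit.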
def getCoverageData(app):
print(getCovFiles(app))
if __name__ == "__main__":
allOutputs = findAllOutputs()
print(allOutputs)
durations = []
coverages = []
for app in APPS:
if allOutputs[app]['success']:
duration = getExecutionTime(allOutputs[app])
duration['app'] = app
durations.append(duration)
else:
print("Cannot get results for {}".format(app))
            durations.append({'app': app, 'crawler': 'error', 'carver': 'error'})
print(durations)
utilsRun.writeCSV_Dict(durations[0].keys(), csvRows=durations,dst="../results/durations_"+datetime.now().strftime("%Y%m%d-%H%M%S")+".csv")
# getCoverageData("petclinic") | 3,289 | 37.255814 | 142 | py |
apicarver | apicarver-main/testCarver/pythonCode/runCarver.py | import os
import shutil
from datetime import datetime
# from globalNames import FILTER, THRESHOLD_SETS, DB_SETS, APPS, isDockerized, DOCKER_LOCATION, isNd3App, getHostNames, \
# ALGOS, getDockerName, getDockerList, getURLList
import glob
from constants import APPS, RUN_CARVER_COMMAND, STATUS_SUCCESSFUL, STATUS_SKIPPED, STATUS_ERRORED
from utilsRun import restartDocker, cleanup, monitorProcess, changeDirectory, startProcess, MODE
######################## REGRESSION UTILS ##################
######################## ######## ##################
def runAllApps(RUNTIME=30):
succesful = []
unsuccesful = []
skipped = []
for app in APPS:
if app in excludeApps:
continue
status, command = runAlgo(app, RUNTIME)
if status == STATUS_SUCCESSFUL:
succesful.append(command)
elif status == STATUS_SKIPPED:
skipped.append(command)
elif status == STATUS_ERRORED:
unsuccesful.append(command)
print("succesful : {0}".format(str(len(succesful))))
print(succesful)
print("skipped : {0}".format(str(len(skipped))))
print(skipped)
print("unsuccesful : {0}".format(str(len(unsuccesful))))
print(unsuccesful)
if DRY_RUN:
print("Predicted run time : " + str(RUNTIME * len(succesful)))
def getExistingCarverRun(appName, ALL_CRAWLS=os.path.join(os.path.abspath(".."), "out")):
existingValidCrawls = []
crawljaxOutputPath = os.path.abspath(os.path.join(ALL_CRAWLS, appName))
if os.path.exists(crawljaxOutputPath):
existingValidCrawls = glob.glob(crawljaxOutputPath + "/*/uiTest_runResult.json")
return {"path": crawljaxOutputPath, "existingValidCrawls": existingValidCrawls}
return {"path": None, "existingValidCrawls": existingValidCrawls}
def runAlgo(appName, runtime,
logFile=os.path.join("logs", "carverLog_" + str(datetime.now().strftime("%Y%m%d-%H%M%S")) + ".log"),
rerun=False):
command = RUN_CARVER_COMMAND.copy()
command.append(appName)
command.append(str(runtime))
# host = "localhost"
# if(isDockerized(appName)):
# host = "192.168.99.101"
existingCrawlData = getExistingCarverRun(appName)
existingValidCrawls = existingCrawlData['existingValidCrawls']
crawljaxOutputPath = existingCrawlData['path']
if (not rerun):
if crawljaxOutputPath is not None and os.path.exists(crawljaxOutputPath):
if len(existingValidCrawls) == 0:
# shutil.rmtree(crawljaxOutputPath)
print("No existing output. Continuing to run")
else:
print("Ignoring run because a crawl already exists.")
print("Call with rerun=True for creating a new crawl with the same configuration")
status = STATUS_SKIPPED
return status, command
if DRY_RUN:
status = STATUS_SUCCESSFUL
return status, command
#
# if isDockerized(appName):
# # restartDocker(appName)
# restartDocker(getDockerName(appName))
print("sending command {0}".format(command))
proc = startProcess(command, logFile, changeDir="..")
if proc == None:
print("Ignoring error command.")
status = STATUS_ERRORED
return status, command
timeout = 200
status = monitorProcess(proc, runtime, timeout=timeout, crawljaxOutputPath=crawljaxOutputPath,
existing=len(existingValidCrawls))
print("Done : {0}".format(command))
    cleanup(MODE.CARVER, appName)
return status, command
###########################################################################
##Tests ############
###########################################################################
def CleanupTest():
    cleanup(MODE.CARVER)
print("cleanup tested")
def RestartDockerTest():
restartDocker("dimeshift")
def ChangeDirTest():
    current = os.getcwd()
print(os.getcwd())
changeDirectory("..")
print(os.getcwd())
changeDirectory(current)
print(os.getcwd())
def GetExistingTest():
for app in APPS:
print(getExistingCarverRun(app))
###########################################################################
## Main Code ############
###########################################################################
DRY_RUN = False
excludeApps = ['tmf', 'mdh']
if __name__ == "__main__":
print("hello")
# testGetExisting()
runAllApps(30)
| 4,439 | 29.62069 | 121 | py |
longitudinalCOVID | longitudinalCOVID-master/main.py | import argparse
import os
import random
from collections import defaultdict
from copy import copy
import numpy as np
import torch
import data_loader as module_data_loader
import dataset as module_dataset
import model as module_arch
import model.utils.loss as module_loss
import model.utils.metric as module_metric
import trainer as trainer_module
from dataset.DatasetStatic import Phase
from dataset.dataset_utils import Views
from parse_config import ConfigParser, parse_cmd_args
def main(config, resume=None):
torch.manual_seed(0)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
np.random.seed(0)
num_patients = config['dataset']['num_patients']
crossVal_or_test = False
if config['test']:
folds = 1
len_fold = num_patients
crossVal_or_test = True
elif config['dataset']['cross_val']:
folds = config['dataset']['val_fold_num']
len_fold = config['dataset']['val_fold_len']
crossVal_or_test = True
else:
folds, len_fold = 1, 0
        if not config['dataset']['args']['val_patients']:
            raise Exception(
                "Please specify the validation patients set in the config when not using cross-validation or the test phase.")
all_patients = [i for i in range(num_patients)]
np.random.shuffle(all_patients)
if resume:
config.resume = resume
logger = config.get_logger('train')
# get function handles of loss and metrics
loss = getattr(module_loss, config['loss'])
metrics = [getattr(module_metric, met) for met in config['metrics']]
# setup data_loader instances
if config['single_view']:
results = defaultdict(list)
for view in list(Views):
_cfg = copy(config)
for fold in range(folds):
logger.info('Fold Number: {}'.format(fold + 1))
logs = train(logger, _cfg, loss, metrics, fold, len_fold, all_patients, crossVal_or_test, view=view)
for k, v in list(logs.items()):
results[k].append(v)
else:
for fold in range(folds):
logger.info('Fold Number: {}'.format(fold + 1))
train(logger, config, loss, metrics, fold, len_fold, all_patients, crossVal_or_test)
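# Fold layout sketch (hypothetical numbers): with num_patients=10,
# val_fold_num=5 and val_fold_len=2, the shuffled patient list is split into
# validation slices [0:2], [2:4], ..., [8:10], one per call to train();
# the remaining patients of each fold form the training set.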
def train(logger, config, loss, metrics, fold, len_fold, all_patients, crossVal_or_test, view: Views = None):
    logger.info('start training: {}'.format(config['dataset']['args']))
print("Cross of test", crossVal_or_test, all_patients, fold, len_fold, flush=True)
if crossVal_or_test:
config['dataset']['args']['val_patients'] = all_patients[fold * len_fold: (fold + 1) * len_fold]
data_loader = None
if len(all_patients) != len(config['dataset']['args']['val_patients']): # if we had any patients left in the train set
dataset = config.retrieve_class('dataset', module_dataset)(**config['dataset']['args'], phase=Phase.TRAIN,
view=view)
data_loader = config.retrieve_class('data_loader', module_data_loader)(**config['data_loader']['args'],
dataset=dataset)
val_dataset = config.retrieve_class('dataset', module_dataset)(**config['dataset']['args'], phase=Phase.VAL,
view=view)
valid_data_loader = config.retrieve_class('data_loader', module_data_loader)(**config['data_loader']['args'],
dataset=val_dataset)
# build model architecture, then print to console
model = config.initialize_class('arch', module_arch)
logger.info(model)
if config['only_validation'] or config['test']:
logger.info('Loading checkpoint: {} ...'.format(config['path']))
path = config["path"]
checkpoint = torch.load(path, map_location=lambda storage, loc: storage)
if 'state_dict' in checkpoint.keys():
model.load_state_dict(checkpoint['state_dict'])
else:
model.load_state_dict(checkpoint)
# build optimizer, learning rate scheduler. delete every lines containing lr_scheduler for disabling scheduler
trainable_params = filter(lambda p: p.requires_grad, model.parameters())
optimizer = config.initialize('optimizer', torch.optim, trainable_params)
lr_scheduler = config.initialize('lr_scheduler', torch.optim.lr_scheduler, optimizer)
if view:
config._save_dir = os.path.join(config._save_dir, str(view.name))
config._log_dir = os.path.join(config._log_dir, str(view.name))
os.mkdir(config._save_dir)
os.mkdir(config._log_dir)
trainer = config.retrieve_class('trainer', trainer_module)(model, loss, metrics, optimizer, config, data_loader,
fold, valid_data_loader, lr_scheduler)
return trainer.train()
if __name__ == '__main__':
args = argparse.ArgumentParser(description='PyTorch Template')
args.add_argument('-c', '--config', default=None, type=str, help='config file path (default: None)')
args.add_argument('-r', '--resume', default=None, type=str, help='path to latest checkpoint (default: None)')
args.add_argument('-d', '--device', default=None, type=str, help='indices of GPUs to enable (default: all)')
args.add_argument('-s', '--single_view', default=False, type=bool,
help='Defines if a single is used per plane orientation')
args.add_argument('-v', '--only_validation', default=False, type=bool,
help='just run validation on a checkpoint model and do no training -- should add argument -p')
args.add_argument('-p', '--path', default=None, type=str, help='path to latest checkpoint (default: None)')
args.add_argument('-t', '--test', default=False, type=bool,
help='to run test phase on all the patients list')
config = ConfigParser(*parse_cmd_args(args))
main(config)
| 6,108 | 41.72028 | 123 | py |
longitudinalCOVID | longitudinalCOVID-master/majority_voting.py | import argparse
import os
import nibabel
import numpy as np
import torch
from scipy.ndimage import rotate
from tqdm import tqdm
import data_loader as module_data_loader
import dataset as module_dataset
import model as module_arch
import model.utils.metric as module_metric
from dataset.DatasetStatic import Phase
from dataset.dataset_utils import Evaluate, Dataset
from parse_config import ConfigParser, parse_cmd_args
'''For Majority Voting and taking mean over all planes'''
def main(config, resume=None):
if config["path"]:
resume = config["path"]
logger = config.get_logger('test')
# setup data_loader instances
dataset = config.retrieve_class('dataset', module_dataset)(
**config['dataset']['args'], phase=Phase.TEST, evaluate=config['evaluate']
)
assert config['data_loader']['args'][
'batch_size'] == 1, "batch_size > 1! Configure batch_size in model config to one."
data_loader = config.retrieve_class('data_loader', module_data_loader)(
dataset=dataset,
batch_size=config['data_loader']['args']['batch_size'],
num_workers=config['data_loader']['args']['num_workers'],
shuffle=False
)
# build model architecture
model = config.initialize_class('arch', module_arch)
logger.info(model)
# get function handles of loss and metrics
metric_fns = [getattr(module_metric, met) for met in config['metrics']]
logger.info('Loading checkpoint: {} ...'.format(resume))
checkpoint = torch.load(resume, map_location=lambda storage, loc: storage)
if config['n_gpu'] > 1:
model = torch.nn.DataParallel(model)
if 'state_dict' in checkpoint.keys():
model.load_state_dict(checkpoint['state_dict'])
else:
model.load_state_dict(checkpoint)
# prepare model for testing
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = model.to(device)
model.eval()
res = config['dataset']['args']['size']
total_metrics = torch.zeros(len(metric_fns), config['dataset']['args']['n_classes'])
volume_metrics = torch.zeros(len(metric_fns), config['dataset']['args']['n_classes'])
with torch.no_grad():
# setup
volume = 0
axis = 0 # max 2
c = 0
alignment = [(0, 1, 2), (1, 0, 2), (2, 1, 0)]
data_shape = [res, res, res]
output_agg = torch.zeros([config['dataset']['args']['n_classes'], *data_shape]).to(device)
target_agg = torch.zeros([config['dataset']['args']['n_classes'], *data_shape]).to(device)
n_samples = 0
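        # Aggregation sketch: the loader yields one 2D slice per step; for the
        # current plane, slice `c` of the transposed accumulator (see
        # `alignment`) is incremented with the per-class prediction. After
        # `res` slices the axis advances, and after all three planes
        # `output_agg` holds the sum of the three per-plane probability
        # volumes, so a voxel-wise argmax implements the mean/majority vote.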
for idx, loaded_data in enumerate(tqdm(data_loader)):
if len(loaded_data) == 6:
# static case
data, target = loaded_data[0], loaded_data[1]
data, target = data.to(device), target.to(device)
output = model(data)
else:
# longitudinal case
x_ref, x, _, target = loaded_data[0], loaded_data[1], loaded_data[2], loaded_data[3]
x_ref, x, target = x_ref.to(device), x.to(device), target.to(device)
output, _ = model(x_ref, x)
for cl in range(output_agg.size()[0]):
x = output_agg[cl].to('cpu').numpy()
y = output[0][cl].to('cpu').numpy()
z = np.transpose(x, alignment[axis])
z[c] += y
output_agg[cl] = torch.tensor(np.transpose(z, alignment[axis])).to(device)
for cl in range(output_agg.size()[0]):
x = target_agg[cl].to('cpu').numpy()
y = target[0][cl].to('cpu').numpy()
z = np.transpose(x, alignment[axis])
z[c] += y
target_agg[cl] = torch.tensor(np.transpose(z, alignment[axis])).to(device)
c += 1
print("C is: ", c, "res is: ", res, flush=True)
if c == res:
axis += 1
c = 0
print("Axis Changed ", axis)
if axis == 3:
print("Volume finished")
path = os.path.join(config.config['trainer']['save_dir'], 'output',
*str(config._save_dir).split(os.sep)[-2:],
str(resume).split(os.sep)[-1][:-4])
os.makedirs(path, exist_ok=True)
axis = 0
label_out = output_agg.argmax(0)
label_target = target_agg.argmax(0)
evaluate_timestep(output_agg.unsqueeze(0), target_agg.unsqueeze(0), label_out, label_target,
metric_fns, config, path, volume,
volume_metrics, total_metrics,
logger)
# inferred whole volume
logger.info('---------------------------------')
logger.info(f'Volume number {int(volume) + 1}:')
for i, met in enumerate(metric_fns):
logger.info(f' {met.__name__}: {volume_metrics[i]}')
                    volume_metrics = torch.zeros(len(metric_fns), config['dataset']['args']['n_classes'])
                    volume += 1
                    n_samples += 1
output_agg = torch.zeros([config['dataset']['args']['n_classes'], *data_shape]).to(device)
target_agg = torch.zeros([config['dataset']['args']['n_classes'], *data_shape]).to(device)
logger.info('================================')
logger.info(f'Averaged over all patients:')
for i, met in enumerate(metric_fns):
            logger.info(f'    {met.__name__}: {total_metrics[i] / n_samples}')
def evaluate_timestep(avg_seg_volume, target_agg, label_out, label_target, metric_fns, config, path, patient,
volume_metrics, total_metrics,
logger):
prefix = f'{config["evaluate"].value}{(int(patient) + 1):02}'
seg_volume = label_out.int().cpu().detach().numpy()
rotated_seg_volume = rotate(rotate(seg_volume, -90, axes=(0, 1)), 90, axes=(1, 2))
nibabel.save(nibabel.Nifti1Image(rotated_seg_volume, np.eye(4)), os.path.join(path, f'{prefix}_seg.nii'))
target_volume = label_target.int().cpu().detach().numpy()
rotated_target_volume = rotate(rotate(target_volume, -90, axes=(0, 1)), 90, axes=(1, 2))
nibabel.save(nibabel.Nifti1Image(rotated_target_volume, np.eye(4)), os.path.join(path, f'{prefix}_target.nii'))
# computing loss, metrics on test set
logger.info(f'Patient {int(patient) + 1}: ')
for i, metric in enumerate(metric_fns):
if metric.__name__.__contains__("loss"):
continue
current_metric = metric(avg_seg_volume, target_agg)
logger.info(f' {metric.__name__}: {current_metric}')
try:
for j in range(current_metric.shape[0]):
volume_metrics[i][j] += current_metric[j]
total_metrics[i][j] += current_metric[j]
except Exception:
print("Invalid metric shape.")
continue
if __name__ == '__main__':
args = argparse.ArgumentParser(description='PyTorch Template')
args.add_argument('-c', '--config', default=None, type=str, help='config file path (default: None)')
args.add_argument('-d', '--device', default=None, type=str, help='indices of GPUs to enable (default: all)')
args.add_argument('-e', '--evaluate', default=Evaluate.TEST, type=Evaluate,
help='Either "training" or "test"; Determines the prefix of the folders to use')
args.add_argument('-m', '--dataset_type', default=Dataset.ISBI, type=Dataset, help='Dataset to use')
args.add_argument('-r', '--resume', default=None, type=str, help='path to latest checkpoint (default: None)')
args.add_argument('-p', '--path', default=None, type=str, help='path to latest checkpoint (default: None)')
config = ConfigParser(*parse_cmd_args(args))
main(config)
| 7,995 | 41.084211 | 115 | py |
longitudinalCOVID | longitudinalCOVID-master/trainer/LongitudinalWithProgressionTrainer.py | import numpy
from logger import Mode
from trainer.Trainer import Trainer
from utils.illustration_util import log_visualizations
import torch.nn.functional as F
import torch
class LongitudinalWithProgressionTrainer(Trainer):
"""
Trainer class for training with original loss + difference map loss + reverse order for reference and CT loss
"""
def __init__(self, model, loss, metric_ftns, optimizer, config, data_loader, fold=None,
valid_data_loader=None, lr_scheduler=None, len_epoch=None):
super().__init__(model, loss, metric_ftns, optimizer, config, data_loader, fold, valid_data_loader, lr_scheduler,
len_epoch)
def remap_labels_for_difference(self, output):
covid_noncovid_output_ref = output.argmax(1)
covid_noncovid_output_ref2 = covid_noncovid_output_ref.clone()
covid_noncovid_output_ref2[covid_noncovid_output_ref != 3] = 0
covid_noncovid_output_ref2[covid_noncovid_output_ref == 3] = 1
covid_noncovid_output_ref[covid_noncovid_output_ref >= 2] = 3
covid_noncovid_output_ref[covid_noncovid_output_ref <= 1] = 0
covid_noncovid_output_ref[covid_noncovid_output_ref == 3] = 1
# first mask is for covid/non-covid difference and second mask is for cons/non-cons
return covid_noncovid_output_ref, covid_noncovid_output_ref2
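    # Illustrative remapping on argmax labels (class semantics assumed from
    # this repo's multi-class lung segmentation setup):
    #   labels [0, 1, 2, 3] -> first mask  [0, 0, 1, 1]  (pathology vs. rest)
    #   labels [0, 1, 2, 3] -> second mask [0, 0, 0, 1]  (class 3, e.g.
    #   consolidation, vs. rest)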
def _process(self, epoch, data_loader, metrics, mode: Mode = Mode.TRAIN):
_len_epoch = self.len_epoch if mode == Mode.TRAIN else self.len_epoch_val
for batch_idx, (x_ref, x, target_ref, target, mismatch, is_mismatch, is_last) in enumerate(data_loader):
x_ref, x, target, target_ref = x_ref.to(self.device), x.to(self.device), target.to(
self.device), target_ref.to(self.device)
if mode == Mode.TRAIN:
self.optimizer.zero_grad()
output, encoded = self.model(x_ref, x)
output_ref = None
if mode == Mode.VAL:
mismatch = mismatch.to(self.device)
output_ref, _ = self.model(mismatch, x_ref)
if mode == Mode.TRAIN:
output_ref, _ = self.model(x, x_ref)
covid_noncovid_output_ref, covid_noncovid_output_ref2 = self.remap_labels_for_difference(output_ref)
covid_noncovid_output, covid_noncovid_output2 = self.remap_labels_for_difference(output)
covid_noncovid_target, covid_noncovid_target2 = self.remap_labels_for_difference(target)
covid_noncovid_target_ref, covid_noncovid_target_ref2 = self.remap_labels_for_difference(target_ref)
difference_output = covid_noncovid_output - covid_noncovid_output_ref
difference_output += 1 # 0,1,2 for difference map labels
difference_output_reverse = covid_noncovid_output2 - covid_noncovid_output_ref2
difference_output_reverse += 1
difference_target = covid_noncovid_target - covid_noncovid_target_ref
difference_target += 1
difference_target_reverse = covid_noncovid_target2 - covid_noncovid_target_ref2
difference_target_reverse += 1
d_output = F.one_hot(difference_output, num_classes=3).permute(0, 3, 1, 2)
d_target = F.one_hot(difference_target, num_classes=3).permute(0, 3, 1, 2)
d_target_reverse = F.one_hot(difference_target_reverse, num_classes=3).permute(0, 3, 1, 2)
d_output_reverse = F.one_hot(difference_output_reverse, num_classes=3).permute(0, 3, 1, 2)
if mode == Mode.VAL:
try:
output_refs = torch.tensor([]).to(self.device)
target_refs = torch.tensor([]).to(self.device)
empty = True
for i in range(x.size(0)):
if not is_mismatch[i]:
empty = False
output_refs = torch.cat((output_refs, output_ref[i].unsqueeze(0)))
target_refs = torch.cat((target_refs, target_ref[i].unsqueeze(0)))
if not empty:
self.log_scalars(metrics, self.get_step(batch_idx, epoch, _len_epoch), output_refs, target_refs,
None, mode, False, is_last=is_last)
except Exception as e:
print("Exception in mismatch:", is_mismatch, e)
elif mode == Mode.TRAIN:
self.log_scalars(metrics, self.get_step(batch_idx, epoch, _len_epoch), output_ref, target_ref,
None, mode, False, is_last=is_last)
self.log_scalars(metrics, self.get_step(batch_idx, epoch, _len_epoch), d_output, d_target, None,
mode, False, True, is_last=is_last)
self.log_scalars(metrics, self.get_step(batch_idx, epoch, _len_epoch), d_output_reverse, d_target_reverse,
None,
mode, True, True, is_last=is_last)
loss = self.loss(output, target, output_ref, target_ref, d_output_reverse.float(), d_target_reverse.float())
if mode == Mode.TRAIN:
loss.backward()
self.optimizer.step()
self.log_scalars(metrics, self.get_step(batch_idx, epoch, _len_epoch), output, target, loss, mode, False,
is_last=is_last)
if not (batch_idx % self.log_step):
                self.logger.info(f'{mode.value} Epoch: {epoch} {self._progress(data_loader, batch_idx, _len_epoch)} Loss: {loss.item():.6f}')
if not (batch_idx % (_len_epoch // 10)):
log_visualizations(self.writer, x_ref, x, output, target, output_ref, target_ref,
difference_output, difference_target, difference_output_reverse,
difference_target_reverse, encoded)
del x_ref, x, target
| 5,986 | 51.982301 | 140 | py |
longitudinalCOVID | longitudinalCOVID-master/trainer/StaticTrainer.py | from logger import Mode
from trainer.Trainer import Trainer
from utils.illustration_util import log_visualizations
import torch.nn.functional as F
class StaticTrainer(Trainer):
"""
Trainer class for base training
"""
def __init__(self, model, loss, metric_ftns, optimizer, config, data_loader, fold=None,
valid_data_loader=None, lr_scheduler=None, len_epoch=None):
super().__init__(model, loss, metric_ftns, optimizer, config, data_loader, fold, valid_data_loader, lr_scheduler,
len_epoch)
def remap_labels_for_difference(self, output):
covid_noncovid_output_ref = output.argmax(1)
covid_noncovid_output_ref2 = covid_noncovid_output_ref.clone()
covid_noncovid_output_ref2[covid_noncovid_output_ref != 3] = 0
covid_noncovid_output_ref2[covid_noncovid_output_ref == 3] = 1
covid_noncovid_output_ref[covid_noncovid_output_ref >= 2] = 3
covid_noncovid_output_ref[covid_noncovid_output_ref <= 1] = 0
covid_noncovid_output_ref[covid_noncovid_output_ref == 3] = 1
# first mask is for covid/non-covid difference and second mask is for cons/non-cons
return covid_noncovid_output_ref, covid_noncovid_output_ref2
def _process(self, epoch, data_loader, metrics, mode: Mode = Mode.TRAIN):
_len_epoch = self.len_epoch if mode == Mode.TRAIN else self.len_epoch_val
for batch_idx, (data, target, data_ref, target_ref, is_one, is_last) in enumerate(data_loader):
data, target, data_ref, target_ref = data.to(self.device), target.to(self.device), data_ref.to(
self.device), target_ref.to(self.device)
if mode == Mode.TRAIN:
self.optimizer.zero_grad()
output = self.model(data)
loss = self.loss(output, target)
if mode == Mode.TRAIN:
loss.backward()
self.optimizer.step()
self.log_scalars(metrics, self.get_step(batch_idx, epoch, _len_epoch), output, target, loss, mode, False,
is_last=is_last)
covid_noncovid_output, covid_noncovid_output2 = self.remap_labels_for_difference(output)
covid_noncovid_target, covid_noncovid_target2 = self.remap_labels_for_difference(target)
covid_noncovid_target_ref, covid_noncovid_target_ref2 = self.remap_labels_for_difference(target_ref)
difference_output = None
difference_output_reverse = None
difference_target = None
difference_target_reverse = None
output_ref = None
if mode == Mode.VAL:
output_ref = self.model(data_ref)
covid_noncovid_output_ref, covid_noncovid_output_ref2 = self.remap_labels_for_difference(output_ref)
try:
for i in range(data.size(0)):
                        if is_one[i]:
self.log_scalars(metrics, self.get_step(batch_idx, epoch, _len_epoch),
output_ref[i].unsqueeze(0), target_ref[i].unsqueeze(0), None, mode,
False, is_last=is_last)
except Exception as e:
print("Exception in is_one: ", e)
difference_output = covid_noncovid_output - covid_noncovid_output_ref
difference_output += 1
difference_output_reverse = covid_noncovid_output2 - covid_noncovid_output_ref2
difference_output_reverse += 1
difference_target = covid_noncovid_target - covid_noncovid_target_ref
difference_target += 1
difference_target_reverse = covid_noncovid_target2 - covid_noncovid_target_ref2
difference_target_reverse += 1
d_output = F.one_hot(difference_output, num_classes=3).permute(0, 3, 1, 2)
d_target = F.one_hot(difference_target, num_classes=3).permute(0, 3, 1, 2)
d_target_reverse = F.one_hot(difference_target_reverse, num_classes=3).permute(0, 3, 1, 2)
d_output_reverse = F.one_hot(difference_output_reverse, num_classes=3).permute(0, 3, 1, 2)
self.log_scalars(metrics, self.get_step(batch_idx, epoch, _len_epoch), d_output, d_target, None,
mode, False, True, is_last=is_last)
self.log_scalars(metrics, self.get_step(batch_idx, epoch, _len_epoch), d_output_reverse,
d_target_reverse, None,
mode, True, True, is_last=is_last)
if not (batch_idx % self.log_step):
self.logger.info(f'{mode.value} Epoch: {epoch} {self._progress(data_loader, batch_idx, _len_epoch)} Loss: {loss.item():.6f}')
if not (batch_idx % (_len_epoch // 10)):
if mode == Mode.VAL:
log_visualizations(self.writer, data_ref, data, output, target, output_ref, target_ref,
difference_output, difference_target, difference_output_reverse,
difference_target_reverse, None)
del data, target
| 5,253 | 51.54 | 141 | py |
longitudinalCOVID | longitudinalCOVID-master/trainer/LongitudinalTrainer.py |
import numpy
from logger import Mode
from trainer.Trainer import Trainer
from utils.illustration_util import log_visualizations
import torch.nn.functional as F
import torch
class LongitudinalTrainer(Trainer):
"""
Trainer class
"""
def __init__(self, model, loss, metric_ftns, optimizer, config, data_loader, fold=None,
valid_data_loader=None, lr_scheduler=None, len_epoch=None):
super().__init__(model, loss, metric_ftns, optimizer, config, data_loader, fold, valid_data_loader, lr_scheduler,
len_epoch)
def remap_labels_for_difference(self, output):
covid_noncovid_output_ref = output.argmax(1)
covid_noncovid_output_ref2 = covid_noncovid_output_ref.clone()
covid_noncovid_output_ref2[covid_noncovid_output_ref != 3] = 0
covid_noncovid_output_ref2[covid_noncovid_output_ref == 3] = 1
covid_noncovid_output_ref[covid_noncovid_output_ref >= 2] = 3
covid_noncovid_output_ref[covid_noncovid_output_ref <= 1] = 0
covid_noncovid_output_ref[covid_noncovid_output_ref == 3] = 1
# first mask is for covid/non-covid difference and second mask is for cons/non-cons
return covid_noncovid_output_ref, covid_noncovid_output_ref2
def _process(self, epoch, data_loader, metrics, mode: Mode = Mode.TRAIN):
_len_epoch = self.len_epoch if mode == Mode.TRAIN else self.len_epoch_val
TOY = False # set to True to perform a toy experiment where target and reference CTs are the same
for batch_idx, (x_ref, x, target_ref, target, mismatch, is_mismatch, is_last) in enumerate(data_loader):
x_ref, x, target, target_ref = x_ref.to(self.device), x.to(self.device), target.to(
self.device), target_ref.to(self.device)
if mode == Mode.TRAIN:
self.optimizer.zero_grad()
if not TOY:
output, encoded = self.model(x_ref, x)
else:
output, encoded = self.model(x,x)
loss = self.loss(output, target)
if mode == Mode.TRAIN:
loss.backward()
self.optimizer.step()
self.log_scalars(metrics, self.get_step(batch_idx, epoch, _len_epoch), output, target, loss, mode,
is_last=is_last, toy=TOY)
covid_noncovid_output, covid_noncovid_output2 = self.remap_labels_for_difference(output)
covid_noncovid_target, covid_noncovid_target2 = self.remap_labels_for_difference(target)
covid_noncovid_target_ref, covid_noncovid_target_ref2 = self.remap_labels_for_difference(target_ref)
difference_output = None
difference_output_reverse = None
difference_target = None
difference_target_reverse = None
output_ref = None
if mode == Mode.VAL:
mismatch = mismatch.to(self.device)
if not TOY:
output_ref, encoded_ref = self.model(mismatch, x_ref)
else:
output_ref, encoded_ref = self.model(x_ref, x_ref)
covid_noncovid_output_ref, covid_noncovid_output_ref2 = self.remap_labels_for_difference(output_ref)
difference_output = covid_noncovid_output - covid_noncovid_output_ref
difference_output += 1
difference_output_reverse = covid_noncovid_output2 - covid_noncovid_output_ref2
difference_output_reverse += 1
difference_target = covid_noncovid_target - covid_noncovid_target_ref
difference_target += 1
difference_target_reverse = covid_noncovid_target2 - covid_noncovid_target_ref2
difference_target_reverse += 1
d_output = F.one_hot(difference_output, num_classes=3).permute(0, 3, 1, 2)
d_target = F.one_hot(difference_target, num_classes=3).permute(0, 3, 1, 2)
d_target_reverse = F.one_hot(difference_target_reverse, num_classes=3).permute(0, 3, 1, 2)
d_output_reverse = F.one_hot(difference_output_reverse, num_classes=3).permute(0, 3, 1, 2)
try:
output_refs = torch.tensor([]).to(self.device)
target_refs = torch.tensor([]).to(self.device)
empty = True
for i in range(x.size(0)):
if not is_mismatch[i]:
empty = False
output_refs = torch.cat((output_refs, output_ref[i].unsqueeze(0)))
target_refs = torch.cat((target_refs, target_ref[i].unsqueeze(0)))
if not empty:
self.log_scalars(metrics, self.get_step(batch_idx, epoch, _len_epoch), output_refs, target_refs,
None, mode, False, is_last=is_last, toy=TOY)
except Exception as e:
print("Exception in mismatch:", is_mismatch, e)
self.log_scalars(metrics, self.get_step(batch_idx, epoch, _len_epoch), d_output, d_target, None,
mode, False, True, is_last=is_last, toy=TOY)
self.log_scalars(metrics, self.get_step(batch_idx, epoch, _len_epoch), d_output_reverse,
d_target_reverse, None,
mode, True, True, is_last=is_last, toy=TOY)
if not (batch_idx % self.log_step):
self.logger.info(f'{mode.value} Epoch: {epoch} {self._progress(data_loader, batch_idx,_len_epoch)} Loss: {loss.item():.6f}')
            if _len_epoch >= 10 and not (batch_idx % (_len_epoch // 10)):  # guard against division by zero for very short epochs
log_visualizations(self.writer, x_ref, x, output, target, output_ref, target_ref,
difference_output, difference_target, difference_output_reverse,
difference_target_reverse, encoded, toy=TOY)
del x_ref, x, target, target_ref, mismatch
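# Illustrative sketch (not exercised by the pipeline): standalone replica of the
# remap_labels_for_difference rules on a toy 5-class output. The class layout
# (0/1 healthy or background, 2-4 pathological, 3 = consolidation) is an
# assumption inferred from the remapping thresholds above.
def _demo_remap_labels():
    import torch
    logits = torch.zeros(1, 5, 2, 2)  # (batch, classes, H, W)
    logits[0, 3, 0, 0] = 1.0  # pixel (0, 0) -> class 3
    logits[0, 1, 0, 1] = 1.0  # pixel (0, 1) -> class 1
    logits[0, 4, 1, 0] = 1.0  # pixel (1, 0) -> class 4
    labels = logits.argmax(1)
    consolidation = (labels == 3).long()  # second returned mask
    pathological = (labels >= 2).long()  # first returned mask
    assert pathological[0].tolist() == [[1, 0], [1, 0]]
    assert consolidation[0].tolist() == [[1, 0], [0, 0]]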
| 6,063 | 47.512 | 140 | py |
longitudinalCOVID | longitudinalCOVID-master/trainer/Trainer.py | from abc import abstractmethod
import numpy as np
import torch
from base import BaseTrainer
from logger import Mode
from utils import MetricTracker
class Trainer(BaseTrainer):
"""
Trainer class
"""
def __init__(self, model, loss, metric_ftns, optimizer, config, data_loader, fold=None,
valid_data_loader=None, lr_scheduler=None, len_epoch=None):
super().__init__(model, loss, metric_ftns, optimizer, config, fold)
self.config = config
self.data_loader = data_loader
        self.valid_data_loader = valid_data_loader
        self.do_validation = self.valid_data_loader is not None
        self.do_training = self.data_loader is not None
        if len_epoch is None:
            # epoch-based training
            self.len_epoch = len(self.data_loader) if self.do_training else 0
        else:
            # iteration-based training: treat len_epoch batches as one epoch
            self.len_epoch = len_epoch
        self.len_epoch_val = len(self.valid_data_loader) if self.do_validation else 0
self.lr_scheduler = lr_scheduler
self.log_step = int(np.sqrt(data_loader.batch_size)) if self.do_training else int(np.sqrt(valid_data_loader.batch_size))
        self.train_metrics = MetricTracker('loss', *[m.__name__ for m in self.metric_ftns], writer=self.writer, logger=self.logger)
self.valid_metrics = MetricTracker('loss', *[m.__name__ for m in self.metric_ftns], writer=self.writer, logger=self.logger)
@abstractmethod
def _process(self, epoch, data_loader, metrics, mode: Mode = Mode.TRAIN):
raise NotImplementedError('Method _process() from Trainer class has to be implemented!')
def _train_epoch(self, epoch):
"""
Training logic for an epoch
:param epoch: Integer, current training epoch.
:return: A log that contains average loss and metric in this epoch.
"""
if self.do_training:
self.model.train()
self.train_metrics.reset()
self._process(epoch, self.data_loader, self.train_metrics, Mode.TRAIN)
log = self.train_metrics.result()
if self.do_validation:
val_log = self._valid_epoch(epoch)
log.update(**{'val_' + k: v for k, v in val_log.items()})
if self.do_training and self.lr_scheduler is not None:
self.lr_scheduler.step()
return log
def _valid_epoch(self, epoch):
"""
Validate after training an epoch
:param epoch: Integer, current training epoch.
:return: A log that contains information about validation
"""
self.model.eval()
self.valid_metrics.reset()
with torch.no_grad():
self._process(epoch, self.valid_data_loader, self.valid_metrics, Mode.VAL)
# add histogram of model parameters to the tensorboard
for name, p in self.model.named_parameters():
self.writer.add_histogram(name, p, bins='auto')
return self.valid_metrics.result()
    def log_scalars(self, metrics, step, output, target, loss, mode=Mode.TRAIN, reverse=False, difference=False, toy=False,
is_last=None):
if is_last is None:
is_last = [False] * target.size(0)
if not difference:
self.writer.set_step(step, mode)
if loss is not None:
metrics.update('loss', loss.item())
for met in self.metric_ftns:
if met.__name__ in ["vd, LTPR, LFPR"]:
for i in range(target.size(0)):
if not is_last[i]:
met(output[i].unsqueeze(0), target[i].unsqueeze(0), is_last[i])
continue
metrics.update(met.__name__, met(output[i].unsqueeze(0), target[i].unsqueeze(0)))
metrics.update(met.__name__, met(output, target))
elif not reverse:
self.writer.set_step(step, mode)
for met in self.metric_ftns:
if met.__name__ in ["LFPR", "LTPR"]:
continue
if met.__name__ in ["vd"]:
for i in range(target.size(0)):
if not is_last[i]:
                            met(output[i].unsqueeze(0), target[i].unsqueeze(0), is_last[i])
continue
metrics.update(met.__name__, met(output[i].unsqueeze(0), target[i].unsqueeze(0)))
metrics.update(met.__name__ + "_difference", met(output, target))
else:
self.writer.set_step(step, mode)
last_metric = self.metric_ftns[-1].__name__
for met in self.metric_ftns:
if met.__name__ in ["LFPR","LTPR"]:
continue
if met.__name__ in ["vd"]:
for i in range(target.size(0)):
if not is_last[i]:
                            met(output[i].unsqueeze(0), target[i].unsqueeze(0), is_last[i])
continue
metrics.update(met.__name__, met(output[i].unsqueeze(0), target[i].unsqueeze(0)))
                if met.__name__ in [last_metric]:
                    metrics.update(met.__name__ + "_difference_reverse", met(output, target), is_last=is_last)
                else:
                    metrics.update(met.__name__ + "_difference_reverse", met(output, target))
@staticmethod
def _progress(data_loader, batch_idx, batches):
base = '[{}/{} ({:.0f}%)]'
if hasattr(data_loader, 'n_samples'):
current = batch_idx * data_loader.batch_size
total = data_loader.n_samples
else:
current = batch_idx
total = batches
return base.format(current, total, 100.0 * current / total)
@staticmethod
def get_step(batch_idx, epoch, len_epoch):
return (epoch - 1) * len_epoch + batch_idx
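# Illustrative sketch: get_step flattens (epoch, batch) into one monotonically
# increasing global step, which the TensorboardWriter uses as its x-axis.
def _demo_get_step():
    assert Trainer.get_step(batch_idx=0, epoch=1, len_epoch=100) == 0
    assert Trainer.get_step(batch_idx=5, epoch=3, len_epoch=100) == 205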
| 5,947 | 40.594406 | 132 | py |
longitudinalCOVID | longitudinalCOVID-master/data_loader/Dataloader.py | from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, WeightedRandomSampler
from torch.utils.data.dataloader import default_collate
import numpy as np
class Dataloader(DataLoader):
"""
    data loading -- uncomment the commented lines below to sample classes with reverse-frequency weighting
"""
def __init__(self, dataset, batch_size, shuffle=True, num_workers=1):
self.dataset = dataset
# self.weights = np.array(self.dataset.number_of_classes)
# self.weights = 1 / self.weights
# self.weights = self.weights / sum(self.weights)
# self.balance = self.dataset.weights
self.shuffle = shuffle
self.batch_idx = 0
        if self.shuffle:
            self.sampler = RandomSampler(self.dataset)  # Replace with: WeightedRandomSampler(self.balance, len(self.dataset))
        else:
            self.sampler = SequentialSampler(self.dataset)
        # a custom sampler is mutually exclusive with shuffle in torch's DataLoader
        self.shuffle = False
self.init_kwargs = {
'dataset': self.dataset,
'batch_size': batch_size,
'shuffle': self.shuffle,
'collate_fn': default_collate,
'num_workers': num_workers
}
super().__init__(sampler=self.sampler, **self.init_kwargs)
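# Illustrative usage sketch: TensorDataset stands in for the project's CT
# datasets here; any map-style dataset with __len__ works the same way.
def _demo_dataloader():
    import torch
    from torch.utils.data import TensorDataset
    dataset = TensorDataset(torch.randn(8, 1, 4, 4), torch.randint(0, 2, (8,)))
    loader = Dataloader(dataset, batch_size=4, shuffle=True, num_workers=0)
    x, y = next(iter(loader))
    assert x.shape == (4, 1, 4, 4) and y.shape == (4,)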
| 1,242 | 34.514286 | 125 | py |
longitudinalCOVID | longitudinalCOVID-master/dataset/rigid_and_deformable_registration.py | from pathlib import Path
import SimpleITK as sitk
import numpy as np
import sys
import torch
import nibabel as nib
from skimage.transform import resize
def iteration_callback(filter):
global itr
print("deformable iter:", itr, "loss:", filter.GetMetricValue(), flush=True)
itr += 1
def save(filter, fixed, moving, fct, mct, fpathology, mpathology):
m = sitk.GetArrayFromImage(sitk.Resample(moving, fixed, filter,
sitk.sitkLinear, 0.0,
moving.GetPixelIDValue()))
mct = resize(mct, fct.shape)
mct = sitk.GetImageFromArray(mct, False)
mct = sitk.GetArrayFromImage(sitk.Resample(mct, fixed, filter,
sitk.sitkLinear, 0.0,
mct.GetPixelIDValue()))
if mpathology is not None:
mpathology = resize(mpathology, fpathology.shape, order=0)
mpathology = sitk.GetImageFromArray(mpathology, False)
mpathology = sitk.GetArrayFromImage(sitk.Resample(mpathology, fixed, filter,
sitk.sitkLinear, 0.0,
mpathology.GetPixelIDValue()))
return m, mct, mpathology
def log_rigid():
global iteration
print("rigid iter:", iteration, flush=True)
iteration += 1
def rigid_registration(f, m):
transform = sitk.CenteredTransformInitializer(f,
m,
sitk.Euler3DTransform(),
sitk.CenteredTransformInitializerFilter.GEOMETRY)
# multi-resolution rigid registration using Mutual Information
registration_m = sitk.ImageRegistrationMethod()
registration_m.SetMetricAsMattesMutualInformation(numberOfHistogramBins=50)
registration_m.SetMetricSamplingStrategy(registration_m.RANDOM)
registration_m.SetMetricSamplingPercentage(0.01)
registration_m.SetInterpolator(sitk.sitkLinear)
registration_m.SetOptimizerAsGradientDescent(learningRate=1.0,
numberOfIterations=100,
convergenceMinimumValue=1e-6,
convergenceWindowSize=10)
registration_m.SetOptimizerScalesFromPhysicalShift()
registration_m.SetShrinkFactorsPerLevel(shrinkFactors=[4, 2, 1])
registration_m.SetSmoothingSigmasPerLevel(smoothingSigmas=[2, 1, 0])
registration_m.SmoothingSigmasAreSpecifiedInPhysicalUnitsOn()
registration_m.SetInitialTransform(transform)
    # reset the rigid-iteration counter and attach the logging callback
    global iteration
    iteration = 1
registration_m.AddCommand(sitk.sitkIterationEvent,
lambda: log_rigid())
rigid_transformation = registration_m.Execute(f, m)
m = sitk.Resample(m, f, rigid_transformation, sitk.sitkLinear, 0.0,
m.GetPixelIDValue())
print("rigid registration finished.", flush=True)
return f, m
itr = 0
iteration = 1
def deformable_registration(fixed_image, moving_image, fixed_ct, moving_ct, fixed_pathology, moving_pathology):
moving_image = resize(moving_image, fixed_image.shape, order=0)
fixed_image = sitk.GetImageFromArray(fixed_image, False)
moving_image = sitk.GetImageFromArray(moving_image, False)
    # uncomment to run rigid registration first
# fixed_image, moving_image = rigid_registration(fixed_image,moving_image)
registration_method = sitk.ImageRegistrationMethod()
# Determine the number of BSpline control points using the physical
# spacing we want for the finest resolution control grid.
grid_physical_spacing = [50.0, 50.0, 50.0] # A control point every 50mm
image_physical_size = [size * spacing for size, spacing in zip(fixed_image.GetSize(), fixed_image.GetSpacing())]
mesh_size = [int(image_size / grid_spacing + 0.5) \
for image_size, grid_spacing in zip(image_physical_size, grid_physical_spacing)]
# The starting mesh size will be 1/4 of the original, it will be refined by
# the multi-resolution framework.
mesh_size = [int(sz / 4 + 0.5) for sz in mesh_size]
initial_transform = sitk.BSplineTransformInitializer(image1=fixed_image,
transformDomainMeshSize=mesh_size, order=3)
# Instead of the standard SetInitialTransform we use the BSpline specific method which also
# accepts the scaleFactors parameter to refine the BSpline mesh. In this case we start with
# the given mesh_size at the highest pyramid level then we double it in the next lower level and
# in the full resolution image we use a mesh that is four times the original size.
registration_method.SetInitialTransformAsBSpline(initial_transform,
inPlace=False,
scaleFactors=[1, 2, 4])
registration_method.SetMetricAsMeanSquares()
registration_method.SetMetricSamplingStrategy(registration_method.RANDOM)
registration_method.SetMetricSamplingPercentage(0.01)
registration_method.SetShrinkFactorsPerLevel(shrinkFactors=[4, 2, 1])
registration_method.SetSmoothingSigmasPerLevel(smoothingSigmas=[2, 1, 0])
registration_method.SmoothingSigmasAreSpecifiedInPhysicalUnitsOn()
registration_method.SetInterpolator(sitk.sitkLinear)
registration_method.SetOptimizerAsGradientDescent(learningRate=1.0,
numberOfIterations=50,
convergenceMinimumValue=1e-6,
convergenceWindowSize=10)
registration_method.SetOptimizerScalesFromPhysicalShift()
registration_method.AddCommand(sitk.sitkIterationEvent, lambda: iteration_callback(registration_method))
global itr
itr = 0
final_transformation = registration_method.Execute(fixed_image, moving_image)
m, mct, mpathology = save(final_transformation, fixed_image, moving_image, fixed_ct, moving_ct, fixed_pathology,
moving_pathology)
print(final_transformation, flush=True)
print('\nOptimizer\'s stopping condition, {0}'.format(registration_method.GetOptimizerStopConditionDescription()))
return m, mct, mpathology
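# Illustrative worked example: how the BSpline control-point grid above follows
# from the 50mm physical spacing. The volume size and voxel spacing below are
# assumed values, not taken from the dataset.
def _demo_mesh_size():
    size, spacing = [512, 512, 128], [0.7, 0.7, 3.0]
    physical = [sz * sp for sz, sp in zip(size, spacing)]  # extent in mm
    mesh = [int(p / 50.0 + 0.5) for p in physical]  # one control point per 50mm
    mesh = [int(m / 4 + 0.5) for m in mesh]  # coarsest pyramid level
    assert mesh == [2, 2, 2]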
| 6,550 | 45.792857 | 118 | py |
longitudinalCOVID | longitudinalCOVID-master/dataset/DatasetStatic.py | import os
import sys
import h5py
import numpy as np
import torch
import torch.nn.functional as F
from PIL import Image
from matplotlib import cm
from skimage.transform import resize
from torch.utils.data import Dataset
from pathlib import Path
from skimage import feature
from torchvision.transforms import transforms
from dataset.dataset_utils import Phase, Modalities, Views, Mode, retrieve_data_dir_paths, Evaluate
class DatasetStatic(Dataset):
"""DatasetStatic dataset"""
def __init__(self, data_dir, phase=Phase.TRAIN, modalities=(), val_patients=None, evaluate: Evaluate = Evaluate.TRAINING, preprocess=True, size=300, n_classes=5,
view: Views = None):
self.modalities = list(map(lambda x: Modalities(x), modalities))
self.size = size
self.n_classes = n_classes
self.data_dir_paths = retrieve_data_dir_paths(data_dir, evaluate, phase, preprocess, val_patients, Mode.STATIC, size, view)
def __len__(self):
return len(self.data_dir_paths)
def crop_center(self, img, cropx, cropy):
z, y, x = img.shape
startx = x // 2 - (cropx // 2)
starty = y // 2 - (cropy // 2)
return img[:, starty:starty + cropy, startx:startx + cropx]
def __getitem__(self, idx):
data, label = [], None
slice = int(self.data_dir_paths[idx].split("/")[-1])
view = int(self.data_dir_paths[idx].split("/")[-2])
try:
if idx + 1 >= self.__len__():
is_last = True
else:
next_one = self.data_dir_paths[idx + 1]
next_slice = int(next_one.split("/")[-1])
is_last = next_slice <= slice and view == 2
except Exception as e:
print("IS_LAST Exception", e)
is_last = True
for i, modality in enumerate(self.modalities):
try:
with h5py.File(os.path.join(self.data_dir_paths[idx], f'{modality.value}.h5'), 'r') as f:
data.append(f['data'][()])
if label is None:
label = f['label'][()]
label[label > self.n_classes - 1] = self.n_classes - 1
label = F.one_hot(torch.as_tensor(label, dtype=torch.int64), num_classes=self.n_classes).permute(2, 0, 1)
except Exception as e:
print("EXCEPTION in loading data!: ", e)
return self.__getitem__(idx+1)
mismatch, mismatch_label = [], None
print(self.data_dir_paths[idx], flush=True)
is_one = False
        if "2_2" in self.data_dir_paths[idx]:
            mismatch_path = self.data_dir_paths[idx].replace("2/2_2", "1/1_2")
            is_one = True
        elif "1_1" in self.data_dir_paths[idx]:
            mismatch_path = self.data_dir_paths[idx].replace("1/1_1", "2/2_1")
        else:
            mismatch_path = self.data_dir_paths[idx].replace("3/3_3", "2/2_3")
for i, modality in enumerate(self.modalities):
with h5py.File(os.path.join(mismatch_path, f'{modality.value}.h5'), 'r') as f:
mismatch.append(f['data'][()])
mismatch_label = torch.as_tensor(f['label'][()], dtype=torch.int64)
mismatch_label[mismatch_label > self.n_classes - 1] = self.n_classes - 1
mismatch_label = F.one_hot(mismatch_label, num_classes=self.n_classes).permute(2, 0, 1)
data = np.array(data)
if data.shape != (1,self.size, self.size):
print("INCORRECT SHAPE", self.data_dir_paths[idx], data.shape, label.shape, flush=True)
data = resize(data,(1,self.size, self.size))
label = resize(label, (self.n_classes, self.size, self.size), order=0)
mismatch = np.array(mismatch)
if mismatch.shape != (1,self.size, self.size):
print("INCORRECT SHAPE mismatch", mismatch_path, mismatch.shape, mismatch_label.shape , flush=True)
mismatch = resize(mismatch, (1,self.size, self.size))
mismatch_label = resize(mismatch_label, (self.n_classes, self.size, self.size), order=0)
mismatch = torch.as_tensor(mismatch)
data = torch.as_tensor(data).float()
return data.float(), torch.as_tensor(label).float(), mismatch.float(), torch.as_tensor(mismatch_label).float(), is_one, is_last
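# Illustrative sketch: crop_center keeps the central cropy-by-cropx window of
# every slice. __new__ is used only to bypass the disk-backed __init__.
def _demo_crop_center():
    import numpy as np
    volume = np.arange(36).reshape(1, 6, 6)
    ds = DatasetStatic.__new__(DatasetStatic)
    cropped = ds.crop_center(volume, cropx=4, cropy=4)
    assert cropped.shape == (1, 4, 4)
    assert cropped[0, 0, 0] == volume[0, 1, 1]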
| 4,367 | 43.571429 | 165 | py |
longitudinalCOVID | longitudinalCOVID-master/dataset/DatasetLongitudinal.py | import os
import h5py
import numpy as np
import torch
import torch.nn.functional as F
from PIL import Image
from skimage import feature
from skimage.transform import resize
from torch.utils.data import Dataset
from torchvision import transforms
from dataset.dataset_utils import Phase, Modalities, Mode, retrieve_data_dir_paths, Evaluate
class DatasetLongitudinal(Dataset):
"""DatasetLongitudinal dataset"""
def __init__(self, data_dir, phase=Phase.TRAIN, modalities=(), val_patients=None,
evaluate: Evaluate = Evaluate.TRAINING, size=300, n_classes=5, preprocess=True, view=None):
self.modalities = list(map(lambda x: Modalities(x), modalities))
self.phase = phase
self.size = size
self.n_classes = n_classes
self.data_dir_paths = retrieve_data_dir_paths(data_dir, evaluate, phase, preprocess, val_patients,
Mode.LONGITUDINAL, size, view)
self.transforms = transforms.Compose([transforms.RandomRotation(10),
transforms.RandomAffine((0, 0), translate=(0,0.25))]) # use for augmentation
def __len__(self):
return len(self.data_dir_paths)
def crop_center(self, img, cropx, cropy):
z, y, x = img.shape
startx = x // 2 - (cropx // 2)
return img[:, :cropy, startx:startx + cropx]
def __getitem__(self, idx):
x_ref, x, ref_label, label = [], [], None, None
x_ref_path, x_path = self.data_dir_paths[idx]
slice = int(x_path.split("/")[-1])
view = int(x_path.split("/")[-2])
try:
            if idx + 1 >= self.__len__():  # is_last is used for the LTPR, LFPR and VD metrics -- it can be omitted from the code if these metrics are not used
is_last = True
else:
next_one = self.data_dir_paths[idx + 1][1]
next_slice = int(next_one.split("/")[-1])
is_last = next_slice <= slice and view == 2
except:
is_last = True
print("Exception in extracting next slice")
for i, modality in enumerate(self.modalities):
with h5py.File(os.path.join(x_ref_path, f'{modality.value}.h5'), 'r') as f:
x_ref.append(f['data'][()])
if ref_label is None:
ref_label = torch.as_tensor(f['label'][()], dtype=torch.int64)
ref_label[ref_label > self.n_classes - 1] = self.n_classes - 1
ref_label = F.one_hot(ref_label, num_classes=self.n_classes).permute(2, 0, 1)
with h5py.File(os.path.join(x_path, f'{modality.value}.h5'), 'r') as f:
x.append(f['data'][()])
if label is None:
try:
label = torch.as_tensor(f['label'][()], dtype=torch.int64)
label[label > self.n_classes - 1] = self.n_classes - 1
label = F.one_hot(label, num_classes=self.n_classes).permute(2, 0, 1) # volume
except Exception:
return self.__getitem__(idx + 1)
mismatch = []
is_mismatch = False # For patients with 3 scans, scan 2 is always referenced by scan 1 (hence the mismatch), scan 3 by scan 2, and scan 1 by scan 2.
mismatch_path = None
        if "2_3" in self.data_dir_paths[idx][0]:
mismatch_path = self.data_dir_paths[idx][0].replace("2/2_3", "1/1_3")
for i, modality in enumerate(self.modalities):
with h5py.File(os.path.join(mismatch_path, f'{modality.value}.h5'), 'r') as f:
mismatch.append(f['data'][()])
is_mismatch = True
x = np.array(x)
x_ref = np.array(x_ref)
if x.shape != (1, self.size, self.size):
print("INCORRECT SHAPE", x_path, x.shape, label.shape, flush=True)
x = resize(x, (1, self.size, self.size))
label = resize(label, (self.n_classes, self.size, self.size), order=0)
if x_ref.shape != (1, self.size, self.size):
print("INCORRECT SHAPE", x_ref_path, x_ref.shape, ref_label.shape, flush=True)
x_ref = resize(x_ref, (1, self.size, self.size))
ref_label = resize(ref_label, (self.n_classes, self.size, self.size), order=0)
if not len(mismatch):
mismatch = x
else:
mismatch = np.array(mismatch)
if mismatch.shape != (1, self.size, self.size):
print("INCORRECT SHAPE mismatch", mismatch_path, mismatch.shape, flush=True)
mismatch = resize(mismatch, (1, self.size, self.size))
mismatch = torch.as_tensor(mismatch)
x = torch.as_tensor(x)
x_ref = torch.as_tensor(x_ref)
return x_ref.float(), x.float(), torch.as_tensor(ref_label).float(), torch.as_tensor(
label).float(), mismatch.float(), is_mismatch, is_last
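# Illustrative sketch of the label preparation used in __getitem__ above:
# out-of-range ids are clamped to n_classes - 1, then one-hot encoded and
# permuted to channels-first (C, H, W).
def _demo_one_hot_labels():
    import torch
    import torch.nn.functional as F
    n_classes = 5
    label = torch.tensor([[0, 2], [6, 4]], dtype=torch.int64)
    label[label > n_classes - 1] = n_classes - 1
    one_hot = F.one_hot(label, num_classes=n_classes).permute(2, 0, 1)
    assert one_hot.shape == (n_classes, 2, 2)
    assert one_hot[4, 1, 0] == 1  # the clamped voxel lands in class 4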
| 4,962 | 46.721154 | 157 | py |
longitudinalCOVID | longitudinalCOVID-master/dataset/dynamic/util.py | from pathlib import Path
import yaml
import numpy as np
from sklearn.model_selection import train_test_split
import pandas as pd
import hashlib
import torch
def load_config_yaml(path):
"""loads a yaml config from file and returns a dict"""
path = Path(path)
with open(path) as file:
cfg = yaml.full_load(file)
return cfg
def save_config_yaml(path, config):
path = Path(path)
with open(path, "w") as file:
yaml.dump(config, file)
def split_idxs(idxs_in, test_size=0.1, val_size=0.1, seed=42, shuffle=True):
"""split indices into test, val and train
"""
idxs_out = {}
if test_size > 0:
idxs_out["train"], idxs_out["test"] = train_test_split(
idxs_in, test_size=test_size, shuffle=shuffle, stratify=None, random_state=seed
)
else:
idxs_out["test"] = []
idxs_out["train"] = idxs_in
if val_size > 0:
idxs_out["train"], idxs_out["val"] = train_test_split(
idxs_out["train"],
test_size=val_size / (1 - test_size),
shuffle=True,
stratify=None,
random_state=seed,
)
else:
idxs_out["val"] = []
return idxs_out
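# Illustrative sketch: with the defaults the indices split roughly 80/10/10;
# the val fraction is rescaled by 1 / (1 - test_size) so it is measured
# against the full set, and the three parts always partition the input.
def _demo_split_idxs():
    idxs = split_idxs(list(range(100)), test_size=0.1, val_size=0.1, seed=42)
    assert len(idxs["test"]) == 10
    combined = sorted(idxs["train"] + idxs["val"] + idxs["test"])
    assert combined == list(range(100))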
def rm_tree(pth: Path):
"""WARNING: deletes path recursively like rm -rf"""
print(f"Recursively deleting '{pth}'")
for child in pth.iterdir():
if child.is_file():
child.unlink()
else:
rm_tree(child)
pth.rmdir()
def get_sha256_hash(path):
"""returns sha256 hash from file found at path"""
return hashlib.sha256(Path(path).read_bytes()).hexdigest()
def save_hash(hash, path):
"""save hash to given path"""
with open(path, "w") as hash_file:
print(hash, file=hash_file, end="")
def load_hash(path):
"""load hash from path"""
with open(path, "r") as hash_file:
return hash_file.read()
def verify_config_hash(config_path, npy_path: Path):
"""checks if config is the same as hashed and return bool"""
hash_path = npy_path / "config_hash.sha256"
if hash_path.is_file():
new_hash = get_sha256_hash(config_path)
old_hash = load_hash(hash_path)
if new_hash == old_hash:
return True
return False
def save_config_hash(config_path, npy_path: Path):
"""saves hash of given config"""
cfg_hash = get_sha256_hash(config_path)
hash_path = npy_path / "config_hash.sha256"
save_hash(cfg_hash, hash_path)
def make_config(cfg, dyndata_path):
"""write a config yaml file based on the cfg dictionary provided"""
pp_path = dyndata_path
setup_yml_path = pp_path / "setup.yml"
assert Path(
setup_yml_path
).is_file(), f"setup yaml could not be found at '{setup_yml_path}'"
setup = load_config_yaml(setup_yml_path)
cfg["setup_hash"] = get_sha256_hash(setup_yml_path)
if "labels" not in cfg.keys():
assert (
"labelmap" in cfg.keys()
), "labelmap needs to be specified check setup script"
labels_dict = setup["labels"][cfg["labelmap"]]
cfg["labels"] = sorted(labels_dict, key=labels_dict.get)
cfg_path = (pp_path / f"config_{cfg['name']}.yml").absolute()
save_config_yaml(cfg_path, cfg)
print(
f"'{cfg['name']}' config for '{setup['name']}' dataset \nwas successfully saved to '{cfg_path}'"
)
def to_crop_padded_tensor_3d(data, out_dims=[64, 64, 64], padding_value=0):
""" pads a list of numpy arrays to given output dimension and
returns one big tensor """
num_chan = data.shape[0]
data = torch.from_numpy(data)
out_shape = [num_chan, *out_dims]
out_dims = torch.tensor(out_dims)
out_tensor = torch.full(size=out_shape, fill_value=padding_value, dtype=data.dtype)
for i in range(num_chan):
in_dims = torch.tensor(data[i].shape)
        padding = torch.div(out_dims - in_dims, 2, rounding_mode='trunc')  # integer division keeps the values usable as slice indices
start = padding.clone()
start_data = torch.zeros_like(padding)
end_data = in_dims.clone()
end = padding + in_dims
# check if tensor needs to be cropped
for d in range(3):
if in_dims[d] > out_dims[d]:
start[d] = 0
start_data[d] = -padding[d]
end[d] = out_dims[d]
end_data[d] = start_data[d] + out_dims[d]
out_tensor[
i, start[0]:end[0], start[1]:end[1], start[2]:end[2]
] = data[i, start_data[0]:end_data[0], start_data[1]:end_data[1], start_data[2]:end_data[2]]
return out_tensor
def random_narrow_tensor(tensors, narrow_size, dim=0, include="center", ignore_bg=True):
non_zero = (
tensors[1][ignore_bg:] != 0
).nonzero() # Contains non-zero indices for all 4 dims
h_min = non_zero[:, dim].min()
h_max = non_zero[:, dim].max()
if include == "target":
start_slice = int(
np.clip(
(h_min + (((h_max - h_min) - narrow_size)) * np.random.random()),
0,
tensors[0].size(dim) - narrow_size,
)
)
elif include == "center":
start_slice = int(
np.clip(
((h_min + (h_max - h_min) / 2) - narrow_size / 2),
0,
tensors[0].size(dim) - narrow_size,
)
)
elif include == "random":
start_slice = np.random.randint(tensors[0].size(dim) - narrow_size)
else:
return tensors
for i in range(len(tensors)):
tensors[i] = torch.narrow(tensors[i], dim, start_slice, narrow_size)
return tensors
def crop_to_mask(data, seg, lung):
    """
    crop data, segmentation and lung masks to the joint bounding box where the
    lung and segmentation masks are non-zero, and return the non-zero mask
    inspired by nnunet and stackoverflow
    """
crop_threshold = -1000000000
mask = np.zeros(data.shape, dtype=bool)
# non zero mask over all channels
cmask = data > crop_threshold
mask = cmask | mask
# non black coordinates
coords = np.argwhere(mask)
# bounding box
x_min, y_min, z_min = coords.min(axis=0)
x_max, y_max, z_max = coords.max(axis=0) + 1 # include top slice
cropped_data = data[x_min:x_max, y_min:y_max, z_min:z_max]
cropped_seg = seg[x_min:x_max, y_min:y_max, z_min:z_max]
cropped_lung = lung[x_min:x_max, y_min:y_max, z_min:z_max]
mask = mask[x_min:x_max, y_min:y_max, z_min:z_max]
coords = np.argwhere(cropped_seg)
coords2 = np.argwhere(cropped_lung)
# bounding box
x_min, y_min, z_min = np.concatenate((np.array([coords2.min(axis=0)]),np.array([coords.min(axis=0)])), axis=0).min(axis=0) # change to : 'coords2.min(axis=0)' for only considering lung mask
x_max, y_max, z_max = np.concatenate((np.array([coords2.max(axis=0)]),np.array([coords.max(axis=0)])), axis=0).max(axis=0) + 1 # include top slice # change to: 'coords2.max(axis=0)' for only considering lung mask
cropped_lung = cropped_lung[x_min:x_max, y_min:y_max, z_min:z_max]
cropped_seg = cropped_seg[x_min:x_max, y_min:y_max, z_min:z_max]
cropped_data = cropped_data[x_min:x_max, y_min:y_max, z_min:z_max]
return np.array(cropped_data), np.array(cropped_seg), np.array(cropped_lung), mask
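# Illustrative sketch: crop_to_mask tightens the volume to the joint bounding
# box of the lesion segmentation and the lung mask (toy arrays below).
def _demo_crop_to_mask():
    data = np.random.rand(10, 10, 10)
    seg = np.zeros_like(data)
    lung = np.zeros_like(data)
    seg[4:6, 4:6, 4:6] = 1  # small lesion
    lung[3:8, 3:8, 3:8] = 1  # surrounding lung
    cropped_data, cropped_seg, cropped_lung, mask = crop_to_mask(data, seg, lung)
    assert cropped_data.shape == (5, 5, 5)  # the lung bounding box dominates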
| 7,244 | 32.082192 | 217 | py |
longitudinalCOVID | longitudinalCOVID-master/logger/visualization.py | import importlib
from datetime import datetime
from enum import Enum
class Mode(Enum):
TRAIN = 'Train'
VAL = 'Val'
class TensorboardWriter():
def __init__(self, log_dir, logger, enabled):
self.writer = None
self.selected_module = ""
if enabled:
log_dir = str(log_dir)
# Retrieve visualization writer.
succeeded = False
for module in ["torch.utils.tensorboard", "tensorboardX"]:
try:
self.writer = importlib.import_module(module).SummaryWriter(log_dir)
succeeded = True
break
except ImportError:
succeeded = False
self.selected_module = module
if not succeeded:
message = "Warning: visualization (Tensorboard) is configured to use, but currently not installed on " \
"this machine. Please install TensorboardX with 'pip install tensorboardx', upgrade PyTorch to " \
"version >= 1.1 to use 'torch.utils.tensorboard' or turn off the option in the 'config.json' file."
logger.warning(message)
self.step = 0
self.mode = None
self.tb_writer_ftns = {
'add_scalar', 'add_scalars', 'add_image', 'add_images', 'add_audio', 'add_graph',
'add_text', 'add_histogram', 'add_pr_curve', 'add_embedding'
}
self.tag_mode_exceptions = {'add_graph', 'add_histogram', 'add_embedding'}
self.timer = datetime.now()
def set_step(self, step, mode=Mode.TRAIN):
self.mode = mode
self.step = step
if step == 0:
self.timer = datetime.now()
else:
duration = datetime.now() - self.timer
self.add_scalar('steps_per_sec', 1 / duration.total_seconds())
self.timer = datetime.now()
def __getattr__(self, name):
"""
If visualization is configured to use:
return add_data() methods of tensorboard with additional information (step, tag) added.
Otherwise:
return a blank function handle that does nothing
"""
if name in self.tb_writer_ftns:
add_data = getattr(self.writer, name, None)
def wrapper(tag, data, *args, **kwargs):
if add_data is not None:
# add mode(train/valid) tag
if name not in self.tag_mode_exceptions:
tag = '{}/{}'.format(tag, self.mode.value)
add_data(tag, data, self.step, *args, **kwargs)
return wrapper
else:
# default action for returning methods defined in this class, set_step() for instance.
try:
                attr = object.__getattribute__(self, name)
except AttributeError:
raise AttributeError("type object '{}' has no attribute '{}'".format(self.selected_module, name))
return attr
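# Illustrative usage sketch: with enabled=False every add_* call degrades to a
# silent no-op, so callers never need to check whether tensorboard exists.
def _demo_writer_noop():
    import logging
    writer = TensorboardWriter("/tmp/tb_demo", logging.getLogger(__name__), enabled=False)
    writer.set_step(0, Mode.TRAIN)
    writer.add_scalar("loss", 1.0)  # silently ignored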
| 3,020 | 36.296296 | 125 | py |
longitudinalCOVID | longitudinalCOVID-master/base/base_model.py | from abc import abstractmethod
import numpy as np
import torch.nn as nn
class BaseModel(nn.Module):
"""
Base class for all models
"""
@abstractmethod
def forward(self, *inputs):
"""
Forward pass logic
:return: Model output
"""
raise NotImplementedError
def __str__(self):
"""
Model prints with number of trainable parameters
"""
model_parameters = filter(lambda p: p.requires_grad, self.parameters())
params = sum([np.prod(p.size()) for p in model_parameters])
return super().__str__() + '\nTrainable parameters: {}'.format(params)
| 650 | 22.25 | 79 | py |
longitudinalCOVID | longitudinalCOVID-master/base/base_trainer.py | from abc import abstractmethod
import torch
from numpy import inf
from logger import TensorboardWriter
class BaseTrainer:
"""
Base class for all trainers
"""
def __init__(self, model, loss, metric_ftns, optimizer, config, fold=None):
self.config = config
self.logger = config.get_logger('trainer', config['trainer']['verbosity'])
# setup GPU device if available, move model into configured device
self.device, device_ids = self._prepare_device(config['n_gpu'])
self.model = model.to(self.device)
if len(device_ids) > 1:
self.model = torch.nn.DataParallel(model, device_ids=device_ids)
self.loss = loss
self.metric_ftns = metric_ftns
self.optimizer = optimizer
if fold:
self.fold = str(fold)
else:
self.fold = ''
cfg_trainer = config['trainer']
self.epochs = cfg_trainer['epochs']
self.save_period = cfg_trainer['save_period']
self.monitor = cfg_trainer.get('monitor', 'off')
# configuration to monitor model performance and save best
if self.monitor == 'off':
self.mnt_mode = 'off'
self.mnt_best = 0
else:
self.mnt_mode, self.mnt_metric = self.monitor.split()
assert self.mnt_mode in ['min', 'max']
self.mnt_best = inf if self.mnt_mode == 'min' else -inf
self.early_stop = cfg_trainer.get('early_stop', inf)
self.start_epoch = 1
self.checkpoint_dir = config.save_dir
# setup visualization writer instance
self.writer = TensorboardWriter(config.log_dir, self.logger, cfg_trainer['tensorboard'])
if config.resume is not None:
self._resume_checkpoint(config.resume)
self.not_improved_count = 0
@abstractmethod
def _train_epoch(self, epoch):
"""
Training logic for an epoch
:param epoch: Current epoch number
"""
raise NotImplementedError
def train(self):
"""
Full training logic
"""
best_log = None
for epoch in range(self.start_epoch, self.epochs + 1):
result = self._train_epoch(epoch)
            # save logged information into the log dict
log = {'epoch': epoch}
log.update(result)
            # print logged information to the screen
for key, value in log.items():
self.logger.info(' {:15s}: {}'.format(str(key), value))
# evaluate model performance according to configured metric, save best checkpoint as model_best
best = False
if self.mnt_mode != 'off':
try:
# check whether model performance improved or not, according to specified metric(mnt_metric)
improved = (self.mnt_mode == 'min' and log[self.mnt_metric] <= self.mnt_best) or \
(self.mnt_mode == 'max' and log[self.mnt_metric] >= self.mnt_best)
except KeyError:
self.logger.warning("Warning: Metric '{}' is not found. "
"Model performance monitoring is disabled.".format(self.mnt_metric))
self.mnt_mode = 'off'
improved = False
if improved:
self.mnt_best = log[self.mnt_metric]
best_log = log
self.not_improved_count = 0
best = True
else:
self.not_improved_count += 1
if self.not_improved_count > self.early_stop:
self.logger.info("Validation performance hasn\'t improve for {} epochs. Training stops.".format(self.early_stop))
break
if epoch % self.save_period == 0:
self._save_checkpoint(epoch, save_best=best)
return best_log
def _prepare_device(self, n_gpu_use):
"""
setup GPU device if available, move model into configured device
"""
n_gpu = torch.cuda.device_count()
if n_gpu_use > 0 and n_gpu == 0:
self.logger.warning("Warning: There\'s no GPU available on this machine, training will be performed on CPU.")
n_gpu_use = 0
if n_gpu_use > n_gpu:
self.logger.warning("Warning: The number of GPU\'s configured to use is {}, but only {} are available "
"on this machine.".format(n_gpu_use, n_gpu))
n_gpu_use = n_gpu
device = torch.device('cuda:0' if n_gpu_use > 0 else 'cpu')
list_ids = list(range(n_gpu_use))
return device, list_ids
def _save_checkpoint(self, epoch, save_best=False):
"""
Saving checkpoints
:param epoch: current epoch number
:param save_best: if True, rename the saved checkpoint to 'model_best.pth'
"""
arch = type(self.model).__name__
state = {
'arch': arch,
'epoch': epoch,
'state_dict': self.model.state_dict(),
'optimizer': self.optimizer.state_dict(),
'monitor_best': self.mnt_best,
'config': self.config
}
filename = f'{str(self.checkpoint_dir)}/checkpoint-epoch{epoch}.pth'
# filename = str(self.checkpoint_dir / 'checkpoint-epoch{}.pth'.format(epoch))
torch.save(state, filename)
self.logger.info("Saving checkpoint: {} ...".format(filename))
if save_best:
best_path = f'{str(self.checkpoint_dir)}/model_best'+self.fold+'.pth'
torch.save(state, best_path)
self.logger.info("Saving current best: model_best.pth ...")
def _resume_checkpoint(self, resume_path):
"""
Resume from saved checkpoints
:param resume_path: Checkpoint path to be resumed
"""
resume_path = str(resume_path)
self.logger.info("Loading checkpoint: {} ...".format(resume_path))
checkpoint = torch.load(resume_path, map_location=lambda storage, loc: storage)
self.start_epoch = checkpoint['epoch'] + 1
self.mnt_best = checkpoint['monitor_best']
# load architecture params from checkpoint.
if checkpoint['config']['arch'] != self.config['arch']:
self.logger.warning("Warning: Architecture configuration given in config file is different from that of "
"checkpoint. This may yield an exception while state_dict is being loaded.")
status = self._load_dict(checkpoint)
self.logger.warning(f'Missing keys: {str(status[0])}') if status[0] else None
self.logger.warning(f'Unexpected keys: {str(status[1])}') if status[1] else None
# load optimizer state from checkpoint only when optimizer type is not changed.
if checkpoint['config']['optimizer']['type'] != self.config['optimizer']['type']:
self.logger.warning("Warning: Optimizer type given in config file is different from that of checkpoint. "
"Optimizer parameters not being resumed.")
else:
self.optimizer.load_state_dict(checkpoint['optimizer'])
self.logger.info("Checkpoint loaded. Resume training from epoch {}".format(self.start_epoch))
def _load_dict(self, checkpoint):
return list(self.model.load_state_dict(checkpoint['state_dict'], False))
| 7,505 | 39.354839 | 133 | py |
longitudinalCOVID | longitudinalCOVID-master/base/base_data_loader.py | import numpy as np
from torch.utils.data import DataLoader
from torch.utils.data.dataloader import default_collate
from torch.utils.data.sampler import SubsetRandomSampler
class BaseDataLoader(DataLoader):
"""
Base class for all data loaders
"""
def __init__(self, dataset, batch_size, shuffle, validation_split, num_workers, collate_fn=default_collate):
self.validation_split = validation_split
self.shuffle = shuffle
self.batch_idx = 0
self.n_samples = len(dataset)
self.sampler, self.valid_sampler = self._split_sampler(self.validation_split)
self.init_kwargs = {
'dataset': dataset,
'batch_size': batch_size,
'shuffle': self.shuffle,
'collate_fn': collate_fn,
'num_workers': num_workers
}
super().__init__(sampler=self.sampler, **self.init_kwargs)
def _split_sampler(self, split):
if split == 0.0:
return None, None
idx_full = np.arange(self.n_samples)
np.random.seed(0)
np.random.shuffle(idx_full)
if isinstance(split, int):
assert split > 0
assert split < self.n_samples, "validation set size is configured to be larger than entire dataset."
len_valid = split
else:
len_valid = int(self.n_samples * split)
valid_idx = idx_full[0:len_valid]
train_idx = np.delete(idx_full, np.arange(0, len_valid))
train_sampler = SubsetRandomSampler(train_idx)
valid_sampler = SubsetRandomSampler(valid_idx)
# turn off shuffle option which is mutually exclusive with sampler
self.shuffle = False
self.n_samples = len(train_idx)
return train_sampler, valid_sampler
def split_validation(self):
if self.valid_sampler is None:
return None
else:
return DataLoader(sampler=self.valid_sampler, **self.init_kwargs)
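# Illustrative usage sketch: a 0.2 validation split yields two disjoint
# samplers over the same dataset; TensorDataset is a stand-in dataset.
def _demo_base_data_loader():
    import torch
    from torch.utils.data import TensorDataset
    dataset = TensorDataset(torch.randn(50, 3), torch.zeros(50))
    loader = BaseDataLoader(dataset, batch_size=10, shuffle=True, validation_split=0.2, num_workers=0)
    val_loader = loader.split_validation()
    assert loader.n_samples == 40  # 50 samples minus 10 held out
    assert len(val_loader.sampler) == 10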
| 1,971 | 30.301587 | 112 | py |
longitudinalCOVID | longitudinalCOVID-master/utils/illustration_util.py | import cv2
import numpy as np
import torch
from torchvision.transforms import transforms
from torchvision.utils import make_grid
from PIL import Image, ImageDraw
def warp_flow(img, flow):
h, w = flow.shape[:2]
flow = -flow
flow[:, :, 0] += np.arange(w)
flow[:, :, 1] += np.arange(h)[:, np.newaxis]
res = cv2.remap(img, flow, None, cv2.INTER_LINEAR)
return res
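# Illustrative sketch: a zero flow field reduces the remap grid to plain pixel
# coordinates, so warp_flow becomes the identity warp.
def _demo_warp_flow_identity():
    img = np.random.rand(8, 8).astype(np.float32)
    flow = np.zeros((8, 8, 2), dtype=np.float32)
    assert np.allclose(warp_flow(img, flow), img)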
def visualize_flow(flow):
"""Visualize optical flow
Args:
flow: optical flow map with shape of (H, W, 2), with (y, x) order
Returns:
RGB image of shape (H, W, 3)
"""
assert flow.ndim == 3
assert flow.shape[2] == 2
hsv = np.zeros([flow.shape[0], flow.shape[1], 3], dtype=np.uint8)
mag, ang = cv2.cartToPolar(flow[..., 1], flow[..., 0])
hsv[..., 0] = ang * 180 / np.pi / 2
hsv[..., 1] = 255
hsv[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)
rgb = cv2.cvtColor(hsv, cv2.COLOR_HSV2RGB)
return rgb
def visualize_difference(x):
rgbs = []
for i in x:
hsv = np.zeros([i.shape[0], i.shape[1], 3], dtype=np.uint8)
hsv[..., 1] = 255
hsv[..., 2] = 255
hsv[..., 0] = i * 255 // 2 # cv2.normalize(i, None, 0, 255, cv2.NORM_INF)
rgb = cv2.cvtColor(hsv, cv2.COLOR_HSV2RGB)
rgbs += [rgb]
return np.array(rgbs)
def prepare_encoded(encoded):
encoded = encoded.detach().cpu().numpy().astype('float32')
heatmap = np.mean(encoded, axis=1).squeeze() # mean on the channels
# relu on top of the heatmap
heatmap = np.maximum(heatmap, 0)
# normalize the heatmap
heatmap /= np.max(heatmap)
return heatmap
def log_visualizations(writer, x_ref, x, output, target, output_ref, target_ref, outputDiff,
groundDiff, outputDiffReverse, groundDiffReverse, encoded, toy=False):
batch_size = x.size(0)
x_ref = cast(x_ref)
x = cast(x)
output = cast(output, True, True, True)
target = cast(target, True, True, True)
if output_ref is not None and groundDiff is not None:
outputDiff = visualize_difference(outputDiff.cpu().detach().numpy()).astype('float32')
groundDiff = visualize_difference(groundDiff.cpu().detach().numpy()).astype("float32")
outputDiffReverse = visualize_difference(outputDiffReverse.cpu().detach().numpy()).astype('float32')
groundDiffReverse = visualize_difference(groundDiffReverse.cpu().detach().numpy()).astype("float32")
output_ref = cast(output_ref, True, True, True)
target_ref = cast(target_ref, True, True, True)
if encoded is not None:
            encoded = np.reshape(np.array([prepare_encoded(encoded)]), (batch_size, 9, 9, 1))  # assumes a 9x9 bottleneck feature map (e.g. 300x300 inputs after five downsamplings)
for i in range(batch_size):
if not toy:
a1, a2, b1, b, c, c1 = x_ref[i], x[i], output_ref[i], target_ref[i], output[i], target[i]
tensor1 = np.expand_dims(np.transpose(np.hstack([a1, a2, b1, b, c, c1]), (2, 0, 1)), axis=0)
writer.add_image('xRef_x_outputRef_targetRef_output_target',
make_grid(torch.as_tensor(tensor1), nrow=8, normalize=False))
else:
a, a1, b, b1, c, c1 = x[i], x_ref[i], output_ref[i], target_ref[i], output[i], target[i]
tensor2 = np.expand_dims(np.transpose(np.hstack([a, a1, b, b1, c, c1]), (2, 0, 1)), axis=0)
writer.add_image('TOY_x_xref_outputRef_targetRef_output_target',
make_grid(torch.as_tensor(tensor2), nrow=8, normalize=False))
if not toy:
d, e, f, g = outputDiff[i], groundDiff[i], outputDiffReverse[i], groundDiffReverse[i]
tensor3 = np.expand_dims(np.transpose(np.hstack([d, e, f, g]), (2, 0, 1)), axis=0)
writer.add_image('outDiff_groundDiff_outDiffReverse_groundDiffReverse',
make_grid(torch.as_tensor(tensor3), nrow=8, normalize=False))
else:
d, e, f, g = outputDiff[i], groundDiff[i], outputDiffReverse[i], groundDiffReverse[i]
tensor4 = np.expand_dims(np.transpose(np.hstack([d, e, f, g]), (2, 0, 1)), axis=0)
writer.add_image('TOY_outDiff_groundDiff_outDiffReverse_groundDiffReverse',
make_grid(torch.as_tensor(tensor4), nrow=100, normalize=False))
if encoded is not None:
if not toy:
encodedd = encoded[i]
tensor5 = np.expand_dims(np.transpose(encodedd, (2, 0, 1)), axis=0)
writer.add_image('encodedLongitudinal',
make_grid(torch.as_tensor(tensor5), nrow=8, normalize=False))
else:
x_toy = encoded[i]
tensor5 = np.expand_dims(np.transpose(x_toy, (2, 0, 1)), axis=0)
writer.add_image('encodedTOY',
make_grid(torch.as_tensor(tensor5), nrow=8, normalize=False))
elif groundDiff is None and output_ref is not None:
for i in range(batch_size):
a1, a2, b, b1, c, c1 = x_ref[i], x[i], output_ref[i], target_ref[i], output[i], target[i]
tensor = np.expand_dims(np.transpose(np.hstack([a1, a2, b, b1, c, c1]), (2, 0, 1)), axis=0)
writer.add_image('xRef_x_outputRef(2)_targetRef_output_target',
make_grid(torch.as_tensor(tensor), nrow=8, normalize=True))
else:
for i in range(batch_size):
a1, a2, b, c = x_ref[i], x[i], output[i], target[i]
tensor = np.expand_dims(np.transpose(np.hstack([a1, a2, b, c]), (2, 0, 1)), axis=0)
writer.add_image('xRef_x_output_target',
make_grid(torch.as_tensor(tensor), nrow=8, normalize=True))
def log_visualizations_deformations(writer, input_moving, input_fixed, flow, target_moving, target_fixed, output=None):
zipped_data = zip(
cast(input_moving),
cast(input_fixed),
cast(flow, normalize_data=False),
cast(target_moving, True),
cast(target_fixed, True),
        cast(output, True) if output is not None else [None for _ in input_moving]
)
for (_input_moving, _input_fixed, _flow, _target_moving, _target_fixed, _output) in zipped_data:
transposed_flow = np.transpose(_flow, (1, 2, 0))
illustration = [
_input_moving,
_input_fixed,
visualize_flow(transposed_flow) / 255.,
_target_moving,
_target_fixed
]
        if _output is not None:
illustration.append(_output)
tensor = np.expand_dims(np.transpose(np.hstack(illustration), (2, 0, 1)), axis=0)
description = 'xRef_x_flowfield_targetRef_target_output'
writer.add_image(description, make_grid(torch.as_tensor(tensor), nrow=8, normalize=True))
def cast(data, argmax=False, normalize_data=True, mask=False):
data2 = data.cpu().detach().numpy()
if argmax:
data2 = np.argmax(data2, axis=1)
data2 = data2.astype('float32')
if normalize_data:
data2 = np.asarray([normalize(date, mask) for date in data2])
return data2
def normalize(x, mask):
if len(x.shape) > 2:
x = x[0]
if mask:
hsv = np.zeros([x.shape[0], x.shape[1], 3], dtype=np.uint8)
hsv[..., 1] = 255
hsv[..., 2] = 255
hsv[..., 0] = x * 255 // 4
rgb = cv2.cvtColor(hsv, cv2.COLOR_HSV2RGB) # cv2.cvtColor(x * 1/4, cv2.COLOR_GRAY2RGB) #cv2.normalize(x, None, 0, 255, cv2.NORM_MINMAX) for grayscale
return rgb
else:
return cv2.cvtColor(cv2.normalize(x, None, 0, 1, cv2.NORM_MINMAX), cv2.COLOR_GRAY2RGB)
| 7,893 | 40.547368 | 158 | py |
longitudinalCOVID | longitudinalCOVID-master/model/LongitudinalFCDenseNet.py | from base import BaseModel
from model.FCDenseNet import FCDenseNetEncoder, FCDenseNetDecoder
from model.utils.layers import *
class LongitudinalFCDenseNet(BaseModel):
def __init__(self,
in_channels=1, down_blocks=(4, 4, 4, 4, 4),
up_blocks=(4, 4, 4, 4, 4), bottleneck_layers=4,
growth_rate=12, out_chans_first_conv=48, n_classes=2, encoder=None, siamese=True):
super().__init__()
self.up_blocks = up_blocks
self.densenet_encoder = encoder
self.siamese = siamese
if not encoder:
self.densenet_encoder = FCDenseNetEncoder(in_channels=in_channels * (1 if siamese else 2), down_blocks=down_blocks,
bottleneck_layers=bottleneck_layers,
growth_rate=growth_rate, out_chans_first_conv=out_chans_first_conv)
prev_block_channels = self.densenet_encoder.prev_block_channels
skip_connection_channel_counts = self.densenet_encoder.skip_connection_channel_counts
if self.siamese:
self.add_module('merge_conv', nn.Conv2d(prev_block_channels * 2, prev_block_channels, 1, 1))
self.decoder = FCDenseNetDecoder(prev_block_channels, skip_connection_channel_counts, growth_rate, n_classes, up_blocks)
def forward(self, x_ref, x):
if self.siamese:
out, skip_connections = self.densenet_encoder(x)
out_ref, _ = self.densenet_encoder(x_ref)
out = torch.cat((out, out_ref), dim=1)
out1 = self.merge_conv(out)
else:
out1, skip_connections = self.densenet_encoder(torch.cat((x_ref, x), dim=1))
out = self.decoder(out1, skip_connections)
        return out, out1  # return the encoded feature map for visualization purposes
| 1,858 | 45.475 | 128 | py |
longitudinalCOVID | longitudinalCOVID-master/model/LateLongitudinalFCDenseNet.py | from base import BaseModel
from model.FCDenseNet import FCDenseNetEncoder, FCDenseNetDecoder
from model.utils.layers import *
class LateLongitudinalFCDenseNet(BaseModel):
def __init__(self,
in_channels=1, down_blocks=(4, 4, 4, 4, 4),
up_blocks=(4, 4, 4, 4, 4), bottleneck_layers=4,
growth_rate=12, out_chans_first_conv=48, n_classes=2, encoder=None):
super().__init__()
self.up_blocks = up_blocks
self.densenet_encoder = encoder
if not encoder:
            self.densenet_encoder = FCDenseNetEncoder(in_channels=in_channels, down_blocks=down_blocks,
bottleneck_layers=bottleneck_layers,
growth_rate=growth_rate, out_chans_first_conv=out_chans_first_conv)
        prev_block_channels = 2 * self.densenet_encoder.prev_block_channels
skip_connection_channel_counts = self.densenet_encoder.skip_connection_channel_counts
self.decoder = FCDenseNetDecoder(prev_block_channels, skip_connection_channel_counts, growth_rate, n_classes, up_blocks)
def forward(self, x_ref, x):
out1, skip_connections = self.densenet_encoder(x)
out_ref, _ = self.densenet_encoder(x_ref)
out = torch.cat((out1, out_ref), dim=1)
out = self.decoder(out, skip_connections)
return out, out1
| 1,423 | 38.555556 | 128 | py |
longitudinalCOVID | longitudinalCOVID-master/model/utils/metric_utils.py | import numpy as np
import torch
def asymmetric_loss(beta, output, target):
g = flatten(target)
p = flatten(output)
pg = (p * g).sum(-1)
beta_sq = beta ** 2
a = beta_sq / (1 + beta_sq)
b = 1 / (1 + beta_sq)
g_p = ((1 - p) * g).sum(-1)
p_g = (p * (1 - g)).sum(-1)
loss = (1. + pg) / (1. + pg + a * g_p + b * p_g)
total_loss = torch.mean(1. - loss)
return total_loss
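# Illustrative worked example: asymmetric_loss is a Tversky-style index with
# weight a = beta^2 / (1 + beta^2) on false negatives (g_p) and
# b = 1 / (1 + beta^2) on false positives (p_g); a perfect one-hot
# prediction therefore gives exactly zero loss.
def _demo_asymmetric_loss():
    target = torch.zeros(1, 2, 4, 4)
    target[:, 1] = 1.0  # every pixel belongs to class 1
    assert torch.isclose(asymmetric_loss(2.0, target.clone(), target), torch.tensor(0.0))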
def eps_tp_tn_fp_fn(output, target):
with torch.no_grad():
epsilon = 1e-7
target = flatten(target).cpu().detach().float()
output = flatten(output).cpu().detach().float()
if len(output.shape) == 2: # is one hot encoded vector
target = np.argmax(target, axis=0)
output = np.argmax(output, axis=0)
tp = torch.sum(target * output)
tn = torch.sum((1 - target) * (1 - output))
fp = torch.sum((1 - target) * output)
fn = torch.sum(target * (1 - output))
return epsilon, tp.float(), tn.float(), fp.float(), fn.float()
def flatten(tensor):
"""Flattens a given tensor such that the channel axis is first.
The shapes are transformed as follows:
(N, C, D, H, W) -> (C, N * D * H * W)
"""
if type(tensor) == torch.Tensor:
C = tensor.size(1)
# new axis order
axis_order = (1, 0) + tuple(range(2, tensor.dim()))
# Transpose: (N, C, D, H, W) -> (C, N, D, H, W)
transposed = tensor.permute(axis_order)
# Flatten: (C, N, D, H, W) -> (C, N * D * H * W)
return transposed.contiguous().view(C, -1).float()
else:
return torch.as_tensor(tensor.flatten()).float() | 1,646 | 32.612245 | 70 | py |
longitudinalCOVID | longitudinalCOVID-master/model/utils/loss.py | import torch
import torch.nn.functional as F
from model.utils import metric_utils
import numpy as np
def inf(*args):
return torch.as_tensor(float("Inf"))
def gradient_loss(s):
dy = torch.abs(s[:, :, 1:, :] - s[:, :, :-1, :]) ** 2
dx = torch.abs(s[:, :, :, 1:] - s[:, :, :, :-1]) ** 2
return (torch.mean(dx) + torch.mean(dy)) / 2.0
def multitask_loss(warp, flow, output, input_fixed, target_fixed):
lung_mask = torch.zeros_like(target_fixed)
lung_mask[target_fixed != 0] = 1
warp = warp * lung_mask
input_fixed = input_fixed * lung_mask
recon_loss = mse(warp, input_fixed)
grad_loss = gradient_loss(flow)
seg_loss = mse(output, target_fixed)
return recon_loss + 0.01 * grad_loss + seg_loss
def deformation_loss(warp, flow, input_fixed):
recon_loss = mse(warp, input_fixed)
grad_loss = gradient_loss(flow)
return recon_loss + 0.01 * grad_loss
def l1(output, target):
return F.l1_loss(output, target)
def mse(output, target):
return F.mse_loss(output, target)
def mse_difference(output, target, output_ref, target_ref, outDiff, groundDiff):
return F.mse_loss(output, target) + F.mse_loss(output_ref, target_ref) + F.mse_loss(outDiff, groundDiff)
def nll_loss(output, target):
return F.nll_loss(metric_utils.flatten(output), metric_utils.flatten(target))
def dice_loss(output, target, weights):
    size = output.size()
    outputs = torch.zeros_like(output)
    targets = torch.zeros_like(target)
    for i in range(size[0]):
        for j in range(size[1]):
            outputs[i][j] = output[i][j] * weights[j]
            targets[i][j] = target[i][j] * weights[j]
    # apply per-class weights before computing the asymmetric (Tversky-style) loss
    return metric_utils.asymmetric_loss(1, outputs, targets)
def asymmetric_loss(output, target):
return metric_utils.asymmetric_loss(2, output, target)
| 1,817 | 26.969231 | 108 | py |
longitudinalCOVID | longitudinalCOVID-master/model/utils/layers.py | import torch
import torch.nn as nn
import torch.nn.functional as F
class SpatialTransformer(nn.Module):
def __init__(self, size, mode='bilinear'):
super(SpatialTransformer, self).__init__()
vectors = [torch.arange(0, s) for s in size]
grid = torch.unsqueeze(torch.stack(torch.meshgrid(vectors)), dim=0).float()
self.register_buffer('grid', grid)
self.mode = mode
def forward(self, src, flow):
new_locs = self.grid + flow
shape = flow.shape[2:]
for i in range(len(shape)):
new_locs[:, i, ...] = 2 * (new_locs[:, i, ...] / (shape[i] - 1) - 0.5)
new_locs = new_locs.permute(0, 2, 3, 1)
new_locs = new_locs[..., [1, 0]]
return F.grid_sample(src, new_locs, mode=self.mode, align_corners=True)
class DenseLayer(nn.Sequential):
def __init__(self, in_channels, growth_rate):
super().__init__()
self.add_module('norm', nn.BatchNorm2d(in_channels))
self.add_module('relu', nn.ReLU(True))
self.add_module('conv', nn.Conv2d(in_channels, growth_rate, kernel_size=3, stride=1, padding=1, bias=True))
self.add_module('drop', nn.Dropout2d(0.3))
def forward(self, x):
return super().forward(x)
class DenseBlock(nn.Module):
def __init__(self, in_channels, growth_rate, n_layers, upsample=False):
super().__init__()
self.upsample = upsample
self.layers = nn.ModuleList([DenseLayer(in_channels + i * growth_rate, growth_rate) for i in range(n_layers)])
def forward(self, x):
if self.upsample:
new_features = []
for layer in self.layers:
out = layer(x)
x = torch.cat([x, out], 1)
new_features.append(out)
return torch.cat(new_features, 1)
else:
for layer in self.layers:
out = layer(x)
x = torch.cat([x, out], 1)
return x
class TransitionDown(nn.Sequential):
def __init__(self, in_channels):
super().__init__()
self.add_module('norm', nn.BatchNorm2d(num_features=in_channels))
self.add_module('relu', nn.ReLU(inplace=True))
self.add_module('conv', nn.Conv2d(in_channels, in_channels, kernel_size=1, stride=1, padding=0, bias=True))
self.add_module('drop', nn.Dropout2d(0.2))
self.add_module('maxpool', nn.MaxPool2d(2))
def forward(self, x):
return super().forward(x)
class TransitionUp(nn.Module):
def __init__(self, in_channels, out_channels):
super().__init__()
self.convTrans = nn.ConvTranspose2d(in_channels=in_channels, out_channels=out_channels, kernel_size=3, stride=2, padding=0, bias=True)
def forward(self, x, skip_x):
out = self.convTrans(x)
out = center_crop(out, skip_x.size(2), skip_x.size(3))
out = torch.cat([out, skip_x], 1)
return out
class Bottleneck(nn.Sequential):
def __init__(self, in_channels, growth_rate, n_layers):
super().__init__()
self.add_module('bottleneck', DenseBlock(in_channels, growth_rate, n_layers, upsample=True))
def forward(self, x):
return super().forward(x)
def center_crop(layer, max_height, max_width):
_, _, h, w = layer.size()
xy1 = (w - max_width) // 2
xy2 = (h - max_height) // 2
return layer[:, :, xy2:(xy2 + max_height), xy1:(xy1 + max_width)]
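# Illustrative sketch: center_crop trims a feature map symmetrically, and a
# zero displacement field makes SpatialTransformer the identity mapping.
def _demo_layers():
    feat = torch.arange(36.0).reshape(1, 1, 6, 6)
    assert center_crop(feat, 4, 4).shape == (1, 1, 4, 4)
    stn = SpatialTransformer(size=(6, 6))
    warped = stn(feat, torch.zeros(1, 2, 6, 6))
    assert torch.allclose(warped, feat, atol=1e-5)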
| 3,420 | 32.871287 | 142 | py |
longitudinalCOVID | longitudinalCOVID-master/model/utils/metric.py | import numpy as np
import torch
from sklearn.metrics import f1_score, precision_score, recall_score, roc_curve
from medpy import metric
from model.utils import metric_utils
def precision(output, target):
with torch.no_grad():
target = metric_utils.flatten(target).cpu().detach().float()
output = metric_utils.flatten(output).cpu().detach().float()
if len(output.shape) == 2: # is one hot encoded vector
target = np.argmax(target, axis=0)
output = np.argmax(output, axis=0)
return precision_score(target, output, average=None) # average='macro' for macro averaging
def recall(output, target):
with torch.no_grad():
target = metric_utils.flatten(target).cpu().detach().float()
output = metric_utils.flatten(output).cpu().detach().float()
if len(output.shape) == 2: # is one hot encoded vector
target = np.argmax(target, axis=0)
output = np.argmax(output, axis=0)
return recall_score(target, output, average=None) # average='macro' for macro averaging
def dice_loss(output, target):
with torch.no_grad():
return metric_utils.asymmetric_loss(1, output, target)
def dice_score(output, target):
with torch.no_grad():
target = metric_utils.flatten(target).cpu().detach().float()
output = metric_utils.flatten(output).cpu().detach().float()
if len(output.shape) == 2: # is one hot encoded vector
target = np.argmax(target, axis=0)
output = np.argmax(output, axis=0)
f = f1_score(target, output, average=None) # average='macro' for macro averaging
return f
def asymmetric_loss(output, target):
with torch.no_grad():
return metric_utils.asymmetric_loss(2, output, target)
lt1, lt2 = [0] * 5, [0] * 5
def LTPR(output, target, is_last=True):
tprs = []
global lt1, lt2
with torch.no_grad():
target = metric_utils.flatten(target).cpu().detach().float()
output = metric_utils.flatten(output).cpu().detach().float()
if len(output.shape) == 2: # is one hot encoded vector
target = np.argmax(target, axis=0)
output = np.argmax(output, axis=0)
for i in range(5):
output1 = output.clone()
target1 = target.clone()
output1[output1 == i] = 10
output1[output1 != 10] = 0
output1[output1 == 10] = 1
target1[target1 == i] = 10
target1[target1 != 10] = 0
target1[target1 == 10] = 1
output1 = output1.detach().cpu().numpy()
target1 = target1.detach().cpu().numpy()
            result = np.atleast_1d(output1.astype(bool))
            reference = np.atleast_1d(target1.astype(bool))
lt1[i] += np.count_nonzero(result * reference)
lt2[i] += np.count_nonzero(reference)
if 0 == lt2[i]:
tpr = None
else:
tpr = lt1[i] / float(lt2[i])
tprs += [tpr]
if is_last:
lt1, lt2 = [0] * 5, [0] * 5
return tprs
else:
return None
lf1, lf2 = [0] * 5, [0] * 5
def LFPR(output, target, is_last=True):
fprs = []
global lf1, lf2
with torch.no_grad():
target = metric_utils.flatten(target).cpu().detach().float()
output = metric_utils.flatten(output).cpu().detach().float()
if len(output.shape) == 2: # is one hot encoded vector
target = np.argmax(target, axis=0)
output = np.argmax(output, axis=0)
for i in range(5):
output1 = output.clone()
target1 = target.clone()
output1[output1 == i] = 10
output1[output1 != 10] = 0
output1[output1 == 10] = 1
target1[target1 == i] = 10
target1[target1 != 10] = 0
target1[target1 == 10] = 1
output1 = output1.detach().cpu().numpy()
target1 = target1.detach().cpu().numpy()
            result = np.atleast_1d(output1.astype(bool))
            reference = np.atleast_1d(target1.astype(bool))
lf1[i] += np.count_nonzero(result * (1 - reference))
lf2[i] += np.count_nonzero(reference)
if 0 == lf2[i]:
fpr = None
else:
fpr = lf1[i] / float(lf2[i])
fprs += [fpr]
if is_last:
lf1, lf2 = [0] * 5, [0] * 5
return fprs
else:
return None
vol1 = [0] * 5
vol2 = [0] * 5
def vd(output, target, is_last=True):
vds = []
global vol1, vol2
with torch.no_grad():
target = metric_utils.flatten(target).cpu().detach().float()
output = metric_utils.flatten(output).cpu().detach().float()
if len(output.shape) == 2: # is one hot encoded vector
target = np.argmax(target, axis=0)
output = np.argmax(output, axis=0)
for i in range(5):
output1 = output.clone()
target1 = target.clone()
output1[output1 == i] = 10
output1[output1 != 10] = 0
output1[output1 == 10] = 1
target1[target1 == i] = 10
target1[target1 != 10] = 0
target1[target1 == 10] = 1
output1 = output1.detach().cpu().numpy()
target1 = target1.detach().cpu().numpy()
            result = np.atleast_1d(output1.astype(bool))
            reference = np.atleast_1d(target1.astype(bool))
vol1[i] += np.count_nonzero(result)
vol2[i] += np.count_nonzero(reference)
            now_vd = abs(vol1[i] - vol2[i])   # renamed so it no longer shadows the enclosing function
            vds += [now_vd]
if is_last:
vol1, vol2 = [0] * 5, [0] * 5
return vds
else:
return None
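# Editor's sketch (not part of the original file): a standalone illustration of the
# per-class binarization that LTPR/LFPR/vd perform above -- class i is mapped to 1,
# every other label to 0, and overlaps are counted. The arrays and the class index
# below are hypothetical.
def _demo_per_class_tpr():
    import numpy as np
    output = np.array([0, 1, 1, 2, 2])   # predicted labels
    target = np.array([0, 1, 2, 2, 2])   # ground-truth labels
    i = 2                                # class under evaluation
    result = np.atleast_1d(output == i)
    reference = np.atleast_1d(target == i)
    tp = np.count_nonzero(result * reference)        # true positives: 2
    return tp / float(np.count_nonzero(reference))   # TPR for class 2: 2/3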
| 5,769 | 30.703297 | 99 | py |
RAML | RAML-master/incremental/main.py | from tqdm import tqdm
import network
import utils
import os
import random
import argparse
import numpy as np
import torch.nn.functional as F
from torch.utils import data
from datasets import VOCSegmentation, Cityscapes, cityscapes
from utils import ext_transforms as et
from metrics import StreamSegMetrics
import torch
import torch.nn as nn
from utils.visualizer import Visualizer
from PIL import Image
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import sklearn.metrics as Metrics
from torch import Tensor
from typing import Tuple
def get_argparser():
parser = argparse.ArgumentParser()
    # Dataset Options
    parser.add_argument("--data_root", type=str, default='../data/cityscapes',
                        help="path to Dataset")
    parser.add_argument("--dataset", type=str, default='cityscapes',
                        choices=['voc', 'cityscapes'], help='Name of dataset')
    parser.add_argument("--num_classes", type=int, default=256,
                        help="num classes (default: 256)")
    parser.add_argument("--metric_dim", type=int, default=None,
                        help="dimension of the metric embedding (default: None)")
# Deeplab Options
parser.add_argument("--model", type=str, default='deeplabv3plus_metirc_resnet101',
choices=['deeplabv3_resnet50', 'deeplabv3plus_resnet50',
'deeplabv3_resnet101', 'deeplabv3plus_resnet101',
'deeplabv3_mobilenet', 'deeplabv3plus_mobilenet',
'deeplabv3plus_metirc_resnet101'], help='model name')
parser.add_argument("--separable_conv", action='store_true', default=False,
help="apply separable conv to decoder and aspp")
parser.add_argument("--output_stride", type=int, default=16, choices=[8, 16])
# Train Options
parser.add_argument("--finetune", action='store_true', default=False)
parser.add_argument("--test_only", action='store_true', default=False)
parser.add_argument("--save_val_results", action='store_true', default=False,
help="save segmentation results to \"./results\"")
parser.add_argument("--total_itrs", type=int, default=30000,
help="epoch number (default: 30k)")
parser.add_argument("--lr", type=float, default=0.1,
help="learning rate (default: 0.01)")
parser.add_argument("--lr_policy", type=str, default='poly', choices=['poly', 'step'],
help="learning rate scheduler policy")
parser.add_argument("--step_size", type=int, default=1000)
parser.add_argument("--crop_val", action='store_true', default=False,
help='crop validation (default: True)')
parser.add_argument("--batch_size", type=int, default=6,
help='batch size (default: 16)')
parser.add_argument("--val_batch_size", type=int, default=4,
help='batch size for validation (default: 4)')
parser.add_argument("--crop_size", type=int, default=768)
parser.add_argument("--ckpt", default=None, type=str,
help="restore from checkpoint")
parser.add_argument("--continue_training", action='store_true', default=False)
parser.add_argument("--loss_type", type=str, default='cross_entropy',
choices=['cross_entropy', 'focal_loss'], help="loss type (default: False)")
parser.add_argument("--gpu_id", type=str, default='0,1',
help="GPU ID")
parser.add_argument("--weight_decay", type=float, default=1e-4,
help='weight decay (default: 1e-4)')
parser.add_argument("--random_seed", type=int, default=1,
help="random seed (default: 1)")
parser.add_argument("--print_interval", type=int, default=10,
help="print interval of loss (default: 10)")
parser.add_argument("--val_interval", type=int, default=100,
help="epoch interval for eval (default: 100)")
parser.add_argument("--download", action='store_true', default=False,
help="download datasets")
parser.add_argument("--name", type=str, default='',help="download datasets")
parser.add_argument("--output_dir", type=str, default='output', help="output path")
# PASCAL VOC Options
parser.add_argument("--year", type=str, default='2012',
choices=['2012_aug', '2012', '2011', '2009', '2008', '2007'], help='year of VOC')
# Visdom options
parser.add_argument("--enable_vis", action='store_true', default=False,
help="use visdom for visualization")
parser.add_argument("--vis_port", type=str, default='13570',
help='port for visdom')
parser.add_argument("--vis_env", type=str, default='main',
help='env for visdom')
parser.add_argument("--vis_num_samples", type=int, default=8,
help='number of samples for visualization (default: 8)')
return parser
class BinaryDiceLoss(nn.Module):
def __init__(self, smooth=1, p=2, reduction='mean'):
super(BinaryDiceLoss, self).__init__()
self.smooth = smooth
self.p = p
self.reduction = reduction
def forward(self, predict, target):
assert predict.shape[0] == target.shape[0], "predict & target batch size don't match"
predict = predict.contiguous().view(predict.shape[0], -1)
target = target.contiguous().view(target.shape[0], -1)
num = torch.sum(torch.mul(predict, target), dim=1) + self.smooth
den = torch.sum(predict.pow(self.p) + target.pow(self.p), dim=1) + self.smooth
loss = 1 - num / den
if self.reduction == 'mean':
return loss.mean()
elif self.reduction == 'sum':
return loss.sum()
elif self.reduction == 'none':
return loss
else:
raise Exception('Unexpected reduction {}'.format(self.reduction))
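# Editor's sketch (not part of the original file): minimal usage of BinaryDiceLoss
# on dummy tensors. The shapes are hypothetical; forward() flattens everything
# after the batch dimension, so any matching (B, ...) pair works.
def _demo_binary_dice():
    import torch
    criterion = BinaryDiceLoss()
    predict = torch.sigmoid(torch.randn(2, 16))   # probabilities in (0, 1)
    target = (torch.rand(2, 16) > 0.5).float()    # binary ground truth
    return criterion(predict, target)             # scalar loss in [0, 1)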
class MyDiceLoss(nn.Module):
def __init__(self, ignore_index=255):
super().__init__()
self.dice_criterion = BinaryDiceLoss()
self.ignore_index = ignore_index
def forward(self, logit, label_lst, class_lst):
loss = 0.0
for b in range(logit.shape[0]):
logit_b = logit[b][torch.where(class_lst[b] != self.ignore_index)]
label_lst_b = label_lst[b][torch.where(class_lst[b] != self.ignore_index)]
if logit_b.shape[0]:
loss += self.dice_criterion(logit_b, label_lst_b)
return loss / logit.shape[0]
class CDiceLoss(nn.Module):
def __init__(self, known_class=16, ignore_index=255):
super().__init__()
self.dice_criterion = BinaryDiceLoss()
self.bce_criterion = nn.BCELoss()
self.ignore_index = ignore_index
self.class_num=known_class
print('finetune with '+str(known_class)+" classes")
def forward(self, logit, label_lst, class_lst):
loss1 = torch.FloatTensor([0.0]).to(logit.device)
for i in range(self.class_num):
loss1 += (self.dice_criterion(logit[:, i], label_lst[:, i]) + self.bce_criterion(logit[:, i], label_lst[:, i].float()))
loss1 /= self.class_num
loss2 = 0.0
for i in range(self.class_num, logit.shape[1]):
loss2 += -torch.log((torch.mean(logit[:, i]) * 50).clamp(0, 1))
loss2 /= (logit.shape[1] - self.class_num)
loss3 = 0.0
num3 = 0
for i in range(logit.shape[1]):
for j in range(logit.shape[1]):
if i == j: continue
dice_loss = self.dice_criterion(logit[:, i], logit[:, j])
loss3 += (1.0 - dice_loss)
num3 += 1
loss3 = loss3 / num3
loss = (loss1 + loss2 + loss3) * 0.1
return {
'loss': loss,
'loss1': loss1,
'loss2': loss2,
'loss3': loss3,
}
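# Editor's sketch (not part of the original file): CDiceLoss combines three terms --
# loss1 (dice + BCE on the known channels), loss2 (penalizes meta channels whose
# mean activation is near zero), and loss3 (penalizes overlap between channel
# pairs). The shapes below are hypothetical: 4 known classes plus 2 meta channels.
def _demo_cdice():
    import torch
    criterion = CDiceLoss(known_class=4)
    logit = torch.rand(2, 6, 8, 8)                      # probabilities per channel
    label_lst = (torch.rand(2, 6, 8, 8) > 0.5).long()   # one-hot-style masks
    class_lst = torch.zeros(2, 6, dtype=torch.long)     # unused by this loss
    return criterion(logit, label_lst, class_lst)['loss']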
def get_dataset(opts):
""" Dataset And Augmentation
"""
if opts.dataset == 'voc':
train_transform = et.ExtCompose([
#et.ExtResize(size=opts.crop_size),
et.ExtRandomScale((0.5, 2.0)),
et.ExtRandomCrop(size=(opts.crop_size, opts.crop_size), pad_if_needed=True),
et.ExtRandomHorizontalFlip(),
et.ExtToTensor(),
et.ExtNormalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]),
])
if opts.crop_val:
val_transform = et.ExtCompose([
et.ExtResize(opts.crop_size),
et.ExtCenterCrop(opts.crop_size),
et.ExtToTensor(),
et.ExtNormalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]),
])
else:
val_transform = et.ExtCompose([
et.ExtToTensor(),
et.ExtNormalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]),
])
train_dst = VOCSegmentation(root=opts.data_root, year=opts.year,
image_set='train', download=opts.download, transform=train_transform)
val_dst = VOCSegmentation(root=opts.data_root, year=opts.year,
image_set='val', download=False, transform=val_transform)
if opts.dataset == 'cityscapes':
train_transform = et.ExtCompose([
#et.ExtResize( 512 ),
et.ExtRandomCrop(size=(opts.crop_size, opts.crop_size)),
et.ExtColorJitter( brightness=0.5, contrast=0.5, saturation=0.5 ),
et.ExtRandomHorizontalFlip(),
et.ExtToTensor(),
et.ExtNormalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]),
])
val_transform = et.ExtCompose([
#et.ExtResize( 512 ),
et.ExtToTensor(),
et.ExtNormalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]),
])
train_dst = Cityscapes(root=opts.data_root,
split='train', transform=train_transform)
val_dst = Cityscapes(root=opts.data_root,
split='val', transform=val_transform)
return train_dst, val_dst
def save_ckpt(batch_idx, model, optimizer, scheduler, path):
""" save current model
"""
torch.save({
"batch_idx": batch_idx,
"model_state": model.module.state_dict(),
"optimizer_state": optimizer.state_dict(),
"scheduler_state": scheduler.state_dict(),
}, path)
print("Model saved as %s" % path)
def visualize(image, label, logit, label_lst, class_lst, save_path=None, denorm=None):
# logit: (256, H, W)
if not isinstance(image, np.ndarray):
image = image.detach().cpu().numpy()
label = label.detach().cpu().numpy()
logit = logit.detach().cpu().numpy()
label_lst = label_lst.detach().cpu().numpy()
class_lst = class_lst.detach().cpu().numpy()
if denorm:
image = (denorm(image) * 255).transpose(1, 2, 0).astype(np.uint8)
_, axarr = plt.subplots(2, (1+logit.shape[0]), figsize=(5*(1+logit.shape[0]), 10))
axarr[0][0].imshow(image)
label[label == 255] = 0
axarr[1][0].imshow(label)
for i in range(logit.shape[0]):
if i < label_lst.shape[0]:
axarr[0][1+i].imshow(label_lst[i])
axarr[1][i+1].imshow((logit[i] >= 0.5).astype(np.uint8))
# _, axarr = plt.subplots(16, 32, figsize=(40, 20))
# for i in range(label.shape[0]):
# axarr[i//16][(i%16)*2].imshow(label[i])
# axarr[i//16][(i%16)*2].set_xticks([])
# axarr[i//16][(i%16)*2].set_yticks([])
# for i in range(logit.shape[0]):
# axarr[i//16][(i%16)*2+1].imshow((logit[i] >= 0.5).astype(np.uint8))
# axarr[i//16][(i%16)*2+1].set_xticks([])
# axarr[i//16][(i%16)*2+1].set_yticks([])
# label[label == 255] = 19
# C = logit.shape[0]
# logit = np.argmax(logit, axis=0)
# mask = np.zeros_like(logit)
# for c in range(C):
# t = class_lst[c]
# if t == 255: t = 19
# temp = (logit == c).astype(np.uint8)
# mask = np.ones_like(logit) * t * temp + mask * (1 - temp)
# _, axarr = plt.subplots(1, 3, figsize=(15, 5))
# axarr[0].imshow(image)
# axarr[1].imshow(label)
# axarr[2].imshow(mask)
if save_path:
plt.savefig(save_path)
else:
plt.show()
plt.close()
def val(opts, model, val_loader, device):
metrics = StreamSegMetrics(19)
val_save_dir = os.path.join(opts.output_dir, 'val')
os.makedirs(val_save_dir, exist_ok=True)
model.eval()
denorm = utils.Denormalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
for batch_idx, (images, labels, _, _, _) in tqdm(enumerate(val_loader)):
images = images.to(device, dtype=torch.float32)
labels = labels.to(device, dtype=torch.long)
outputs, _, _, _ = model(images)
outputs = torch.argmax(outputs, dim=1)[0].detach().cpu().numpy() # (H, W)
#print(labels.shape, outputs.shape)
metrics.update(labels[0].detach().cpu().numpy(), outputs)
score = metrics.get_results()
print(str(opts.num_classes)+' classes')
print(metrics.to_str(score))
def train_stage1(opts, model, train_loader, val_loader, criterion, optimizer, scheduler, device, printer=print):
ce_criterion = utils.CrossEntropyLoss(ignore_index=255, size_average=True)
#l2_criterion = nn.MSELoss().to(device)
model.train()
epoch_records = {}
cur_itr = 0
denorm = utils.Denormalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
val_save_dir = os.path.join(opts.output_dir, 'val')
os.makedirs(val_save_dir, exist_ok=True)
while True:
for batch_idx, (images, labels, labels_true, labels_lst, class_lst) in enumerate(train_loader):
images = images.to(device, dtype=torch.float32)
labels_lst = labels_lst.to(device, dtype=torch.long)
class_lst = class_lst.to(device, dtype=torch.long)
labels_true = labels_true.to(device, dtype=torch.long)
labels = labels.to(device, dtype=torch.long)
outputs, _, _, res_images = model(images)
#logits = torch.sigmoid(logits)
# loss = criterion(logits, labels_lst[:, :masks.shape[1]] * masks, class_lst)
#loss = criterion(logits, labels_lst, class_lst)
loss_seg = ce_criterion(outputs, labels, None)
#masks = ((labels.unsqueeze(dim=1)) != 255).float()
#loss_l2 = l2_criterion(res_images, images) * 0.01
#loss['loss'] += (loss_seg + loss_l2)
##loss['loss_l2'] = loss_l2
if ("seg" not in epoch_records): epoch_records["seg"]=[]
epoch_records["seg"].append(loss_seg.cpu().data.numpy())
#loss_ce = ce_criterion(outputs, labels, None)
#epoch_records['loss_ce'].append(loss_ce.item())
#loss = loss + loss_ce
optimizer.zero_grad()
loss_seg.backward()
optimizer.step()
if batch_idx % 10 == 0:
context = f"Iters {cur_itr}\t"
for key, value in epoch_records.items():
context += f"{key}: {np.mean(value):.4f}\t"
printer(context)
epoch_records = {}
if cur_itr % 1000 == 0:
val(opts, model, val_loader, device)
#for _, (images, labels, labels_true, labels_lst, class_lst) in enumerate(val_loader):
# if np.random.uniform(0, 1) < 0.9: continue
'''
for b in range(images.shape[0]):
visualize(images[b], labels_true[b], logits[b], labels_lst[b], class_lst[b], save_path=os.path.join(val_save_dir, f'{cur_itr}_{b}.png'), denorm=denorm)
# break
'''
model.train()
cur_itr += 1
if cur_itr >= opts.total_itrs:
save_ckpt(batch_idx, model, optimizer, scheduler, os.path.join(opts.output_dir, f'final.pth'))
return epoch_records
scheduler.step()
save_ckpt(batch_idx, model, optimizer, scheduler, os.path.join(opts.output_dir, f'{cur_itr}.pth'))
def train(opts, model, train_loader, val_loader, criterion, optimizer, scheduler, device, printer=print):
ce_criterion = utils.CrossEntropyLoss(ignore_index=255, size_average=True)
l2_criterion = nn.MSELoss().to(device)
model.train()
epoch_records = {}
cur_itr = 0
denorm = utils.Denormalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
val_save_dir = os.path.join(opts.output_dir, 'val')
os.makedirs(val_save_dir, exist_ok=True)
while True:
for batch_idx, (images, labels, labels_true, labels_lst, class_lst) in enumerate(train_loader):
images = images.to(device, dtype=torch.float32)
labels_lst = labels_lst.to(device, dtype=torch.long)
class_lst = class_lst.to(device, dtype=torch.long)
labels_true = labels_true.to(device, dtype=torch.long)
labels = labels.to(device, dtype=torch.long)
outputs, logits, _, res_images = model(images)
#logits = torch.sigmoid(logits)
# loss = criterion(logits, labels_lst[:, :masks.shape[1]] * masks, class_lst)
loss = criterion(logits, labels_lst, class_lst)
loss_seg = ce_criterion(outputs, labels, None)
masks = ((labels.unsqueeze(dim=1)) != 255).float()
loss_l2 = l2_criterion(res_images, images) * 0.01
loss['loss'] += loss_l2
loss['loss'] += loss_seg
loss['loss_seg'] = loss_seg
loss['loss_l2'] = loss_l2
for key, value in loss.items():
if key not in epoch_records:
epoch_records[key] = []
epoch_records[key].append(value.item())
#loss_ce = ce_criterion(outputs, labels, None)
#epoch_records['loss_ce'].append(loss_ce.item())
#loss = loss + loss_ce
optimizer.zero_grad()
loss['loss'].backward()
optimizer.step()
if batch_idx % 10 == 0:
context = f"Iters {cur_itr}\t"
for key, value in epoch_records.items():
context += f"{key}: {np.mean(value):.4f}\t"
printer(context)
epoch_records = {}
if cur_itr % 500 == 0:
val(opts, model, val_loader, device)
#for _, (images, labels, labels_true, labels_lst, class_lst) in enumerate(val_loader):
# if np.random.uniform(0, 1) < 0.9: continue
for b in range(images.shape[0]):
visualize(images[b], labels_true[b], logits[b], labels_lst[b], class_lst[b], save_path=os.path.join(val_save_dir, f'{cur_itr}_{b}.png'), denorm=denorm)
# break
model.train()
cur_itr += 1
if cur_itr >= opts.total_itrs:
save_ckpt(batch_idx, model, optimizer, scheduler, os.path.join(opts.output_dir, f'final.pth'))
return epoch_records
scheduler.step()
save_ckpt(batch_idx, model, optimizer, scheduler, os.path.join(opts.output_dir, f'{cur_itr}.pth'))
# if batch_idx % 10 == 0:
# val(opts, model, val_loader, device)
# model.train()
import torch
from torch.nn.parallel.data_parallel import DataParallel
from torch.nn.parallel.parallel_apply import parallel_apply
from torch.nn.parallel._functions import Scatter
def scatter(inputs, target_gpus, chunk_sizes, dim=0):
r"""
Slices tensors into approximately equal chunks and
distributes them across given GPUs. Duplicates
references to objects that are not tensors.
"""
def scatter_map(obj):
if isinstance(obj, torch.Tensor):
try:
return Scatter.apply(target_gpus, chunk_sizes, dim, obj)
except Exception:
print('obj', obj.size())
print('dim', dim)
print('chunk_sizes', chunk_sizes)
quit()
if isinstance(obj, tuple) and len(obj) > 0:
return list(zip(*map(scatter_map, obj)))
if isinstance(obj, list) and len(obj) > 0:
return list(map(list, zip(*map(scatter_map, obj))))
if isinstance(obj, dict) and len(obj) > 0:
return list(map(type(obj), zip(*map(scatter_map, obj.items()))))
        return [obj for _ in target_gpus]
# After scatter_map is called, a scatter_map cell will exist. This cell
# has a reference to the actual function scatter_map, which has references
# to a closure that has a reference to the scatter_map cell (because the
# fn is recursive). To avoid this reference cycle, we set the function to
# None, clearing the cell
try:
return scatter_map(inputs)
finally:
scatter_map = None
def scatter_kwargs(inputs, kwargs, target_gpus, chunk_sizes, dim=0):
"""Scatter with support for kwargs dictionary"""
inputs = scatter(inputs, target_gpus, chunk_sizes, dim) if inputs else []
kwargs = scatter(kwargs, target_gpus, chunk_sizes, dim) if kwargs else []
if len(inputs) < len(kwargs):
inputs.extend([() for _ in range(len(kwargs) - len(inputs))])
elif len(kwargs) < len(inputs):
kwargs.extend([{} for _ in range(len(inputs) - len(kwargs))])
inputs = tuple(inputs)
kwargs = tuple(kwargs)
return inputs, kwargs
class BalancedDataParallel(DataParallel):
def __init__(self, gpu0_bsz, *args, **kwargs):
self.gpu0_bsz = gpu0_bsz
super().__init__(*args, **kwargs)
def forward(self, *inputs, **kwargs):
if not self.device_ids:
return self.module(*inputs, **kwargs)
if self.gpu0_bsz == 0:
device_ids = self.device_ids[1:]
else:
device_ids = self.device_ids
inputs, kwargs = self.scatter(inputs, kwargs, device_ids)
if len(self.device_ids) == 1:
return self.module(*inputs[0], **kwargs[0])
replicas = self.replicate(self.module, self.device_ids)
if self.gpu0_bsz == 0:
replicas = replicas[1:]
outputs = self.parallel_apply(replicas, device_ids, inputs, kwargs)
return self.gather(outputs, self.output_device)
def parallel_apply(self, replicas, device_ids, inputs, kwargs):
return parallel_apply(replicas, inputs, kwargs, device_ids)
def scatter(self, inputs, kwargs, device_ids):
bsz = inputs[0].size(self.dim)
num_dev = len(self.device_ids)
gpu0_bsz = self.gpu0_bsz
bsz_unit = (bsz - gpu0_bsz) // (num_dev - 1)
if gpu0_bsz < bsz_unit:
chunk_sizes = [gpu0_bsz] + [bsz_unit] * (num_dev - 1)
delta = bsz - sum(chunk_sizes)
for i in range(delta):
chunk_sizes[i + 1] += 1
if gpu0_bsz == 0:
chunk_sizes = chunk_sizes[1:]
else:
return super().scatter(inputs, kwargs, device_ids)
return scatter_kwargs(inputs, kwargs, device_ids, chunk_sizes, dim=self.dim)
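# Editor's sketch (not part of the original file): the chunk sizes that
# BalancedDataParallel.scatter() computes when GPU 0 is given a smaller share of
# the batch. With a hypothetical batch of 6 on two GPUs and gpu0_bsz=2, GPU 0
# receives 2 samples and GPU 1 receives 4.
def _demo_balanced_chunks(bsz=6, num_dev=2, gpu0_bsz=2):
    bsz_unit = (bsz - gpu0_bsz) // (num_dev - 1)
    chunk_sizes = [gpu0_bsz] + [bsz_unit] * (num_dev - 1)
    for i in range(bsz - sum(chunk_sizes)):   # distribute any remainder to the other GPUs
        chunk_sizes[i + 1] += 1
    return chunk_sizes                        # [2, 4]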
def main():
print(torch.version.cuda)
opts = get_argparser().parse_args()
if opts.dataset.lower() == 'voc':
opts.num_classes = 21
elif opts.dataset.lower() == 'cityscapes':
opts.num_classes = 19
opts.num_classes = 256
os.environ['CUDA_VISIBLE_DEVICES'] = opts.gpu_id
    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
print("Device: %s" % device)
# Setup random seed
torch.manual_seed(opts.random_seed)
np.random.seed(opts.random_seed)
random.seed(opts.random_seed)
# Setup dataloader
if opts.dataset=='voc' and not opts.crop_val:
opts.val_batch_size = 1
train_dst, val_dst = get_dataset(opts)
train_loader = data.DataLoader(
train_dst, batch_size=opts.batch_size, shuffle=True, num_workers=8)
val_loader = data.DataLoader(
val_dst, batch_size=opts.val_batch_size, shuffle=False, num_workers=8)
print("Dataset: %s, Train set: %d, Val set: %d" %
(opts.dataset, len(train_dst), len(val_dst)))
# Set up model
model_map = {
'deeplabv3_resnet50': network.deeplabv3_resnet50,
'deeplabv3plus_resnet50': network.deeplabv3plus_resnet50,
'deeplabv3_resnet101': network.deeplabv3_resnet101,
'deeplabv3plus_resnet101': network.deeplabv3plus_resnet101,
'deeplabv3_mobilenet': network.deeplabv3_mobilenet,
'deeplabv3plus_mobilenet': network.deeplabv3plus_mobilenet,
'deeplabv3plus_metirc_resnet101': network.deeplabv3plus_metirc_resnet101
}
remain_class = 19 - len(train_dst.unknown_target)
print('class num : '+str(remain_class))
opts.num_classes=remain_class
model = model_map[opts.model](num_classes=remain_class, output_stride=opts.output_stride, metric_dim=opts.metric_dim, finetune=False)
if opts.separable_conv and 'plus' in opts.model:
network.convert_to_separable_conv(model.classifier)
utils.set_bn_momentum(model.backbone, momentum=0.01)
# # Set up metrics
# metrics = StreamSegMetrics(opts.num_classes)
# Set up optimizer
    if (opts.finetune):  # note: both branches currently configure the same optimizer
optimizer = torch.optim.SGD(params=[
{'params': model.backbone.parameters(), 'lr': 0.1*opts.lr},
{'params': model.classifier.parameters(), 'lr': opts.lr},
], lr=opts.lr, momentum=0.9, weight_decay=opts.weight_decay)
else:
optimizer = torch.optim.SGD(params=[
{'params': model.backbone.parameters(), 'lr': 0.1*opts.lr},
{'params': model.classifier.parameters(), 'lr': opts.lr},
], lr=opts.lr, momentum=0.9, weight_decay=opts.weight_decay)
if opts.lr_policy=='poly':
scheduler = utils.PolyLR(optimizer, opts.total_itrs, power=0.9)
elif opts.lr_policy=='step':
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=opts.step_size, gamma=0.1)
#criterion = MyDiceLoss(ignore_index=255).to(device)
criterion = CDiceLoss(remain_class).to(device)
utils.mkdir(opts.output_dir)
# Restore
if opts.ckpt is not None and os.path.isfile(opts.ckpt):
# https://github.com/VainF/DeepLabV3Plus-Pytorch/issues/8#issuecomment-605601402, @PytaichukBohdan
checkpoint = torch.load(opts.ckpt, map_location=torch.device('cpu'))
model_state_dict = model.state_dict()
checkpoint_state_dict = checkpoint["model_state"]
for key in checkpoint_state_dict:
if model_state_dict[key].shape != checkpoint_state_dict[key].shape:
print(key)
continue
model_state_dict[key] = checkpoint_state_dict[key]
model.load_state_dict(model_state_dict)
#model.load_state_dict(checkpoint["model_state"])
#model = nn.DataParallel(model)
device_ids=list(map(int, opts.gpu_id.split(',')))
#torch.cuda.set_device(device_ids[0])
print(device_ids)
#model = nn.DataParallel(model, device_ids=list(map(int, opts.gpu_id.split(','))))
model = BalancedDataParallel(2, model, dim=0, device_ids=[0,1])
#model = BalancedDataParallel(2, model, dim=0, device_ids=list(map(int, opts.gpu_id.split(','))))
model.to(device)
if opts.continue_training:
optimizer.load_state_dict(checkpoint["optimizer_state"])
scheduler.load_state_dict(checkpoint["scheduler_state"])
print("Training state restored from %s" % opts.ckpt)
print("Model restored from %s" % opts.ckpt)
del checkpoint # free memory
else:
print("[!] Retrain")
#model = nn.DataParallel(model)
model = BalancedDataParallel(2, model, dim=0, device_ids=[0,1])
model.to(device)
if (opts.finetune):
train(opts, model, train_loader, val_loader, criterion, optimizer, scheduler, device, printer=print)
else:
train_stage1(opts, model, train_loader, val_loader, None, optimizer, scheduler, device, printer=print)
if __name__ == '__main__':
main()
| 28,621 | 42.170437 | 171 | py |
RAML | RAML-master/incremental/main_metric.py | from tqdm import tqdm
import network
import utils
import os
import random
import argparse
import numpy as np
import torch.nn.functional as F
from torch.utils import data
from datasets import VOCSegmentation, Cityscapes, cityscapes, Cityscapes_Novel
from utils import ext_transforms as et
from metrics import StreamSegMetrics
import torch
import torch.nn as nn
from utils.visualizer import Visualizer
from PIL import Image
import matplotlib
import matplotlib.pyplot as plt
import sklearn.metrics as Metrics
from torch import Tensor
from typing import Tuple
from sklearn.metrics import f1_score
import cv2
def convert_label_to_similarity(normed_feature: Tensor, label: Tensor) -> Tuple[Tensor, Tensor]:
similarity_matrix = normed_feature @ normed_feature.transpose(1, 0)
label_matrix = label.unsqueeze(1) == label.unsqueeze(0)
positive_matrix = label_matrix.triu(diagonal=1)
negative_matrix = label_matrix.logical_not().triu(diagonal=1)
similarity_matrix = similarity_matrix.view(-1)
positive_matrix = positive_matrix.view(-1)
negative_matrix = negative_matrix.view(-1)
return similarity_matrix[positive_matrix], similarity_matrix[negative_matrix]
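# Editor's sketch (not part of the original file): with 4 normalized embeddings and
# labels [0, 0, 1, 1], the upper triangle of the 4x4 label matrix yields 2 positive
# pairs -- (0,1) and (2,3) -- and 4 negative pairs. Sizes here are hypothetical.
def _demo_label_to_similarity():
    import torch
    import torch.nn.functional as F
    feat = F.normalize(torch.randn(4, 8), dim=1)
    label = torch.tensor([0, 0, 1, 1])
    sp, sn = convert_label_to_similarity(feat, label)
    return sp.shape[0], sn.shape[0]   # (2, 4)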
class CircleLoss(nn.Module):
def __init__(self, m: float, gamma: float) -> None:
super(CircleLoss, self).__init__()
self.m = m
self.gamma = gamma
self.soft_plus = nn.Softplus()
def forward(self, sp: Tensor, sn: Tensor) -> Tensor:
ap = torch.clamp_min(- sp.detach() + 1 + self.m, min=0.)
an = torch.clamp_min(sn.detach() + self.m, min=0.)
delta_p = 1 - self.m
delta_n = self.m
logit_p = - ap * (sp - delta_p) * self.gamma
logit_n = an * (sn - delta_n) * self.gamma
loss = self.soft_plus(torch.logsumexp(logit_n, dim=0) + torch.logsumexp(logit_p, dim=0))
return loss
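# Editor's sketch (not part of the original file): CircleLoss consumes the positive
# and negative pair similarities produced by convert_label_to_similarity(). The
# margin m=0.25 and scale gamma=8.0 match the values used in main() below; the
# embeddings are hypothetical.
def _demo_circle_loss():
    import torch
    import torch.nn.functional as F
    feat = F.normalize(torch.randn(6, 128), dim=1)
    label = torch.tensor([0, 0, 1, 1, 2, 2])
    sp, sn = convert_label_to_similarity(feat, label)
    return CircleLoss(m=0.25, gamma=8.0)(sp, sn)   # scalar loss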
def get_argparser():
parser = argparse.ArgumentParser()
    # Dataset Options
    parser.add_argument("--data_root", type=str, default='../data/cityscapes',
                        help="path to Dataset")
    parser.add_argument("--dataset", type=str, default='cityscapes',
                        choices=['voc', 'cityscapes'], help='Name of dataset')
    parser.add_argument("--num_classes", type=int, default=256,
                        help="num classes (default: 256)")
    parser.add_argument("--metric_dim", type=int, default=None,
                        help="dimension of the metric embedding (default: None)")
# Deeplab Options
parser.add_argument("--model", type=str, default='deeplabv3plus_metirc_resnet101',
choices=['deeplabv3_resnet50', 'deeplabv3plus_resnet50',
'deeplabv3_resnet101', 'deeplabv3plus_resnet101',
'deeplabv3_mobilenet', 'deeplabv3plus_mobilenet',
'deeplabv3plus_metirc_resnet101'], help='model name')
parser.add_argument("--separable_conv", action='store_true', default=False,
help="apply separable conv to decoder and aspp")
parser.add_argument("--output_stride", type=int, default=16, choices=[8, 16])
# Train Options
parser.add_argument("--test_only", action='store_true', default=False)
parser.add_argument("--save_val_results", action='store_true', default=False,
help="save segmentation results to \"./results\"")
parser.add_argument("--total_itrs", type=int, default=10000,
help="epoch number (default: 30k)")
parser.add_argument("--lr", type=float, default=0.1,
help="learning rate (default: 0.01)")
parser.add_argument("--lr_policy", type=str, default='poly', choices=['poly', 'step'],
help="learning rate scheduler policy")
parser.add_argument("--step_size", type=int, default=10000)
parser.add_argument("--crop_val", action='store_true', default=False,
help='crop validation (default: True)')
parser.add_argument("--batch_size", type=int, default=4,
help='batch size (default: 16)')
parser.add_argument("--val_batch_size", type=int, default=1,
help='batch size for validation (default: 4)')
parser.add_argument("--crop_size", type=int, default=512)
parser.add_argument("--ckpt", default="output/final.pth", type=str,
help="restore from checkpoint")
parser.add_argument("--continue_training", action='store_true', default=False)
parser.add_argument("--loss_type", type=str, default='cross_entropy',
choices=['cross_entropy', 'focal_loss'], help="loss type (default: False)")
parser.add_argument("--gpu_id", type=str, default='0',
help="GPU ID")
parser.add_argument("--weight_decay", type=float, default=1e-4,
help='weight decay (default: 1e-4)')
parser.add_argument("--random_seed", type=int, default=1,
help="random seed (default: 1)")
parser.add_argument("--print_interval", type=int, default=10,
help="print interval of loss (default: 10)")
parser.add_argument("--val_interval", type=int, default=100,
help="epoch interval for eval (default: 100)")
parser.add_argument("--download", action='store_true', default=False,
help="download datasets")
parser.add_argument("--name", type=str, default='',help="download datasets")
parser.add_argument("--output_dir", type=str, default='output_metric', help="output path")
parser.add_argument("--novel_dir", type=str, default='./novel/', help="novel path")
parser.add_argument("--test_mode", type=str, default='16_3', choices=['16_1','16_3','12','14'],
help="test mode")
# PASCAL VOC Options
parser.add_argument("--year", type=str, default='2012',
choices=['2012_aug', '2012', '2011', '2009', '2008', '2007'], help='year of VOC')
# Visdom options
parser.add_argument("--enable_vis", action='store_true', default=False,
help="use visdom for visualization")
parser.add_argument("--vis_port", type=str, default='13570',
help='port for visdom')
parser.add_argument("--vis_env", type=str, default='main',
help='env for visdom')
parser.add_argument("--vis_num_samples", type=int, default=8,
help='number of samples for visualization (default: 8)')
return parser
def get_dataset(opts):
""" Dataset And Augmentation
"""
if opts.dataset == 'voc':
train_transform = et.ExtCompose([
#et.ExtResize(size=opts.crop_size),
et.ExtRandomScale((0.5, 2.0)),
et.ExtRandomCrop(size=(opts.crop_size, opts.crop_size), pad_if_needed=True),
et.ExtRandomHorizontalFlip(),
et.ExtToTensor(),
et.ExtNormalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]),
])
if opts.crop_val:
val_transform = et.ExtCompose([
et.ExtResize(opts.crop_size),
et.ExtCenterCrop(opts.crop_size),
et.ExtToTensor(),
et.ExtNormalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]),
])
else:
val_transform = et.ExtCompose([
et.ExtToTensor(),
et.ExtNormalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]),
])
train_dst = VOCSegmentation(root=opts.data_root, year=opts.year,
image_set='train', download=opts.download, transform=train_transform)
val_dst = VOCSegmentation(root=opts.data_root, year=opts.year,
image_set='val', download=False, transform=val_transform)
if opts.dataset == 'cityscapes':
train_transform = et.ExtCompose([
#et.ExtResize( 512 ),
et.ExtRandomCrop(size=(opts.crop_size, opts.crop_size)),
et.ExtColorJitter( brightness=0.5, contrast=0.5, saturation=0.5 ),
et.ExtRandomHorizontalFlip(),
et.ExtToTensor(),
et.ExtNormalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]),
])
val_transform = et.ExtCompose([
#et.ExtResize( 512 ),
et.ExtToTensor(),
et.ExtNormalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]),
])
train_dst = Cityscapes(root=opts.data_root,
split='train', transform=train_transform)
val_dst = Cityscapes(root=opts.data_root,
split='val', transform=val_transform)
return train_dst, val_dst
def save_ckpt(batch_idx, model, metric_model, optimizer, scheduler, path):
""" save current model
"""
torch.save({
"batch_idx": batch_idx,
"model_state": model.module.state_dict(),
"metric_model": metric_model.state_dict(),
"optimizer_state": optimizer.state_dict(),
"scheduler_state": scheduler.state_dict(),
}, path)
print("Model saved as %s" % path)
def get_split_center(feature,target,metric_model,label,device):
    _, H, W, C = feature.shape
    feature = feature.view(H,W,C) # (H, W, C)
    target = target.view(H,W) # (H, W)
#feature = feature[target==label] # (N, M)
now_sum = torch.zeros(C,).to(device)
mask = target == label
print(mask.shape)
now_center_embedding=[]
mask = mask.cpu().data.numpy()
mask = mask.astype(np.uint8)
num_object, connect = cv2.connectedComponents(mask)
#novel_sum=0
for k in range(num_object):
        now_connect = (connect == k).astype(np.uint8)    # (H, W); the extra np.newaxis broke the indexing below
        #now_mask = mask[now_connect]
        now_mask = now_connect * mask
print(np.sum(now_mask))
if (np.sum(now_mask)<100): continue
print(now_mask.shape)
print(feature.shape)
        now_feature = feature[torch.from_numpy(now_mask) == 1]   # (N, C); index the torch tensor with a torch mask
print(now_feature.shape)
now_feature=now_feature.view(-1,C)
now_feature=torch.sum(now_feature,dim=0)/np.sum(now_mask)
#now_feature=torch.Tensor(now_feature).to(device)
now_embedding=metric_model.forward_feature(now_feature.unsqueeze(dim=0))[0].detach().cpu().numpy() # (128,)
now_center_embedding.append(now_embedding)
return now_center_embedding
def get_all_center(feature,target,metric_model,label):
_, H, W, C = feature.shape
feature = feature.view(-1,C) # (H*W, M)
target = target.flatten() # (H*W)
feature = feature[target==label] # (N, M)
feature = torch.sum(feature, dim=0)
novel_sum = torch.sum(target == label)
now_center = feature / novel_sum
now_center_embedding = metric_model.forward_feature(now_center.unsqueeze(dim=0))[0].detach().cpu().numpy() # (128,)
return now_center_embedding
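# Editor's sketch (not part of the original file): the prototype computed by
# get_all_center() above is a masked average -- sum the feature vectors whose
# target equals the novel label and divide by their count -- before it is passed
# through the metric head. Shapes below are hypothetical.
def _demo_masked_average():
    import torch
    feature = torch.randn(10, 8)                           # flattened (H*W, C)
    target = torch.tensor([3, 3, 0, 3, 1, 3, 0, 0, 3, 1])
    selected = feature[target == 3]                        # (N, C)
    return selected.sum(dim=0) / selected.shape[0]         # (C,) prototype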
def generate_novel(novel_path_name, unknown_list, model, metric_model, device):
model.eval()
metric_model.eval()
novel_transform = et.ExtCompose([
et.ExtToTensor(),
et.ExtNormalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]),
])
center_embedding = {}
    split_list=[]
with torch.no_grad():
for x in unknown_list: # [13, 14, 15]
print('generate novel: '+str(x))
center=[]
novel_dst = Cityscapes_Novel(novel_path=novel_path_name, novel_no=x, transform=novel_transform)
novel_loader = data.DataLoader(novel_dst, batch_size=1, shuffle=False, num_workers=4)
novel_sum = 0
for (image, target) in novel_loader:
assert image.shape[0] == 1
image = image.to(device)
target = target.to(device,dtype=torch.long)
_,_,feature,_ = model(image)
target = F.interpolate(target.unsqueeze(dim=1).float(), size=feature.shape[-2:], mode='nearest')[:, 0]
feature = feature.permute(0, 2, 3, 1) # (1, H, W, M)
_, H, W, C = feature.shape
                if (x in split_list):
                    now_center_embedding=get_split_center(feature,target,metric_model,x,device)
for now_center in now_center_embedding:
center.append(now_center)
else:
now_center_embedding=get_all_center(feature,target,metric_model,label=x)
center.append(now_center_embedding)
#center = center / novel_sum # (M,)
center=np.array(center)
print(center.shape)
'''
random select novel
np.random.seed(333333)
a = np.random.choice(100,1,False)
center=center[a]
print(center.shape)
'''
center=np.mean(center,axis=0)
center_embedding[x] = deepcopy(center)
return center_embedding
'''
def generate_novel(novel_path_name, unknown_list, model, metric_model, device):
model.eval()
metric_model.eval()
novel_transform = et.ExtCompose([
et.ExtToTensor(),
et.ExtNormalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]),
])
center_embedding = {}
with torch.no_grad():
for x in unknown_list: # [13, 14, 15]
print('generate novel: '+str(x))
center=None
novel_dst = Cityscapes_Novel(novel_path=novel_path_name, novel_no=x, transform=novel_transform)
novel_loader = data.DataLoader(novel_dst, batch_size=1, shuffle=False, num_workers=4)
novel_sum = 0
for (image, target) in novel_loader:
assert image.shape[0] == 1
image = image.to(device)
target = target.to(device,dtype=torch.long)
_,_,feature,_ = model(image)
target = F.interpolate(target.unsqueeze(dim=1).float(), size=feature.shape[-2:], mode='nearest')[:, 0]
feature = feature.permute(0, 2, 3, 1) # (1, H, W, M)
_, H, W, C = feature.shape
feature = feature.view(-1, C) # (H*W, M)
target = target.flatten() # (H*W)
feature = feature[target==x] # (N, M)
feature = torch.sum(feature, dim=0)
if center is None:
center = torch.zeros(C,).to(device)
center += feature
novel_sum += torch.sum(target == x)
center = center / novel_sum # (M,)
center_embedding[x] = metric_model.forward_feature(center.unsqueeze(dim=0))[0].detach().cpu().numpy() # (128,)
return center_embedding
'''
def cosine_similarity(x,y):
num = x.dot(y.T)
denom = np.linalg.norm(x) * np.linalg.norm(y)
return num / denom
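# Editor's sketch (not part of the original file): val() below matches each region
# embedding against the novel-class prototypes with this cosine similarity and
# keeps the best match only when the score clears a threshold (0.8 there). The
# prototypes and embedding here are hypothetical 2-d vectors.
def _demo_cosine_match():
    import numpy as np
    prototypes = {13: np.array([1.0, 0.0]), 14: np.array([0.0, 1.0])}
    embedding = np.array([0.9, 0.1])
    embedding = embedding / np.linalg.norm(embedding)
    return max(prototypes, key=lambda k: cosine_similarity(embedding, prototypes[k]))  # 13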
from copy import deepcopy
def concat_logits(logits, threshold=100, erode=True, tag=None):
if (isinstance(tag,list)):
mask = np.array(tag)
logits = np.transpose(logits)
logits = logits * mask
logits = np.transpose(logits)
logits = (logits >= 0.5).astype(np.uint8)
logits = np.sum(logits,axis=0)
logits[logits>=1]=1
mask = logits == 1
logits = logits.astype(np.uint8)
if (erode == True):
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (5, 5))
logits = cv2.dilate(logits, kernel)
logits = cv2.erode(logits, kernel)
#print(logits.shape)
num_object, connect = cv2.connectedComponents(logits)
region_list = []
for k in range(1,num_object):
now_connect = (connect == k)[np.newaxis, ...].astype(np.uint8)
#now_sum = np.sum(now_connect)
#print(now_sum)
        if (np.sum(now_connect) < threshold):
mask[connect == k] = 0
continue
region_list.append(k)
logits = logits * mask
return logits, region_list, connect
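# Editor's sketch (not part of the original file): concat_logits() ORs the
# meta-channel maps, applies a morphological closing, and drops connected
# components smaller than `threshold` pixels. The two synthetic blobs below
# illustrate the filtering.
def _demo_concat_logits():
    import numpy as np
    logits = np.zeros((2, 32, 32), dtype=np.float32)
    logits[0, 2:10, 2:10] = 1.0      # 64-pixel blob: kept
    logits[1, 20:22, 20:22] = 1.0    # 4-pixel blob: removed
    merged, regions, connect = concat_logits(logits, threshold=50)
    return regions                   # ids of the surviving components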
def check_novel_logit(opts,model,metric_model,class_no,meta_channel_num,device,beta=0.15):
model.eval()
metric_model.eval()
novel_transform = et.ExtCompose([
et.ExtToTensor(),
et.ExtNormalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]),
])
denorm = utils.Denormalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
center_embedding = {}
    split_list=[]
channel_tag=[0]*meta_channel_num
with torch.no_grad():
print('generate novel: '+str(class_no))
center=[]
novel_dst = Cityscapes_Novel(novel_path=opts.novel_dir, novel_no=class_no, transform=novel_transform)
novel_loader = data.DataLoader(novel_dst, batch_size=1, shuffle=False, num_workers=4)
novel_sum = 0
for (image, target) in novel_loader:
assert image.shape[0] == 1
#image, target = novel_transform(image,target)
image = image.to(device)
target = target.to(device,dtype=torch.long)
output,logit,feature,_ = model(image)
output = torch.argmax(output[0], dim=0).detach().cpu().numpy()
mask = target == class_no
target = F.interpolate(target.unsqueeze(dim=1).float(), size=feature.shape[-2:], mode='nearest')[:, 0]
#print(target.shape)
#print(mask.shape)
logit = logit[0, (-meta_channel_num):]
#print(logit.shape)
logit = logit * mask
mask = mask.data.cpu().numpy()
all_sum=np.sum(mask)
logit = logit.detach().cpu().numpy()
logit = (logit >= 0.5).astype(np.uint8)
for x in range(logit.shape[0]):
if (np.sum(logit[x])>all_sum*beta): channel_tag[x]=1
#print(logit.shape)
#for x in range(channel_num):
#print(image.shape)
#image= denorm(image.detach().cpu().numpy())[0] * 255
#print(image.shape)
image = (denorm(image.detach().cpu().numpy())[0] * 255).transpose(1, 2, 0).astype(np.uint8)
'''
plt.imshow(image)
plt.show()
plt.close()
_, axarr = plt.subplots(1, logit.shape[0], figsize=(5*logit.shape[0], 5))
for i in range(logit.shape[0]):
now_logit=cv2.resize(logit[i], output.shape[::-1], interpolation=cv2.INTER_NEAREST)
axarr[i].imshow(image)
axarr[i].imshow(now_logit, alpha=0.5)
plt.show()
plt.close()
'''
'''
feature = feature.permute(0, 2, 3, 1) # (1, H, W, M)
_, H, W, C = feature.shape
        if (x in split_list):
            now_center_embedding=get_split_center(feature,target,metric_model,label=x)
for now_center in now_center_embedding:
center.append(now_center)
else:
now_center_embedding=get_all_center(feature,target,metric_model,label=x)
center.append(now_center_embedding)
'''
#center = center / novel_sum # (M,)
'''
center=np.array(center)
print(center.shape)
center=np.mean(center,axis=0)
center_embedding[x] = deepcopy(center)
'''
return channel_tag
def val(opts, model, metric_model, train_loader, val_loader, device,):
remain_class = 19 - len(Cityscapes.unknown_target)
metrics16 = StreamSegMetrics(19)
metrics19 = StreamSegMetrics(19, remain_class)
model.eval()
metric_model.eval()
if opts.save_val_results:
if not os.path.exists('results_1'):
os.mkdir('results_1')
denorm = utils.Denormalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
img_id = 0
# val_save_dir = os.path.join(opts.output_dir, 'val')
# os.makedirs(val_save_dir, exist_ok=True)
# denorm = utils.Denormalize(mean=[0.485, 0.456, 0.406],
# std=[0.229, 0.224, 0.225])
if (opts.test_mode == '16_1'):
center_embedding = generate_novel(opts.novel_dir, [13], model, metric_model, device) # {13: (128,), 14: (128,), 15: (128,)}
else:
center_embedding = generate_novel(opts.novel_dir, Cityscapes.unknown_target, model, metric_model, device) # {13: (128,), 14: (128,), 15: (128,)}
    # used in the 16+1 setting
#center_embedding = generate_novel(opts.novel_dir, [13], model, metric_model, device) # {13: (128,), 14: (128,), 15: (128,)}
name=['sky','person','rider','car','truck','bus','train','motorcycle','bicycle']
meta_channel_num=20-remain_class
all_tag=[0]*meta_channel_num
if (opts.test_mode == '16_1'):
for x in [13]:
novel_tag=check_novel_logit(opts, model,metric_model,x, meta_channel_num=meta_channel_num, device=device)
for y in range(meta_channel_num):
if (novel_tag[y]==1): all_tag[y]=1
else:
for x in Cityscapes.unknown_target:
novel_tag=check_novel_logit(opts, model,metric_model,x, meta_channel_num=meta_channel_num, device=device)
for y in range(meta_channel_num):
if (novel_tag[y]==1): all_tag[y]=1
    # used in the 16+1 setting
'''
for x in [13]:
novel_tag=check_novel_logit(opts, model,metric_model,x, meta_channel_num=meta_channel_num, device=device)
for y in range(meta_channel_num):
if (novel_tag[y]==1): all_tag[y]=1
'''
#all_tag = np.array(all_tag)
print(all_tag)
miou_all=[]
miou_unknown=[]
for _, (images, labels, labels_true, _, _) in tqdm(enumerate(val_loader)):
assert images.shape[0] == 1
with torch.no_grad():
images = images.to(device, dtype=torch.float32)
labels = labels.to(device, dtype=torch.long)
labels_true = labels_true.to(device, dtype=torch.long)
outputs, logits, features, _ = model(images) # outputs: (1, 16, H, W), logits: (1, 20, H, W), features: (1, 256, H/4, W/4)
known_class = outputs.shape[1]
h,w=outputs.shape[2],outputs.shape[3]
#outputs = logits[:,0:known_class,:,:].clone()
logits = F.interpolate(logits, size=features.shape[-2:], mode='bilinear', align_corners=False) # (1, 20, H/4, W/4)
features = features[0].detach().cpu().numpy() # (256, H/4, W/4)
outputs = torch.argmax(outputs, dim=1)[0].detach().cpu().numpy() # (H, W)
metrics16.update(labels[0].detach().cpu().numpy(), outputs)
outputs19 = deepcopy(outputs)
# in 16 + 3 setting and 16 + 1 setting
if ('16' in opts.test_mode):
outputs19[outputs19 == 13] = 16
outputs19[outputs19 == 14] = 17
outputs19[outputs19 == 15] = 18
# in 12 + 7 setting 10->12 11,12->10,11
if ('12' in opts.test_mode):
outputs19[outputs19 == 11] = 12
outputs19[outputs19 == 10] = 11
#in 14 + 5 setting unknown_target = [10,13,14,15,16]
# 11 -> 10 12 -> 11 17 -> 12 18 -> 13
if ('14' in opts.test_mode):
outputs19[outputs19 == 13] = 18
outputs19[outputs19 == 12] = 17
outputs19[outputs19 == 11] = 12
outputs19[outputs19 == 10] = 11
logits = logits[0].detach().cpu().numpy() # (20, H/4, W/4)
logits = logits[known_class:] # (3, H/4, W/4)
# concat inference
            logits, region, connect = concat_logits(logits, threshold=250, tag=all_tag)
for k in region:
mask = (connect == k)[np.newaxis, ...].astype(np.uint8) # (1, H/4, W/4)
embedding = (features * mask).reshape(features.shape[0], -1).sum(axis=-1) # (256,)
embedding = embedding / np.sum(mask)
embedding = torch.Tensor(embedding).unsqueeze(dim=0).to(device, dtype=torch.float32) # (1, 256)
embedding = metric_model.forward_feature(embedding)[0].cpu().detach().numpy() # (128,)
tmp_key, tmp_cos = None, None
for key, value in center_embedding.items():
cos = cosine_similarity(embedding, value)
if cos >= 0.8:
if tmp_cos is None or cos > tmp_cos:
tmp_key = key
tmp_cos = cos
if tmp_key is not None:
mask = cv2.resize(mask[0], outputs19.shape[::-1], interpolation=cv2.INTER_NEAREST)
outputs19 = mask * tmp_key + outputs19 * (1 - mask)
'''
# default inference
logits = (logits >= 0.5).astype(np.uint8) # (3, H/4, W/4)
for c in range(logits.shape[0]):
logit = logits[c] # (H/4, W/4)
#Hl, Wl = logit.shape
#logit = cv2.resize(logit, (Wl//4, Hl//4), interpolation=cv2.INTER_NEAREST)
num_object, connect = cv2.connectedComponents(logit)
#connect = cv2.resize(connect, (Wl, Hl), interpolation=cv2.INTER_NEAREST)
for k in range(1, num_object+1):
mask = (connect == k)[np.newaxis, ...].astype(np.uint8) # (1, H/4, W/4)
if np.sum(mask) < 100: continue
embedding = (features * mask).reshape(features.shape[0], -1).sum(axis=-1) # (256,)
embedding = embedding / np.sum(mask)
embedding = torch.Tensor(embedding).unsqueeze(dim=0).to(device, dtype=torch.float32) # (1, 256)
embedding = metric_model.forward_feature(embedding)[0].cpu().detach().numpy() # (128,)
tmp_key, tmp_cos = None, None
for key, value in center_embedding.items():
cos = cosine_similarity(embedding, value)
if cos >= 0.75:
if tmp_cos is None or cos > tmp_cos:
tmp_key = key
tmp_cos = cos
if tmp_key is not None:
mask = cv2.resize(mask[0], outputs19.shape[::-1], interpolation=cv2.INTER_NEAREST)
outputs19 = mask * tmp_key + outputs19 * (1 - mask)
'''
            # used in the 16+3 setting
if ('16' in opts.test_mode):
for x in range(13,16):
labels_true[labels_true==x]+=103
outputs19[outputs19==x]+=103
labels_true[labels_true==(x+3)]-=3
outputs19[outputs19==(x+3)]-=3
for x in range(116,119):
labels_true[labels_true==x]-=100
outputs19[outputs19==x]-=100
if (opts.test_mode == '16_1'):
for x in range(17,19):
labels_true[labels_true==x] = 255
# using in 12 + 7 setting 10->12 11,12->10,11
if ('12' in opts.test_mode):
labels_true[labels_true==10] = 112
outputs19[outputs19==10] =112
labels_true[labels_true == 11] = 10
outputs19[outputs19==11] = 10
labels_true[labels_true == 12] = 11
outputs19[outputs19 == 12] = 11
labels_true[labels_true==112] -= 100
outputs19[outputs19==112] -= 100
'''
labels_true[labels_true==10] = 112
outputs19[outputs19==10] =112
labels_true[labels_true == 11] = 10
outputs19[outputs19==11] = 10
labels_true[labels_true == 12] = 11
outputs19[outputs19 == 12] = 11
labels_true[labels_true==112] -= 100
outputs19[outputs19==112] -= 100
'''
#in 14 + 5 setting unknown_target = [10,13,14,15,16]
# 11 -> 10 12 -> 11 17 -> 12 18 -> 13
# 10 -> 14 ,13 ->15
if ('14' in opts.test_mode):
labels_true[labels_true == 10] = 114
outputs19[outputs19 == 10] = 114
for x in range(13,17):
labels_true[labels_true == x] = 100+2+x
outputs19[outputs19 == x] = 100+2+x
for x in range(11,13):
labels_true[labels_true == x] = x-1
outputs19[outputs19 == x] = x-1
for x in range(17,19):
labels_true[labels_true == x] = x-5
outputs19[outputs19 == x] = x-5
for x in range(114,119):
labels_true[labels_true == x] -=100
outputs19[outputs19 == x] -=100
metrics19.update(labels_true[0].detach().cpu().numpy(), outputs19)
'''
for x in range(13,16):
labels_true[labels_true==x]+=103
outputs19[outputs19==x]+=103
labels_true[labels_true==(x+3)]-=3
outputs19[outputs19==(x+3)]-=3
for x in range(116,119):
labels_true[labels_true==x]-=100
outputs19[outputs19==x]-=100
'''
'''
now_all_IoU = metrics19.get_results()['Mean IoU']
now_unkown_IoU = metrics19.get_results()['Unknown IoU']
miou_all.append(now_all_IoU)
miou_unknown.append(now_unkown_IoU)
metrics19.reset()
'''
#print(labels_true.shape)
#print(outputs19.shape)
if opts.save_val_results:
assert images.shape[0] == 1
target = labels_true[0].detach().cpu().numpy()
image = images[0].detach().cpu().numpy()
pred = outputs19
#pred = pred.reshape(h,w)
image = (denorm(image) * 255).transpose(1, 2, 0).astype(np.uint8)
target = train_loader.dataset.decode_target(target).astype(np.uint8)
pred = train_loader.dataset.decode_target(pred).astype(np.uint8)
#scores = (255 * scores).squeeze().astype(np.uint8)
Image.fromarray(image).save('results_1/%d_image.png' % img_id)
Image.fromarray(target).save('results_1/%d_target.png' % img_id)
Image.fromarray(pred).save('results_1/%d_pred.png' % img_id)
#Image.fromarray(scores).save('results/%d_scores.png' % img_id)
# np.save('results/%d_dis_sum.npy' % img_id, dis_sum_map
img_id += 1
score16 = metrics16.get_results()
score19 = metrics19.get_results()
now_IoU = score19['Unknown IoU']
print('16 classes')
print(metrics16.to_str(score16))
print()
print('19 classes')
print(metrics19.to_str(score19))
'''
for x in range(0,100):
print(x,miou_all[x],miou_unknown[x])
'''
return now_IoU
def train(opts, model, metric_model, train_loader, val_loader, criterion, optimizer, scheduler, device, printer=print):
ce_criterion = nn.CrossEntropyLoss().to(device)
model.eval()
metric_model.train()
epoch_records = {'f1': []}
cur_itr = 0
best_IoU = 0
denorm = utils.Denormalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
#val_save_dir = os.path.join(opts.output_dir, 'val')
#os.makedirs(val_save_dir, exist_ok=True)
while True:
for batch_idx, (images, labels, labels_true, labels_lst, class_lst) in enumerate(train_loader):
images = images.to(device, dtype=torch.float32)
labels_lst = labels_lst.to(device, dtype=torch.long)
class_lst = class_lst.to(device, dtype=torch.long)
labels_true = labels_true.to(device, dtype=torch.long)
labels = labels.to(device, dtype=torch.long)
_, _, features, _ = model(images)
labels_lst = F.interpolate(labels_lst.float(), size=features.shape[-2:], mode='nearest')
new_features, new_labels, logits = metric_model(features, labels_lst)
cir_loss = criterion(*convert_label_to_similarity(new_features, new_labels)) * 0.1
ce_loss = ce_criterion(logits, new_labels.long())
loss = {
'loss': cir_loss + ce_loss,
'cir_loss': cir_loss,
'ce_loss': ce_loss,
}
for key, value in loss.items():
if key not in epoch_records:
epoch_records[key] = []
epoch_records[key].append(value.item())
optimizer.zero_grad()
loss['loss'].backward()
optimizer.step()
f1 = f1_score(new_labels.detach().cpu().numpy(),
torch.argmax(logits, dim=1).detach().cpu().numpy(),
average='macro')
epoch_records['f1'].append(f1)
if batch_idx % 100 == 0:
context = f"Iters {cur_itr}\t"
for key, value in epoch_records.items():
context += f"{key}: {np.mean(value):.4f}\t"
printer(context)
epoch_records = {'f1': []}
if cur_itr and cur_itr % 1000 == 0:
now_IoU = val(opts, model, metric_model, train_loader, val_loader, device)
if (now_IoU > best_IoU):
best_IoU = now_IoU
save_ckpt(batch_idx, model, metric_model, optimizer, scheduler, os.path.join(opts.output_dir, f'best.pth'))
print('best IoU :'+str(best_IoU))
model.eval()
metric_model.train()
cur_itr += 1
if cur_itr >= opts.total_itrs:
save_ckpt(batch_idx, model, metric_model, optimizer, scheduler, os.path.join(opts.output_dir, f'final.pth'))
val(opts, model, metric_model, train_loader, val_loader, device)
return epoch_records
scheduler.step()
save_ckpt(batch_idx, model, metric_model, optimizer, scheduler, os.path.join(opts.output_dir, f'{cur_itr}.pth'))
from dropblock import DropBlock2D
class MetricModel(nn.Module):
def __init__(self, known_class):
super().__init__()
self.model = nn.Sequential(
nn.Linear(256, 128),
nn.ReLU(inplace=True),
nn.Linear(128, 128))
self.classifier = nn.Linear(128, known_class, bias=False)
self.known_class = known_class
self.dropblock = DropBlock2D(block_size=3, drop_prob=0.3)
def forward(self, feature, label_lst):
# feature: (B, 256, H, W)
# label_lst: (B, 17, H, W)
label_lst = label_lst[:, :self.known_class]
new_feature, new_label = [], []
for _ in range(self.known_class):
tmp_label_lst = self.dropblock(label_lst) # (B, 16, H, W)
for c in range(tmp_label_lst.shape[1]):
tmp_feature = (feature * tmp_label_lst[:, c:c+1, :, :]).view(feature.shape[0], feature.shape[1], -1) # (B, 256, H*W)
tmp_feature = tmp_feature.sum(dim=-1) # (B, 256)
tmp_num = tmp_label_lst[:, c:c+1, :, :].view(tmp_label_lst.shape[0], -1) # (B, H*W)
tmp_num = tmp_num.sum(dim=-1) # (B,)
keep_ind = tmp_num != 0
if keep_ind.shape[0]:
tmp_feature = tmp_feature[keep_ind]
tmp_num = tmp_num[keep_ind]
tmp_feature = tmp_feature / tmp_num.unsqueeze(dim=1) # (B, 256)
new_feature.append(tmp_feature)
new_label.append(torch.ones(tmp_feature.shape[0])*c)
new_feature = torch.cat(new_feature, dim=0) # (N, 256)
new_feature = self.model(new_feature) # (N, 128)
new_label = torch.cat(new_label, dim=0).to(feature.device) # (N,)
logit = self.classifier(new_feature) # (N, 16)
return F.normalize(new_feature), new_label.long(), logit
def forward_feature(self, feature):
# feature: (1, 256)
new_feature = self.model(feature) # (1, 128)
return F.normalize(new_feature)
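# Editor's sketch (not part of the original file): shapes through MetricModel.
# Features are masked-average pooled per class mask (with DropBlock jitter),
# embedded to 128-d, and classified over the known classes. All sizes below are
# hypothetical.
def _demo_metric_model():
    import torch
    model = MetricModel(known_class=4)
    feature = torch.randn(2, 256, 16, 16)
    label_lst = (torch.rand(2, 4, 16, 16) > 0.5).float()
    emb, lab, logit = model(feature, label_lst)
    return emb.shape, lab.shape, logit.shape   # (N, 128), (N,), (N, 4)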
def main():
print(torch.version.cuda)
opts = get_argparser().parse_args()
if opts.dataset.lower() == 'voc':
opts.num_classes = 21
elif opts.dataset.lower() == 'cityscapes':
opts.num_classes = 19
os.environ['CUDA_VISIBLE_DEVICES'] = opts.gpu_id
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
print("Device: %s" % device)
# Setup random seed
torch.manual_seed(opts.random_seed)
np.random.seed(opts.random_seed)
random.seed(opts.random_seed)
# Setup dataloader
if opts.dataset=='voc' and not opts.crop_val:
opts.val_batch_size = 1
train_dst, val_dst = get_dataset(opts)
train_loader = data.DataLoader(
train_dst, batch_size=opts.batch_size, shuffle=True, num_workers=8)
val_loader = data.DataLoader(
val_dst, batch_size=opts.val_batch_size, shuffle=False, num_workers=8)
print("Dataset: %s, Train set: %d, Val set: %d" %
(opts.dataset, len(train_dst), len(val_dst)))
unknown_num = len(train_dst.unknown_target)
remain_class = opts.num_classes - unknown_num
opts.num_classes = remain_class
# Set up model
model_map = {
'deeplabv3_resnet50': network.deeplabv3_resnet50,
'deeplabv3plus_resnet50': network.deeplabv3plus_resnet50,
'deeplabv3_resnet101': network.deeplabv3_resnet101,
'deeplabv3plus_resnet101': network.deeplabv3plus_resnet101,
'deeplabv3_mobilenet': network.deeplabv3_mobilenet,
'deeplabv3plus_mobilenet': network.deeplabv3plus_mobilenet,
'deeplabv3plus_metirc_resnet101': network.deeplabv3plus_metirc_resnet101
}
model = model_map[opts.model](num_classes=opts.num_classes, output_stride=opts.output_stride, metric_dim=opts.metric_dim)
if opts.separable_conv and 'plus' in opts.model:
network.convert_to_separable_conv(model.classifier)
utils.set_bn_momentum(model.backbone, momentum=0.01)
# # Set up metrics
# metrics = StreamSegMetrics(opts.num_classes)
#criterion = MyDiceLoss(ignore_index=255).to(device)
criterion = CircleLoss(m=0.25, gamma=8.0).to(device)
utils.mkdir(opts.output_dir)
# Restore
if opts.ckpt is not None and os.path.isfile(opts.ckpt):
# https://github.com/VainF/DeepLabV3Plus-Pytorch/issues/8#issuecomment-605601402, @PytaichukBohdan
checkpoint = torch.load(opts.ckpt, map_location=torch.device('cpu'))
res = model.load_state_dict(checkpoint["model_state"])
print(res)
model = nn.DataParallel(model)
model.to(device)
# if opts.continue_training:
# optimizer.load_state_dict(checkpoint["optimizer_state"])
# scheduler.load_state_dict(checkpoint["scheduler_state"])
# print("Training state restored from %s" % opts.ckpt)
print("Model restored from %s" % opts.ckpt)
del checkpoint # free memory
else:
print("[!] Retrain")
model = nn.DataParallel(model)
model.to(device)
for _, param in model.named_parameters():
param.requires_grad = False
metric_model = MetricModel(remain_class).to(device)
optimizer = torch.optim.SGD(metric_model.parameters(), lr=opts.lr, momentum=0.9, weight_decay=opts.weight_decay)
if opts.lr_policy=='poly':
scheduler = utils.PolyLR(optimizer, opts.total_itrs, power=0.9)
elif opts.lr_policy=='step':
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=opts.step_size, gamma=0.1)
if (opts.test_only):
checkpoint = torch.load(opts.ckpt, map_location=torch.device('cpu'))
metric_model.load_state_dict(checkpoint["metric_model"])
val(opts, model, metric_model, train_loader, val_loader, device)
return
#res = model.load_state_dict(checkpoint["model_state"])
print(res)
#model = nn.DataParallel(model)
#model.to(device)
train(opts, model, metric_model, train_loader, val_loader, criterion, optimizer, scheduler, device, printer=print)
if __name__ == '__main__':
main()
| 40,855 | 43.408696 | 152 | py |
RAML | RAML-master/incremental/test_metric.py | from datasets.cityscapes_novel import Cityscapes_Novel
from tqdm import tqdm
import network
import utils
import os
import random
import argparse
import numpy as np
import torch.nn.functional as F
from torch.utils import data
from datasets import VOCSegmentation, Cityscapes, Cityscapes_Novel
from utils import ext_transforms as et
from metrics import StreamSegMetrics
from collections import namedtuple
from utils import colorEncode
import torch
import torch.nn as nn
from utils.visualizer import Visualizer
from PIL import Image
import matplotlib
import matplotlib.pyplot as plt
import sklearn.metrics as Metrics
from sklearn.mixture import GaussianMixture
from statsmodels.distributions.empirical_distribution import ECDF
import joblib
import json
from sklearn import manifold
import queue
CityscapesClass = namedtuple('CityscapesClass', ['name', 'id', 'train_id', 'category', 'category_id',
'has_instances', 'ignore_in_eval', 'color'])
classes = [
CityscapesClass('unlabeled', 0, 255, 'void', 0, False, True, (0, 0, 0)),
CityscapesClass('ego vehicle', 1, 255, 'void', 0, False, True, (0, 0, 0)),
CityscapesClass('rectification border', 2, 255, 'void', 0, False, True, (0, 0, 0)),
CityscapesClass('out of roi', 3, 255, 'void', 0, False, True, (0, 0, 0)),
CityscapesClass('static', 4, 255, 'void', 0, False, True, (0, 0, 0)),
CityscapesClass('dynamic', 5, 255, 'void', 0, False, True, (111, 74, 0)),
CityscapesClass('ground', 6, 255, 'void', 0, False, True, (81, 0, 81)),
CityscapesClass('road', 7, 0, 'flat', 1, False, False, (128, 64, 128)),
CityscapesClass('sidewalk', 8, 1, 'flat', 1, False, False, (244, 35, 232)),
CityscapesClass('parking', 9, 255, 'flat', 1, False, True, (250, 170, 160)),
CityscapesClass('rail track', 10, 255, 'flat', 1, False, True, (230, 150, 140)),
CityscapesClass('building', 11, 2, 'construction', 2, False, False, (70, 70, 70)),
CityscapesClass('wall', 12, 3, 'construction', 2, False, False, (102, 102, 156)),
CityscapesClass('fence', 13, 4, 'construction', 2, False, False, (190, 153, 153)),
CityscapesClass('guard rail', 14, 255, 'construction', 2, False, True, (180, 165, 180)),
CityscapesClass('bridge', 15, 255, 'construction', 2, False, True, (150, 100, 100)),
CityscapesClass('tunnel', 16, 255, 'construction', 2, False, True, (150, 120, 90)),
CityscapesClass('pole', 17, 5, 'object', 3, False, False, (153, 153, 153)),
CityscapesClass('polegroup', 18, 255, 'object', 3, False, True, (153, 153, 153)),
CityscapesClass('traffic light', 19, 6, 'object', 3, False, False, (250, 170, 30)),
CityscapesClass('traffic sign', 20, 7, 'object', 3, False, False, (220, 220, 0)),
CityscapesClass('vegetation', 21, 8, 'nature', 4, False, False, (107, 142, 35)),
CityscapesClass('terrain', 22, 9, 'nature', 4, False, False, (152, 251, 152)),
CityscapesClass('sky', 23, 10, 'sky', 5, False, False, (70, 130, 180)),
CityscapesClass('person', 24, 11, 'human', 6, True, False, (220, 20, 60)),
CityscapesClass('rider', 25, 12, 'human', 6, True, False, (255, 0, 0)),
CityscapesClass('car', 26, 13, 'vehicle', 7, True, False, (0, 0, 142)),
CityscapesClass('truck', 27, 14, 'vehicle', 7, True, False, (0, 0, 70)),
CityscapesClass('bus', 28, 15, 'vehicle', 7, True, False, (0, 60, 100)),
CityscapesClass('caravan', 29, 255, 'vehicle', 7, True, True, (0, 0, 90)),
CityscapesClass('trailer', 30, 255, 'vehicle', 7, True, True, (0, 0, 110)),
CityscapesClass('train', 31, 16, 'vehicle', 7, True, False, (0, 80, 100)),
CityscapesClass('motorcycle', 32, 17, 'vehicle', 7, True, False, (0, 0, 230)),
CityscapesClass('bicycle', 33, 18, 'vehicle', 7, True, False, (119, 11, 32)),
CityscapesClass('license plate', -1, 255, 'vehicle', 7, False, True, (0, 0, 142)),
]
train_id_to_color = [c.color for c in classes if (c.train_id != -1 and c.train_id != 255)]
train_id_to_color.append([255, 255, 255])
colors = np.array(train_id_to_color)
colors = np.uint8(colors)
from dropblock import DropBlock2D
class MetricModel(nn.Module):
def __init__(self):
super().__init__()
self.model = nn.Sequential(
nn.Linear(256, 128),
nn.ReLU(inplace=True),
nn.Linear(128, 128))
self.classifier = nn.Linear(128, 10, bias=False)
self.dropblock = DropBlock2D(block_size=3, drop_prob=0.3)
def forward(self, feature, label_lst):
# feature: (B, 256, H, W)
# label_lst: (B, 17, H, W)
label_lst = label_lst[:, :10]
new_feature, new_label = [], []
for _ in range(10):
            tmp_label_lst = self.dropblock(label_lst) # (B, 10, H, W)
for c in range(tmp_label_lst.shape[1]):
tmp_feature = (feature * tmp_label_lst[:, c:c+1, :, :]).view(feature.shape[0], feature.shape[1], -1) # (B, 256, H*W)
tmp_feature = tmp_feature.sum(dim=-1) # (B, 256)
tmp_num = tmp_label_lst[:, c:c+1, :, :].view(tmp_label_lst.shape[0], -1) # (B, H*W)
tmp_num = tmp_num.sum(dim=-1) # (B,)
                keep_ind = tmp_num != 0
                if not keep_ind.any():
                    continue # class c is absent from every image in the batch
                tmp_feature = tmp_feature[keep_ind]
                tmp_num = tmp_num[keep_ind]
                tmp_feature = tmp_feature / tmp_num.unsqueeze(dim=1) # (B, 256)
                new_feature.append(tmp_feature)
                new_label.append(torch.ones(tmp_feature.shape[0])*c)
new_feature = torch.cat(new_feature, dim=0) # (N, 256)
new_feature = self.model(new_feature) # (N, 128)
new_label = torch.cat(new_label, dim=0).to(feature.device) # (N,)
        logit = self.classifier(new_feature) # (N, 10)
return F.normalize(new_feature), new_label.long(), logit
def forward_feature(self, feature):
# feature: (1, 256)
new_feature = self.model(feature) # (1, 128)
return F.normalize(new_feature)
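# A minimal usage sketch for MetricModel (hypothetical tensors, not repo code):
# a pooled 256-d region feature goes through the projection head, and the
# L2-normalized embedding is compared to a stored class center by cosine
# similarity, which reduces to a dot product between unit vectors.
def _demo_metric_model():
    model = MetricModel().eval()
    feat = torch.randn(1, 256) # one region-averaged backbone feature
    emb = model.forward_feature(feat) # (1, 128), L2-normalized
    center = F.normalize(torch.randn(1, 128)) # a stored novel-class center
    cos = (emb * center).sum().item() # cosine similarity for unit-norm vectors
    return cos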
def get_argparser():
parser = argparse.ArgumentParser()
# Datset Options
parser.add_argument("--data_root", type=str, default='./datasets/data',
help="path to Dataset")
parser.add_argument("--dataset", type=str, default='voc',
choices=['voc', 'cityscapes'], help='Name of dataset')
parser.add_argument("--num_classes", type=int, default=None,
help="num classes (default: None)")
# Deeplab Options
parser.add_argument("--model", type=str, default='deeplabv3plus_metirc_resnet101',
choices=['deeplabv3_resnet50', 'deeplabv3plus_resnet50',
'deeplabv3_resnet101', 'deeplabv3plus_resnet101',
'deeplabv3_mobilenet', 'deeplabv3plus_mobilenet',
'deeplabv3plus_embedding_resnet101','deeplabv3plus_metirc_resnet101'], help='model name')
parser.add_argument("--separable_conv", action='store_true', default=False,
help="apply separable conv to decoder and aspp")
parser.add_argument("--output_stride", type=int, default=16, choices=[8, 16])
parser.add_argument("--metric_dim", type=int, default=None,
help="num classes (default: None)")
# Train Options
parser.add_argument("--test_only", action='store_true', default=False)
parser.add_argument("--save_val_results", action='store_true', default=False,
help="save segmentation results to \"./results\"")
parser.add_argument("--total_itrs", type=int, default=30e3,
help="epoch number (default: 30k)")
parser.add_argument("--lr", type=float, default=0.01,
help="learning rate (default: 0.01)")
parser.add_argument("--lr_policy", type=str, default='poly', choices=['poly', 'step'],
help="learning rate scheduler policy")
parser.add_argument("--step_size", type=int, default=10000)
parser.add_argument("--crop_val", action='store_true', default=False,
help='crop validation (default: False)')
parser.add_argument("--batch_size", type=int, default=16,
help='batch size (default: 16)')
parser.add_argument("--val_batch_size", type=int, default=1,
                        help='batch size for validation (default: 1)')
parser.add_argument("--crop_size", type=int, default=513)
parser.add_argument("--center", action='store_true', default=False,
help="use center checkpoint")
parser.add_argument("--center_checkpoint", type=str, default='./center.npy',
help="use center checkpoint")
parser.add_argument("--ckpt", default=None, type=str,
help="restore from checkpoint")
parser.add_argument("--continue_training", action='store_true', default=False)
parser.add_argument("--loss_type", type=str, default='cross_entropy',
choices=['cross_entropy', 'focal_loss'], help="loss type (default: False)")
parser.add_argument("--gpu_id", type=str, default='0',
help="GPU ID")
parser.add_argument("--weight_decay", type=float, default=1e-4,
help='weight decay (default: 1e-4)')
parser.add_argument("--random_seed", type=int, default=1,
help="random seed (default: 1)")
parser.add_argument("--print_interval", type=int, default=10,
help="print interval of loss (default: 10)")
parser.add_argument("--val_interval", type=int, default=100,
help="epoch interval for eval (default: 100)")
parser.add_argument("--download", action='store_true', default=False,
help="download datasets")
# PASCAL VOC Options
parser.add_argument("--year", type=str, default='2012',
choices=['2012_aug', '2012', '2011', '2009', '2008', '2007'], help='year of VOC')
# Visdom options
parser.add_argument("--enable_vis", action='store_true', default=False,
help="use visdom for visualization")
parser.add_argument("--vis_port", type=str, default='13570',
help='port for visdom')
parser.add_argument("--vis_env", type=str, default='main',
help='env for visdom')
parser.add_argument("--vis_num_samples", type=int, default=8,
help='number of samples for visualization (default: 8)')
return parser
def Normalization(x):
min_value = np.min(x)
max_value = np.max(x)
return (x - min_value) / (max_value - min_value)
def Certainty(x, ecdf, thre1, thre2, mean, cov):
x = ecdf(x)
# res = x
# res[res>0.2] = 1
threshold = ecdf(thre1)
coefficient = 50
res = 1 / (1 + np.exp(-coefficient * (x - threshold)))
return res
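# Certainty maps a raw score through the empirical CDF of reference scores and
# then a steep sigmoid centered at ecdf(thre1). A minimal sketch with made-up
# numbers (thre2/mean/cov are accepted but unused by Certainty above):
def _demo_certainty():
    scores = np.random.randn(1000) # hypothetical reference distribution
    ecdf = ECDF(scores)
    return Certainty(np.array([0.0, 2.0]), ecdf, thre1=0.5, thre2=None, mean=None, cov=None)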
def get_dataset(opts):
""" Dataset And Augmentation
"""
if opts.dataset == 'voc':
train_transform = et.ExtCompose([
#et.ExtResize(size=opts.crop_size),
et.ExtRandomScale((0.5, 2.0)),
et.ExtRandomCrop(size=(opts.crop_size, opts.crop_size), pad_if_needed=True),
et.ExtRandomHorizontalFlip(),
et.ExtToTensor(),
et.ExtNormalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]),
])
if opts.crop_val:
val_transform = et.ExtCompose([
et.ExtResize(opts.crop_size),
et.ExtCenterCrop(opts.crop_size),
et.ExtToTensor(),
et.ExtNormalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]),
])
else:
val_transform = et.ExtCompose([
et.ExtToTensor(),
et.ExtNormalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]),
])
train_dst = VOCSegmentation(root=opts.data_root, year=opts.year,
image_set='train', download=opts.download, transform=train_transform)
val_dst = VOCSegmentation(root=opts.data_root, year=opts.year,
image_set='val', download=False, transform=val_transform)
if opts.dataset == 'cityscapes':
train_transform = et.ExtCompose([
#et.ExtResize( 512 ),
et.ExtRandomCrop(size=(opts.crop_size, opts.crop_size)),
et.ExtColorJitter( brightness=0.5, contrast=0.5, saturation=0.5 ),
et.ExtRandomHorizontalFlip(),
et.ExtToTensor(),
et.ExtNormalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]),
])
val_transform = et.ExtCompose([
#et.ExtResize( 512 ),
et.ExtToTensor(),
et.ExtNormalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]),
])
train_dst = Cityscapes(root=opts.data_root,
split='train', transform=train_transform)
val_dst = Cityscapes(root=opts.data_root,
split='val', transform=val_transform)
novel_dst = Cityscapes(root=opts.data_root,
split='train', transform=val_transform)
return train_dst, val_dst, novel_dst
def Coefficient_map(x, thre):
lamda = 20
return 1 / (1 + np.exp(lamda * (x - thre)))
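# val() below calls cosine_similarity, which is neither defined nor imported in
# this file; a minimal sketch assuming plain cosine similarity between 1-D numpy
# embeddings (an assumption, not necessarily the original helper):
def cosine_similarity(x, y, eps=1e-8):
    return float(np.dot(x, y) / (np.linalg.norm(x) * np.linalg.norm(y) + eps))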
def val(opts, model, metric_model, train_loader, val_loader, device):
metrics16 = StreamSegMetrics(19)
metrics19 = StreamSegMetrics(19)
model.eval()
metric_model.eval()
# val_save_dir = os.path.join(opts.output_dir, 'val')
# os.makedirs(val_save_dir, exist_ok=True)
# denorm = utils.Denormalize(mean=[0.485, 0.456, 0.406],
# std=[0.229, 0.224, 0.225])
center_embedding = generate_novel('novel', Cityscapes.unknown_target, model, metric_model, device) # {13: (128,), 14: (128,), 15: (128,)}
#center_embedding = align_embedding(opts, model, metric_model, train_loader, device, center_embedding)
for _, (images, labels, labels_true, _, _) in tqdm(enumerate(val_loader)):
assert images.shape[0] == 1
with torch.no_grad():
images = images.to(device, dtype=torch.float32)
labels = labels.to(device, dtype=torch.long)
labels_true = labels_true.to(device, dtype=torch.long)
outputs, logits, features, _ = model(images) # outputs: (1, 16, H, W), logits: (1, 20, H, W), features: (1, 256, H/4, W/4)
logits = F.interpolate(logits, size=features.shape[-2:], mode='bilinear', align_corners=False) # (1, 20, H/4, W/4)
features = features[0].detach().cpu().numpy() # (256, H/4, W/4)
outputs = torch.argmax(outputs, dim=1)[0].detach().cpu().numpy() # (H, W)
metrics16.update(labels[0].detach().cpu().numpy(), outputs)
outputs19 = deepcopy(outputs)
#outputs19[outputs19 == 13] = 16
#outputs19[outputs19 == 14] = 17
#outputs19[outputs19 == 15] = 18
logits = logits[0].detach().cpu().numpy() # (20, H/4, W/4)
            logits = logits[-9:] # (9, H/4, W/4)
            logits = (logits >= 0.5).astype(np.uint8) # (9, H/4, W/4)
for c in range(logits.shape[0]):
logit = logits[c] # (H/4, W/4)
#Hl, Wl = logit.shape
#logit = cv2.resize(logit, (Wl//4, Hl//4), interpolation=cv2.INTER_NEAREST)
num_object, connect = cv2.connectedComponents(logit)
#connect = cv2.resize(connect, (Wl, Hl), interpolation=cv2.INTER_NEAREST)
                for k in range(1, num_object): # labels are 0..num_object-1; 0 is background
mask = (connect == k)[np.newaxis, ...].astype(np.uint8) # (1, H/4, W/4)
if np.sum(mask) < 100: continue
embedding = (features * mask).reshape(features.shape[0], -1).sum(axis=-1) # (256,)
embedding = embedding / np.sum(mask)
embedding = torch.Tensor(embedding).unsqueeze(dim=0).to(device, dtype=torch.float32) # (1, 256)
embedding = metric_model.forward_feature(embedding)[0].cpu().detach().numpy() # (128,)
tmp_key, tmp_cos = None, None
for key, value in center_embedding.items():
cos = cosine_similarity(embedding, value)
if cos >= 0.75:
if tmp_cos is None or cos > tmp_cos:
tmp_key = key
tmp_cos = cos
if tmp_key is not None:
mask = cv2.resize(mask[0], outputs19.shape[::-1], interpolation=cv2.INTER_NEAREST)
outputs19 = mask * tmp_key + outputs19 * (1 - mask)
metrics19.update(labels_true[0].detach().cpu().numpy(), outputs19)
score16 = metrics16.get_results()
score19 = metrics19.get_results()
print('16 classes')
print(metrics16.to_str(score16))
print()
print('19 classes')
print(metrics19.to_str(score19))
def select_novel_each_target(novel_loader, unknown_target, device, save_path, shot_num=5):
print('select novel '+str(unknown_target))
now_path=os.path.join(save_path,str(unknown_target))
    if not os.path.exists(now_path):
os.makedirs(now_path)
file_path=os.path.join(now_path,'novel.txt')
f = open(file_path,'a',encoding = "utf-8")
q = queue.PriorityQueue()
for (images, labels, labels_true, image_name, target_name) in novel_loader:
labels_true=labels_true.to(device, dtype=torch.long)
now_sum=torch.sum(labels_true==unknown_target).data.cpu()
q.put([now_sum,(image_name,target_name)])
if (q.qsize()>shot_num): q.get()
assert q.qsize()==shot_num
    while not q.empty():
now_sum,now_name=q.get()
image_name="".join(now_name[0])
target_name="".join(now_name[1])
f.write(image_name+'\t'+target_name+'\n')
f.close()
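# The PriorityQueue above is a fixed-size min-heap: every image is pushed with
# its unknown-class pixel count, and whenever the queue exceeds shot_num the
# smallest entry is evicted, leaving the shot_num images that contain the most
# pixels of the target class. The same pattern with heapq (hypothetical data):
def _topk_by_pixel_count(counts, shot_num=5):
    import heapq
    heap = []
    for name, cnt in counts: # counts: iterable of (image_name, pixel_count)
        heapq.heappush(heap, (cnt, name))
        if len(heap) > shot_num:
            heapq.heappop(heap) # drop the current smallest
    return heap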
def select_novel(novel_loader, unknown_list, device, save_path='./novel', shot_num=5):
    if not os.path.exists(save_path):
os.makedirs(save_path)
for x in unknown_list:
select_novel_each_target(novel_loader,x,device,save_path, shot_num)
def generate_novel(novel_all, novel_path_name, unknown_list, model, device, shot_num=5):
model.eval()
novel_transform = et.ExtCompose([
et.ExtToTensor(),
et.ExtNormalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]),
])
with torch.no_grad():
for x in unknown_list:
print('generate novel: '+str(x))
log_path=os.path.join(novel_path_name,str(x))
center=None
novel_dst = Cityscapes_Novel(novel_path=novel_path_name,novel_no=x, transform=novel_transform)
novel_loader = data.DataLoader(novel_dst, batch_size=1, shuffle=False, num_workers=4)
novel_sum=0
for (image,target) in novel_loader:
print(image.max(), image.min(), '--------------')
image=image.to(device)
target=target.to(device,dtype=torch.long)
print(image.shape)
output,feature=model(image)
if target.shape[-1] != feature.shape[-1]:
target = torch.nn.functional.interpolate(target.unsqueeze(dim=1).float(), size=feature.shape[-2:], mode="nearest").squeeze(dim=1)
feature=feature.permute(0, 2, 3, 1)
b,h,w,c=feature.shape
feature=feature.view(h*w,c)
target=target.flatten()
print(target.shape)
print(feature.shape)
# for c in range(19):
# if c in target:
# temp=feature[target==c]
# print(c, np.round(np.mean(temp.detach().cpu().numpy(), axis=0), 2))
feature=feature[target==x]
feature=torch.sum(feature,dim=0)
                if center is None: center = torch.zeros(c,).to(device)
center+=feature
novel_sum+=torch.sum(target==x)
center=center/novel_sum
center_path=os.path.join(log_path,'novel.pth')
print(center.shape)
torch.save(center,center_path)
novel_all[x]=center.clone()
return novel_all
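# generate_novel builds a class prototype: the mean backbone feature over every
# pixel labeled with the unknown class across the few-shot images, i.e.
# center_x = sum_i sum_p 1[y_ip == x] * f_ip / sum_i sum_p 1[y_ip == x].
# The per-image masked mean in isolation (shapes assumed, not repo code):
def _masked_mean_feature(feature, target, class_id):
    # feature: (C, H, W) float tensor, target: (H, W) long tensor
    mask = (target == class_id).float() # (H, W)
    summed = (feature * mask.unsqueeze(0)).sum(dim=(1, 2)) # (C,)
    return summed / mask.sum().clamp(min=1)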
# def get_novel(center, num_classes, unknown_list):
# novel = torch.empty((num_classes,center.shape[1]))
# n=0
# x=0
# while (n<num_classes):
# if n in unknown_list:
# n+=1
# continue
# novel[n]=center[x].clone()
# x+=1
# n+=1
# return novel
def main():
opts = get_argparser().parse_args()
if opts.dataset.lower() == 'voc':
opts.num_classes = 21
elif opts.dataset.lower() == 'cityscapes':
opts.num_classes = 19
# Setup visualization
vis = Visualizer(port=opts.vis_port,
env=opts.vis_env) if opts.enable_vis else None
if vis is not None: # display options
vis.vis_table("Options", vars(opts))
os.environ['CUDA_VISIBLE_DEVICES'] = opts.gpu_id
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
print("Device: %s" % device)
# Setup random seed
torch.manual_seed(opts.random_seed)
np.random.seed(opts.random_seed)
random.seed(opts.random_seed)
# Setup dataloader
if opts.dataset=='voc' and not opts.crop_val:
opts.val_batch_size = 1
train_dst, val_dst, novel_dst = get_dataset(opts)
train_loader = data.DataLoader(
train_dst, batch_size=opts.batch_size, shuffle=True, num_workers=16)
val_loader = data.DataLoader(
val_dst, batch_size=opts.val_batch_size, shuffle=False, num_workers=16)
novel_loader = data.DataLoader(
novel_dst, batch_size=opts.val_batch_size, shuffle=False, num_workers=16)
print("Dataset: %s, Train set: %d, Val set: %d" %
(opts.dataset, len(train_dst), len(val_dst)))
# Set up model
model_map = {
'deeplabv3_resnet50': network.deeplabv3_resnet50,
'deeplabv3plus_resnet50': network.deeplabv3plus_resnet50,
'deeplabv3_resnet101': network.deeplabv3_resnet101,
'deeplabv3plus_resnet101': network.deeplabv3plus_resnet101,
'deeplabv3plus_embedding_resnet101': network.deeplabv3plus_embedding_resnet101,
'deeplabv3_mobilenet': network.deeplabv3_mobilenet,
'deeplabv3plus_mobilenet': network.deeplabv3plus_mobilenet,
'deeplabv3plus_metirc_resnet101': network.deeplabv3plus_metirc_resnet101
}
model = model_map[opts.model](num_classes=opts.num_classes, output_stride=opts.output_stride)
if opts.separable_conv and 'plus' in opts.model:
network.convert_to_separable_conv(model.classifier)
utils.set_bn_momentum(model.backbone, momentum=0.01)
# Set up metrics
metrics = StreamSegMetrics(opts.num_classes)
# Set up optimizer
optimizer = torch.optim.SGD(params=[
{'params': model.backbone.parameters(), 'lr': 0.1*opts.lr},
{'params': model.classifier.parameters(), 'lr': opts.lr},
], lr=opts.lr, momentum=0.9, weight_decay=opts.weight_decay)
#optimizer = torch.optim.SGD(params=model.parameters(), lr=opts.lr, momentum=0.9, weight_decay=opts.weight_decay)
#torch.optim.lr_scheduler.StepLR(optimizer, step_size=opts.lr_decay_step, gamma=opts.lr_decay_factor)
if opts.lr_policy=='poly':
scheduler = utils.PolyLR(optimizer, opts.total_itrs, power=0.9)
elif opts.lr_policy=='step':
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=opts.step_size, gamma=0.1)
# Set up criterion
#criterion = utils.get_loss(opts.loss_type)
if opts.loss_type == 'focal_loss':
criterion = utils.FocalLoss(ignore_index=255, size_average=True)
elif opts.loss_type == 'cross_entropy':
criterion = utils.CrossEntropyLoss(ignore_index=255, alpha=0.01, beta=0.01/80, gamma=0)
# def save_ckpt(path):
# """ save current model
# """
# torch.save({
# "cur_itrs": cur_itrs,
# "model_state": model.module.state_dict(),
# "optimizer_state": optimizer.state_dict(),
# "scheduler_state": scheduler.state_dict(),
# "best_score": best_score,
# }, path)
# print("Model saved as %s" % path)
utils.mkdir('checkpoints_131415_embedding')
# Restore
# best_score = 0.0
# cur_itrs = 0
# cur_epochs = 0
if opts.ckpt is not None and os.path.isfile(opts.ckpt):
# https://github.com/VainF/DeepLabV3Plus-Pytorch/issues/8#issuecomment-605601402, @PytaichukBohdan
checkpoint = torch.load(opts.ckpt, map_location=torch.device('cpu'))
res = model.load_state_dict(checkpoint["model_state"])
print(res)
#model = nn.DataParallel(model)
model.to(device)
if opts.continue_training:
optimizer.load_state_dict(checkpoint["optimizer_state"])
scheduler.load_state_dict(checkpoint["scheduler_state"])
cur_itrs = checkpoint["cur_itrs"]
best_score = checkpoint['best_score']
print("Training state restored from %s" % opts.ckpt)
print("Model restored from %s" % opts.ckpt)
del checkpoint # free memory
else:
print("[!] Retrain")
opts.gpu_id = [1]
# model = nn.DataParallel(model,device_ids=opts.gpu_id)
#model = nn.DataParallel(model)
model = model.cuda()
#========== Train Loop ==========#
vis_sample_id = np.random.randint(0, len(val_loader), opts.vis_num_samples,
np.int32) if opts.enable_vis else None # sample idxs for visualization
denorm = utils.Denormalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) # denormalization for ori images
#print(model)
# if (opts.center):
# center=torch.load(opts.center_checkpoint)
# print(center.shape, opts.num_classes, train_dst.unknown_target, '++++++++++')
#novel=get_novel(center,opts.num_classes,train_dst.unknown_target)
novel=np.load(opts.center_checkpoint)
novel=torch.from_numpy(novel)
# novel=torch.load('center.pth')
# novel=torch.cat([novel[:13], torch.zeros((3, novel.shape[1])).float().to(novel.device), novel[13:]], dim=0)
novel=novel.to(device)
print(novel.shape)
#select_novel(novel_loader,train_dst.unknown_target,device)
novel=generate_novel(novel,'./novel',Cityscapes.unknown_target,model,device,shot_num=5)
novel=torch.relu(novel)
for i in range(novel.shape[0]):
print(i, novel[i].detach().cpu().numpy())
novel=novel.to(device)
print(novel.shape)
# for i in range(novel.shape[0]):
# print(i, np.round(novel[i].detach().cpu().numpy(), 2))
# return
print('eval mode')
model.eval()
val_score, ret_samples = validate(
opts=opts, model=model, loader=val_loader, device=device, metrics=metrics, novel=novel, ret_samples_ids=vis_sample_id)
print(metrics.to_str(val_score))
return
# if opts.test_only:
# model.eval()
# val_score, ret_samples = validate(
# opts=opts, model=model, loader=val_loader, device=device, metrics=metrics, ret_samples_ids=vis_sample_id)
# print(metrics.to_str(val_score))
# return
# interval_loss = 0
# while True: #cur_itrs < opts.total_itrs:
# # ===== Train =====
# model.train()
# cur_epochs += 1
# for (images, labels, labels_true) in train_loader:
# cur_itrs += 1
# images = images.to(device, dtype=torch.float32)
# labels = labels.to(device, dtype=torch.long)
# optimizer.zero_grad()
# outputs, centers, features = model(images)
# loss = criterion(outputs, labels, features)
# loss.backward()
# optimizer.step()
# np_loss = loss.detach().cpu().numpy()
# interval_loss += np_loss
# if vis is not None:
# vis.vis_scalar('Loss', cur_itrs, np_loss)
# if (cur_itrs) % 10 == 0:
# interval_loss = interval_loss/10
# print("Epoch %d, Itrs %d/%d, Loss=%f" %
# (cur_epochs, cur_itrs, opts.total_itrs, interval_loss))
# interval_loss = 0.0
# if (cur_itrs) % opts.val_interval == 0:
# save_ckpt('checkpoints_131415_embedding/latest_%s_%s_os%d.pth' %
# (opts.model, opts.dataset, opts.output_stride))
# print("validation...")
# model.eval()
# val_score, ret_samples = validate(
# opts=opts, model=model, loader=val_loader, device=device, metrics=metrics, ret_samples_ids=vis_sample_id)
# print(metrics.to_str(val_score))
# if val_score['Mean IoU'] > best_score: # save best model
# best_score = val_score['Mean IoU']
# save_ckpt('checkpoints_131415_embedding/best_%s_%s_os%d.pth' %
# (opts.model, opts.dataset,opts.output_stride))
# if vis is not None: # visualize validation score and samples
# vis.vis_scalar("[Val] Overall Acc", cur_itrs, val_score['Overall Acc'])
# vis.vis_scalar("[Val] Mean IoU", cur_itrs, val_score['Mean IoU'])
# vis.vis_table("[Val] Class IoU", val_score['Class IoU'])
# for k, (img, target, lbl) in enumerate(ret_samples):
# img = (denorm(img) * 255).astype(np.uint8)
# target = train_dst.decode_target(target).transpose(2, 0, 1).astype(np.uint8)
# lbl = train_dst.decode_target(lbl).transpose(2, 0, 1).astype(np.uint8)
# concat_img = np.concatenate((img, target, lbl), axis=2) # concat along width
# vis.vis_image('Sample %d' % k, concat_img)
# model.train()
# scheduler.step()
# if cur_itrs >= opts.total_itrs:
# return
if __name__ == '__main__':
main()
| 31,049 | 46.40458 | 153 | py |
RAML | RAML-master/incremental/datasets/voc.py | import os
import sys
import tarfile
import collections
import torch.utils.data as data
import shutil
import numpy as np
from PIL import Image
from torchvision.datasets.utils import download_url, check_integrity
DATASET_YEAR_DICT = {
'2012': {
'url': 'http://host.robots.ox.ac.uk/pascal/VOC/voc2012/VOCtrainval_11-May-2012.tar',
'filename': 'VOCtrainval_11-May-2012.tar',
'md5': '6cd6e144f989b92b3379bac3b3de84fd',
'base_dir': 'VOCdevkit/VOC2012'
},
'2011': {
'url': 'http://host.robots.ox.ac.uk/pascal/VOC/voc2011/VOCtrainval_25-May-2011.tar',
'filename': 'VOCtrainval_25-May-2011.tar',
'md5': '6c3384ef61512963050cb5d687e5bf1e',
'base_dir': 'TrainVal/VOCdevkit/VOC2011'
},
'2010': {
'url': 'http://host.robots.ox.ac.uk/pascal/VOC/voc2010/VOCtrainval_03-May-2010.tar',
'filename': 'VOCtrainval_03-May-2010.tar',
'md5': 'da459979d0c395079b5c75ee67908abb',
'base_dir': 'VOCdevkit/VOC2010'
},
'2009': {
'url': 'http://host.robots.ox.ac.uk/pascal/VOC/voc2009/VOCtrainval_11-May-2009.tar',
'filename': 'VOCtrainval_11-May-2009.tar',
'md5': '59065e4b188729180974ef6572f6a212',
'base_dir': 'VOCdevkit/VOC2009'
},
'2008': {
'url': 'http://host.robots.ox.ac.uk/pascal/VOC/voc2008/VOCtrainval_14-Jul-2008.tar',
        'filename': 'VOCtrainval_14-Jul-2008.tar',
'md5': '2629fa636546599198acfcfbfcf1904a',
'base_dir': 'VOCdevkit/VOC2008'
},
'2007': {
'url': 'http://host.robots.ox.ac.uk/pascal/VOC/voc2007/VOCtrainval_06-Nov-2007.tar',
'filename': 'VOCtrainval_06-Nov-2007.tar',
'md5': 'c52e279531787c972589f7e41ab4ae64',
'base_dir': 'VOCdevkit/VOC2007'
}
}
def voc_cmap(N=256, normalized=False):
def bitget(byteval, idx):
return ((byteval & (1 << idx)) != 0)
dtype = 'float32' if normalized else 'uint8'
cmap = np.zeros((N, 3), dtype=dtype)
for i in range(N):
r = g = b = 0
c = i
for j in range(8):
r = r | (bitget(c, 0) << 7-j)
g = g | (bitget(c, 1) << 7-j)
b = b | (bitget(c, 2) << 7-j)
c = c >> 3
cmap[i] = np.array([r, g, b])
cmap = cmap/255 if normalized else cmap
return cmap
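# voc_cmap interleaves the low bits of each label index into the high bits of
# R, G and B, which reproduces the standard PASCAL VOC palette, e.g.:
# voc_cmap()[0] -> [ 0, 0, 0] (background)
# voc_cmap()[1] -> [128, 0, 0] (class 1)
# voc_cmap()[2] -> [ 0, 128, 0] (class 2)
# voc_cmap()[3] -> [128, 128, 0] (class 3)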
class VOCSegmentation(data.Dataset):
"""`Pascal VOC <http://host.robots.ox.ac.uk/pascal/VOC/>`_ Segmentation Dataset.
Args:
root (string): Root directory of the VOC Dataset.
year (string, optional): The dataset year, supports years 2007 to 2012.
image_set (string, optional): Select the image_set to use, ``train``, ``trainval`` or ``val``
download (bool, optional): If true, downloads the dataset from the internet and
puts it in root directory. If dataset is already downloaded, it is not
downloaded again.
transform (callable, optional): A function/transform that takes in an PIL image
and returns a transformed version. E.g, ``transforms.RandomCrop``
"""
cmap = voc_cmap()
def __init__(self,
root,
year='2012',
image_set='train',
download=False,
transform=None):
is_aug=False
if year=='2012_aug':
is_aug = True
year = '2012'
self.root = os.path.expanduser(root)
self.year = year
self.url = DATASET_YEAR_DICT[year]['url']
self.filename = DATASET_YEAR_DICT[year]['filename']
self.md5 = DATASET_YEAR_DICT[year]['md5']
self.transform = transform
self.image_set = image_set
base_dir = DATASET_YEAR_DICT[year]['base_dir']
voc_root = os.path.join(self.root, base_dir)
image_dir = os.path.join(voc_root, 'JPEGImages')
if download:
download_extract(self.url, self.root, self.filename, self.md5)
if not os.path.isdir(voc_root):
raise RuntimeError('Dataset not found or corrupted.' +
' You can use download=True to download it')
if is_aug and image_set=='train':
mask_dir = os.path.join(voc_root, 'SegmentationClassAug')
assert os.path.exists(mask_dir), "SegmentationClassAug not found, please refer to README.md and prepare it manually"
split_f = os.path.join( self.root, 'train_aug.txt')#'./datasets/data/train_aug.txt'
else:
mask_dir = os.path.join(voc_root, 'SegmentationClass')
splits_dir = os.path.join(voc_root, 'ImageSets/Segmentation')
split_f = os.path.join(splits_dir, image_set.rstrip('\n') + '.txt')
if not os.path.exists(split_f):
raise ValueError(
'Wrong image_set entered! Please use image_set="train" '
'or image_set="trainval" or image_set="val"')
with open(os.path.join(split_f), "r") as f:
file_names = [x.strip() for x in f.readlines()]
self.images = [os.path.join(image_dir, x + ".jpg") for x in file_names]
self.masks = [os.path.join(mask_dir, x + ".png") for x in file_names]
assert (len(self.images) == len(self.masks))
def __getitem__(self, index):
"""
Args:
index (int): Index
Returns:
tuple: (image, target) where target is the image segmentation.
"""
img = Image.open(self.images[index]).convert('RGB')
target = Image.open(self.masks[index])
if self.transform is not None:
img, target = self.transform(img, target)
return img, target
def __len__(self):
return len(self.images)
@classmethod
def decode_target(cls, mask):
"""decode semantic mask to RGB image"""
return cls.cmap[mask]
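# A minimal usage sketch (paths/year hypothetical; needs the dataset on disk):
# dst = VOCSegmentation(root='./datasets/data', year='2012', image_set='val')
# img, target = dst[0] # PIL images unless a transform converts them
# rgb = VOCSegmentation.decode_target(np.array(target)) # (H, W, 3) uint8 color mask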
def download_extract(url, root, filename, md5):
download_url(url, root, filename, md5)
with tarfile.open(os.path.join(root, filename), "r") as tar:
tar.extractall(path=root) | 6,061 | 36.190184 | 128 | py |
RAML | RAML-master/incremental/datasets/cityscapes.py | import json
import os
from collections import namedtuple
import torch
import torch.utils.data as data
from PIL import Image
import numpy as np
import matplotlib.pyplot as plt
from torchvision import transforms
import cv2
class Cityscapes(data.Dataset):
"""Cityscapes <http://www.cityscapes-dataset.com/> Dataset.
**Parameters:**
- **root** (string): Root directory of dataset where directory 'leftImg8bit' and 'gtFine' or 'gtCoarse' are located.
- **split** (string, optional): The image split to use, 'train', 'test' or 'val' if mode="gtFine" otherwise 'train', 'train_extra' or 'val'
- **mode** (string, optional): The quality mode to use, 'gtFine' or 'gtCoarse' or 'color'. Can also be a list to output a tuple with all specified target types.
- **transform** (callable, optional): A function/transform that takes in a PIL image and returns a transformed version. E.g, ``transforms.RandomCrop``
- **target_transform** (callable, optional): A function/transform that takes in the target and transforms it.
"""
# Based on https://github.com/mcordts/cityscapesScripts
CityscapesClass = namedtuple('CityscapesClass', ['name', 'id', 'train_id', 'category', 'category_id',
'has_instances', 'ignore_in_eval', 'color'])
classes = [
CityscapesClass('unlabeled', 0, 255, 'void', 0, False, True, (0, 0, 0)),
CityscapesClass('ego vehicle', 1, 255, 'void', 0, False, True, (0, 0, 0)),
CityscapesClass('rectification border', 2, 255, 'void', 0, False, True, (0, 0, 0)),
CityscapesClass('out of roi', 3, 255, 'void', 0, False, True, (0, 0, 0)),
CityscapesClass('static', 4, 255, 'void', 0, False, True, (0, 0, 0)),
CityscapesClass('dynamic', 5, 255, 'void', 0, False, True, (111, 74, 0)),
CityscapesClass('ground', 6, 255, 'void', 0, False, True, (81, 0, 81)),
CityscapesClass('road', 7, 0, 'flat', 1, False, False, (128, 64, 128)),
CityscapesClass('sidewalk', 8, 1, 'flat', 1, False, False, (244, 35, 232)),
CityscapesClass('parking', 9, 255, 'flat', 1, False, True, (250, 170, 160)),
CityscapesClass('rail track', 10, 255, 'flat', 1, False, True, (230, 150, 140)),
CityscapesClass('building', 11, 2, 'construction', 2, False, False, (70, 70, 70)),
CityscapesClass('wall', 12, 3, 'construction', 2, False, False, (102, 102, 156)),
CityscapesClass('fence', 13, 4, 'construction', 2, False, False, (190, 153, 153)),
CityscapesClass('guard rail', 14, 255, 'construction', 2, False, True, (180, 165, 180)),
CityscapesClass('bridge', 15, 255, 'construction', 2, False, True, (150, 100, 100)),
CityscapesClass('tunnel', 16, 255, 'construction', 2, False, True, (150, 120, 90)),
CityscapesClass('pole', 17, 5, 'object', 3, False, False, (153, 153, 153)),
CityscapesClass('polegroup', 18, 255, 'object', 3, False, True, (153, 153, 153)),
CityscapesClass('traffic light', 19, 6, 'object', 3, False, False, (250, 170, 30)),
CityscapesClass('traffic sign', 20, 7, 'object', 3, False, False, (220, 220, 0)),
CityscapesClass('vegetation', 21, 8, 'nature', 4, False, False, (107, 142, 35)),
CityscapesClass('terrain', 22, 9, 'nature', 4, False, False, (152, 251, 152)),
CityscapesClass('sky', 23, 10, 'sky', 5, False, False, (70, 130, 180)),
CityscapesClass('person', 24, 11, 'human', 6, True, False, (220, 20, 60)),
CityscapesClass('rider', 25, 12, 'human', 6, True, False, (255, 0, 0)),
CityscapesClass('car', 26, 13, 'vehicle', 7, True, False, (0, 0, 142)),
CityscapesClass('truck', 27, 14, 'vehicle', 7, True, False, (0, 0, 70)),
CityscapesClass('bus', 28, 15, 'vehicle', 7, True, False, (0, 60, 100)),
CityscapesClass('caravan', 29, 255, 'vehicle', 7, True, True, (0, 0, 90)),
CityscapesClass('trailer', 30, 255, 'vehicle', 7, True, True, (0, 0, 110)),
CityscapesClass('train', 31, 16, 'vehicle', 7, True, False, (0, 80, 100)),
CityscapesClass('motorcycle', 32, 17, 'vehicle', 7, True, False, (0, 0, 230)),
CityscapesClass('bicycle', 33, 18, 'vehicle', 7, True, False, (119, 11, 32)),
CityscapesClass('license plate', -1, 255, 'vehicle', 7, False, True, (0, 0, 142)),
]
train_id_to_color = [c.color for c in classes if (c.train_id != -1 and c.train_id != 255)]
train_id_to_color.append([0, 0, 0])
train_id_to_color = np.array(train_id_to_color)
id_to_train_id = np.array([c.train_id for c in classes])
unknown_target = None
# unknown_target = [1, 3, 4, 5, 6, 7, 8, 9, 12, 14, 15, 16, 18]
# 12+7
unknown_target = [10,13,14,15,16,17,18]
# 14+5
# unknown_target = [10,13,14,15,16]
# 18+1
#unknown_target = [13]
# 16+3 / 16+1
#unknown_target = [13,14,15]
# unknown_target = [i for i in range(19)]
# unknown_target.pop(13)
print('unknown_target is : ', unknown_target)
# unknown_target = [18]
#train_id_to_color = [(0, 0, 0), (128, 64, 128), (70, 70, 70), (153, 153, 153), (107, 142, 35),
# (70, 130, 180), (220, 20, 60), (0, 0, 142)]
#train_id_to_color = np.array(train_id_to_color)
#id_to_train_id = np.array([c.category_id for c in classes], dtype='uint8') - 1
def __init__(self, root, split='train', mode='fine', target_type='semantic', transform=None):
self.root = os.path.expanduser(root)
self.mode = 'gtFine'
self.target_type = target_type
self.images_dir = os.path.join(self.root, 'leftImg8bit', split)
self.targets_dir = os.path.join(self.root, self.mode, split)
# self.targets_dir = self.images_dir
self.transform = transform
self.split = split
self.images = []
self.targets = []
if split not in ['train', 'test_car', 'val','test_truck', 'test_bus', 'test_car_1_shot',
'test_truck_1_shot', 'test_bus_1_shot', 'car_vis', 'bus_vis','demo_video',
'car_100','car_1000']:
            raise ValueError('Invalid split! Please use "train", "val", or one of the'
                             ' supported test/visualization splits listed above')
if not os.path.isdir(self.images_dir) or not os.path.isdir(self.targets_dir):
raise RuntimeError('Dataset not found or incomplete. Please make sure all required folders for the'
' specified "split" and "mode" are inside the "root" directory')
for city in os.listdir(self.images_dir):
img_dir = os.path.join(self.images_dir, city)
target_dir = os.path.join(self.targets_dir, city)
files_name = os.listdir(img_dir)
files_name = sorted(files_name)
for file_name in files_name:
self.images.append(os.path.join(img_dir, file_name))
target_name = '{}_{}'.format(file_name.split('_leftImg8bit')[0],
self._get_target_suffix(self.mode, self.target_type))
self.targets.append(os.path.join(target_dir, target_name))
@classmethod
def encode_target(cls, target):
target = cls.id_to_train_id[np.array(target)]
target_true = target.copy()
# instance, counts = np.unique(target, False, False, True)
# print('target', instance, counts)
if cls.unknown_target != None:
cont = 0
for h_c in cls.unknown_target:
target[target == h_c - cont] = 100
for c in range(h_c - cont + 1, 19):
target[target == c] = c - 1
# target_true[target_true == c] = c - 1
cont = cont + 1
# target_true[target == 100] = 19 - len(cls.unknown_target)
target[target == 100] = 255
return target, target_true
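    # Worked example of the remapping above, assuming the active setting
    # unknown_target = [10, 13, 14, 15, 16, 17, 18]: train ids 0-9 are kept,
    # 11 (person) -> 10 and 12 (rider) -> 11, while every unknown id is first
    # tagged 100 and finally set to 255 (ignore). target_true keeps the
    # original 19-class ids for evaluation.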
@classmethod
def decode_target(cls, target):
target[target == 255] = 19
#target = target.astype('uint8') + 1
return cls.train_id_to_color[target]
def __getitem__(self, index):
"""
Args:
index (int): Index
Returns:
tuple: (image, target) where target is a tuple of all target types if target_type is a list with more
than one item. Otherwise target is a json object if target_type="polygon", else the image segmentation.
"""
image = Image.open(self.images[index]).convert('RGB')
# image = Image.open(self.images[index])
target = Image.open(self.targets[index])
if self.transform:
image, target = self.transform(image, target)
target, target_true = self.encode_target(target)
target_lst, class_lst = self.encode_target_czifan(target)
return image, target, target_true, target_lst, class_lst
def __len__(self):
return len(self.images)
def _load_json(self, path):
with open(path, 'r') as file:
data = json.load(file)
return data
def _get_target_suffix(self, mode, target_type):
if target_type == 'instance':
return '{}_instanceIds.png'.format(mode)
elif target_type == 'semantic':
return '{}_labelIds.png'.format(mode)
elif target_type == 'color':
return '{}_color.png'.format(mode)
elif target_type == 'polygon':
return '{}_polygons.json'.format(mode)
elif target_type == 'depth':
return '{}_disparity.png'.format(mode)
def encode_target_czifan(self, target, output_size=16):
known_class = 19 - len(Cityscapes.unknown_target)
target_lst = np.zeros((known_class + 1, *target.shape))
class_lst = np.ones(known_class + 1) * 255
for c in range(known_class):
target_lst[c] = (target == c)
class_lst[c] = c
return target_lst.astype(np.uint8), class_lst.astype(np.uint8)
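        # The active version returns one binary mask per known class plus one
        # all-zero slot: target_lst[c] == (target == c) for c < known_class, and
        # class_lst pads the final slot with 255 (ignore). The blocks below are
        # earlier grid/resize-based variants kept for reference.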
# target_lst = np.zeros((output_size**2, *target.shape))
# class_lst = np.ones(output_size**2) * 255
# for t in np.unique(target):
# tmp = np.where(target == t)
# gy, gx = int(np.mean(tmp[0])/32), int(np.mean(tmp[1])/32)
# target_lst[gy*output_size+gx,...] = (target == t)
# class_lst[gy*output_size+gx] = t
# return target_lst.astype(np.uint8), class_lst.astype(np.uint8)
# temp = cv2.resize(target.astype(np.uint8), (output_size, output_size), interpolation=cv2.INTER_LINEAR).reshape(-1)
# #temp = torch.nn.functional.interpolate(target.clone().unsqueeze(dim=1).float(), size=[output_size, output_size], mode="nearest").view(-1)
# target_lst, class_lst = [], []
# for t in temp:
# if t == 255:
# target_lst.append(np.zeros_like(target))
# else:
# target_lst.append(target == t)
# class_lst.append(t.item())
# target_lst = np.stack(target_lst, axis=0).astype(np.uint8) # (256, 512, 512)
# class_lst = np.asarray(class_lst).astype(np.uint8) # (256,)
# return target_lst, class_lst
| 11,663 | 51.540541 | 168 | py |
RAML | RAML-master/incremental/datasets/cityscapes_novel.py | import json
import os
from collections import namedtuple
import torch
import torch.utils.data as data
from PIL import Image
import numpy as np
import matplotlib.pyplot as plt
from torchvision import transforms
class Cityscapes_Novel(data.Dataset):
"""Cityscapes <http://www.cityscapes-dataset.com/> Dataset.
**Parameters:**
- **root** (string): Root directory of dataset where directory 'leftImg8bit' and 'gtFine' or 'gtCoarse' are located.
- **split** (string, optional): The image split to use, 'train', 'test' or 'val' if mode="gtFine" otherwise 'train', 'train_extra' or 'val'
- **mode** (string, optional): The quality mode to use, 'gtFine' or 'gtCoarse' or 'color'. Can also be a list to output a tuple with all specified target types.
- **transform** (callable, optional): A function/transform that takes in a PIL image and returns a transformed version. E.g, ``transforms.RandomCrop``
- **target_transform** (callable, optional): A function/transform that takes in the target and transforms it.
"""
# Based on https://github.com/mcordts/cityscapesScripts
CityscapesClass = namedtuple('CityscapesClass', ['name', 'id', 'train_id', 'category', 'category_id',
'has_instances', 'ignore_in_eval', 'color'])
classes = [
CityscapesClass('unlabeled', 0, 255, 'void', 0, False, True, (0, 0, 0)),
CityscapesClass('ego vehicle', 1, 255, 'void', 0, False, True, (0, 0, 0)),
CityscapesClass('rectification border', 2, 255, 'void', 0, False, True, (0, 0, 0)),
CityscapesClass('out of roi', 3, 255, 'void', 0, False, True, (0, 0, 0)),
CityscapesClass('static', 4, 255, 'void', 0, False, True, (0, 0, 0)),
CityscapesClass('dynamic', 5, 255, 'void', 0, False, True, (111, 74, 0)),
CityscapesClass('ground', 6, 255, 'void', 0, False, True, (81, 0, 81)),
CityscapesClass('road', 7, 0, 'flat', 1, False, False, (128, 64, 128)),
CityscapesClass('sidewalk', 8, 1, 'flat', 1, False, False, (244, 35, 232)),
CityscapesClass('parking', 9, 255, 'flat', 1, False, True, (250, 170, 160)),
CityscapesClass('rail track', 10, 255, 'flat', 1, False, True, (230, 150, 140)),
CityscapesClass('building', 11, 2, 'construction', 2, False, False, (70, 70, 70)),
CityscapesClass('wall', 12, 3, 'construction', 2, False, False, (102, 102, 156)),
CityscapesClass('fence', 13, 4, 'construction', 2, False, False, (190, 153, 153)),
CityscapesClass('guard rail', 14, 255, 'construction', 2, False, True, (180, 165, 180)),
CityscapesClass('bridge', 15, 255, 'construction', 2, False, True, (150, 100, 100)),
CityscapesClass('tunnel', 16, 255, 'construction', 2, False, True, (150, 120, 90)),
CityscapesClass('pole', 17, 5, 'object', 3, False, False, (153, 153, 153)),
CityscapesClass('polegroup', 18, 255, 'object', 3, False, True, (153, 153, 153)),
CityscapesClass('traffic light', 19, 6, 'object', 3, False, False, (250, 170, 30)),
CityscapesClass('traffic sign', 20, 7, 'object', 3, False, False, (220, 220, 0)),
CityscapesClass('vegetation', 21, 8, 'nature', 4, False, False, (107, 142, 35)),
CityscapesClass('terrain', 22, 9, 'nature', 4, False, False, (152, 251, 152)),
CityscapesClass('sky', 23, 10, 'sky', 5, False, False, (70, 130, 180)),
CityscapesClass('person', 24, 11, 'human', 6, True, False, (220, 20, 60)),
CityscapesClass('rider', 25, 12, 'human', 6, True, False, (255, 0, 0)),
CityscapesClass('car', 26, 13, 'vehicle', 7, True, False, (0, 0, 142)),
CityscapesClass('truck', 27, 14, 'vehicle', 7, True, False, (0, 0, 70)),
CityscapesClass('bus', 28, 15, 'vehicle', 7, True, False, (0, 60, 100)),
CityscapesClass('caravan', 29, 255, 'vehicle', 7, True, True, (0, 0, 90)),
CityscapesClass('trailer', 30, 255, 'vehicle', 7, True, True, (0, 0, 110)),
CityscapesClass('train', 31, 16, 'vehicle', 7, True, False, (0, 80, 100)),
CityscapesClass('motorcycle', 32, 17, 'vehicle', 7, True, False, (0, 0, 230)),
CityscapesClass('bicycle', 33, 18, 'vehicle', 7, True, False, (119, 11, 32)),
CityscapesClass('license plate', -1, 255, 'vehicle', 7, False, True, (0, 0, 142)),
]
train_id_to_color = [c.color for c in classes if (c.train_id != -1 and c.train_id != 255)]
train_id_to_color.append([0, 0, 0])
train_id_to_color = np.array(train_id_to_color)
id_to_train_id = np.array([c.train_id for c in classes])
unknown_target = None
# unknown_target = [1, 3, 4, 5, 6, 7, 8, 9, 12, 14, 15, 16, 18]
unknown_target = [13,14,15]
# unknown_target = [i for i in range(19)]
# unknown_target.pop(13)
print('unknown_target is : ', unknown_target)
# unknown_target = [18]
#train_id_to_color = [(0, 0, 0), (128, 64, 128), (70, 70, 70), (153, 153, 153), (107, 142, 35),
# (70, 130, 180), (220, 20, 60), (0, 0, 142)]
#train_id_to_color = np.array(train_id_to_color)
#id_to_train_id = np.array([c.category_id for c in classes], dtype='uint8') - 1
def __init__(self, novel_path, novel_no, novel_name='novel.txt', transform=None):
self.root=os.path.join(novel_path,str(novel_no))
self.root=os.path.join(self.root,novel_name)
self.transform=transform
        with open(self.root, 'r') as f:
            file = f.readlines()
self.images=[]
self.targets=[]
for line in file:
lines=line.strip('\n').split('\t')
self.images.append(lines[0])
self.targets.append(lines[1])
# self.targets = self.images
# print(self.images)
# print(self.images[10])
# print(self.images[102])
# print(self.images[107])
# print(self.images[197])
# print(self.images[200])
# print(self.images[207])
# print(self.images[474])
# print(self.images[486])
@classmethod
def encode_target(cls, target):
target = cls.id_to_train_id[np.array(target)]
return target
@classmethod
def decode_target(cls, target):
target[target == 255] = 19
#target = target.astype('uint8') + 1
return cls.train_id_to_color[target]
def __getitem__(self, index):
"""
Args:
index (int): Index
Returns:
tuple: (image, target) where target is a tuple of all target types if target_type is a list with more
than one item. Otherwise target is a json object if target_type="polygon", else the image segmentation.
"""
image = Image.open(self.images[index]).convert('RGB')
# image = Image.open(self.images[index])
target = Image.open(self.targets[index])
if self.transform:
image, target = self.transform(image, target)
target = self.encode_target(target)
# unloader = transforms.ToPILImage()
#
# plt.figure()
# plt.imshow(unloader(image.cpu().clone()))
# plt.show()
#
# plt.figure()
# plt.imshow(target)
# plt.show()
#
# plt.figure()
# plt.imshow(target_true)
# plt.show()
#
# instance, counts = np.unique(target, False, False, True)
# print('target', instance, counts)
# instance, counts = np.unique(target_true, False, False, True)
# print('true', instance, counts)
# return image
return image, target
def __len__(self):
return len(self.images)
def _load_json(self, path):
with open(path, 'r') as file:
data = json.load(file)
return data
def _get_target_suffix(self, mode, target_type):
if target_type == 'instance':
return '{}_instanceIds.png'.format(mode)
elif target_type == 'semantic':
return '{}_labelIds.png'.format(mode)
elif target_type == 'color':
return '{}_color.png'.format(mode)
elif target_type == 'polygon':
return '{}_polygons.json'.format(mode)
elif target_type == 'depth':
return '{}_disparity.png'.format(mode) | 8,742 | 48.39548 | 168 | py |
RAML | RAML-master/incremental/datasets/.ipynb_checkpoints/cityscapes_novel-checkpoint.py | import json
import os
from collections import namedtuple
from matplotlib import set_loglevel
import torch
import torch.utils.data as data
from PIL import Image
import numpy as np
import matplotlib.pyplot as plt
from torchvision import transforms
class Cityscapes_Novel(data.Dataset):
"""Cityscapes <http://www.cityscapes-dataset.com/> Dataset.
**Parameters:**
- **root** (string): Root directory of dataset where directory 'leftImg8bit' and 'gtFine' or 'gtCoarse' are located.
- **split** (string, optional): The image split to use, 'train', 'test' or 'val' if mode="gtFine" otherwise 'train', 'train_extra' or 'val'
- **mode** (string, optional): The quality mode to use, 'gtFine' or 'gtCoarse' or 'color'. Can also be a list to output a tuple with all specified target types.
- **transform** (callable, optional): A function/transform that takes in a PIL image and returns a transformed version. E.g, ``transforms.RandomCrop``
- **target_transform** (callable, optional): A function/transform that takes in the target and transforms it.
"""
# Based on https://github.com/mcordts/cityscapesScripts
CityscapesClass = namedtuple('CityscapesClass', ['name', 'id', 'train_id', 'category', 'category_id',
'has_instances', 'ignore_in_eval', 'color'])
classes = [
CityscapesClass('unlabeled', 0, 255, 'void', 0, False, True, (0, 0, 0)),
CityscapesClass('ego vehicle', 1, 255, 'void', 0, False, True, (0, 0, 0)),
CityscapesClass('rectification border', 2, 255, 'void', 0, False, True, (0, 0, 0)),
CityscapesClass('out of roi', 3, 255, 'void', 0, False, True, (0, 0, 0)),
CityscapesClass('static', 4, 255, 'void', 0, False, True, (0, 0, 0)),
CityscapesClass('dynamic', 5, 255, 'void', 0, False, True, (111, 74, 0)),
CityscapesClass('ground', 6, 255, 'void', 0, False, True, (81, 0, 81)),
CityscapesClass('road', 7, 0, 'flat', 1, False, False, (128, 64, 128)),
CityscapesClass('sidewalk', 8, 1, 'flat', 1, False, False, (244, 35, 232)),
CityscapesClass('parking', 9, 255, 'flat', 1, False, True, (250, 170, 160)),
CityscapesClass('rail track', 10, 255, 'flat', 1, False, True, (230, 150, 140)),
CityscapesClass('building', 11, 2, 'construction', 2, False, False, (70, 70, 70)),
CityscapesClass('wall', 12, 3, 'construction', 2, False, False, (102, 102, 156)),
CityscapesClass('fence', 13, 4, 'construction', 2, False, False, (190, 153, 153)),
CityscapesClass('guard rail', 14, 255, 'construction', 2, False, True, (180, 165, 180)),
CityscapesClass('bridge', 15, 255, 'construction', 2, False, True, (150, 100, 100)),
CityscapesClass('tunnel', 16, 255, 'construction', 2, False, True, (150, 120, 90)),
CityscapesClass('pole', 17, 5, 'object', 3, False, False, (153, 153, 153)),
CityscapesClass('polegroup', 18, 255, 'object', 3, False, True, (153, 153, 153)),
CityscapesClass('traffic light', 19, 6, 'object', 3, False, False, (250, 170, 30)),
CityscapesClass('traffic sign', 20, 7, 'object', 3, False, False, (220, 220, 0)),
CityscapesClass('vegetation', 21, 8, 'nature', 4, False, False, (107, 142, 35)),
CityscapesClass('terrain', 22, 9, 'nature', 4, False, False, (152, 251, 152)),
CityscapesClass('sky', 23, 10, 'sky', 5, False, False, (70, 130, 180)),
CityscapesClass('person', 24, 11, 'human', 6, True, False, (220, 20, 60)),
CityscapesClass('rider', 25, 12, 'human', 6, True, False, (255, 0, 0)),
CityscapesClass('car', 26, 13, 'vehicle', 7, True, False, (0, 0, 142)),
CityscapesClass('truck', 27, 14, 'vehicle', 7, True, False, (0, 0, 70)),
CityscapesClass('bus', 28, 15, 'vehicle', 7, True, False, (0, 60, 100)),
CityscapesClass('caravan', 29, 255, 'vehicle', 7, True, True, (0, 0, 90)),
CityscapesClass('trailer', 30, 255, 'vehicle', 7, True, True, (0, 0, 110)),
CityscapesClass('train', 31, 16, 'vehicle', 7, True, False, (0, 80, 100)),
CityscapesClass('motorcycle', 32, 17, 'vehicle', 7, True, False, (0, 0, 230)),
CityscapesClass('bicycle', 33, 18, 'vehicle', 7, True, False, (119, 11, 32)),
CityscapesClass('license plate', -1, 255, 'vehicle', 7, False, True, (0, 0, 142)),
]
train_id_to_color = [c.color for c in classes if (c.train_id != -1 and c.train_id != 255)]
train_id_to_color.append([0, 0, 0])
train_id_to_color = np.array(train_id_to_color)
id_to_train_id = np.array([c.train_id for c in classes])
    # Train ids of the novel (unknown) classes held out from the base set;
    # alternative splits are kept commented out for reference.
    # unknown_target = [1, 3, 4, 5, 6, 7, 8, 9, 12, 14, 15, 16, 18]
    # unknown_target = [i for i in range(19)]; unknown_target.pop(13)
    # unknown_target = [18]
    unknown_target = [13, 14, 15]
    print('unknown_target is : ', unknown_target)
#train_id_to_color = [(0, 0, 0), (128, 64, 128), (70, 70, 70), (153, 153, 153), (107, 142, 35),
# (70, 130, 180), (220, 20, 60), (0, 0, 142)]
#train_id_to_color = np.array(train_id_to_color)
#id_to_train_id = np.array([c.category_id for c in classes], dtype='uint8') - 1
    def __init__(self, novel_path, novel_no, novel_name='novel.txt', transform=None):
        self.root = os.path.join(novel_path, str(novel_no), novel_name)
        self.transform = transform
        self.images = []
        self.targets = []
        with open(self.root, 'r') as f:
            for line in f:
                fields = line.strip('\n').split('\t')
                self.images.append(fields[0])
                self.targets.append(fields[1])
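        # Expected list-file layout, one tab-separated pair per line
        # (paths below are illustrative only):
        #   leftImg8bit/train/aachen_..._leftImg8bit.png<TAB>gtFine/train/aachen_..._gtFine_labelIds.png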
# self.targets = self.images
# print(self.images)
# print(self.images[10])
# print(self.images[102])
# print(self.images[107])
# print(self.images[197])
# print(self.images[200])
# print(self.images[207])
# print(self.images[474])
# print(self.images[486])
@classmethod
def encode_target(cls, target):
target = cls.id_to_train_id[np.array(target)]
return target
@classmethod
def decode_target(cls, target):
target[target == 255] = 19
#target = target.astype('uint8') + 1
return cls.train_id_to_color[target]
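    # A minimal sketch of the label round trip (mapping values follow from the
    # class table above; the input array itself is illustrative):
    #   raw = np.array([[7, 26], [0, 33]])         # raw Cityscapes label ids
    #   ids = Cityscapes_Novel.encode_target(raw)  # -> [[0, 13], [255, 18]] train ids
    #   rgb = Cityscapes_Novel.decode_target(ids)  # -> (2, 2, 3) colors; 255 is drawn black
    #   (note: decode_target rewrites 255 to 19 in place before colorizing)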
def __getitem__(self, index):
"""
Args:
index (int): Index
Returns:
tuple: (image, target) where target is a tuple of all target types if target_type is a list with more
than one item. Otherwise target is a json object if target_type="polygon", else the image segmentation.
"""
image = Image.open(self.images[index]).convert('RGB')
# image = Image.open(self.images[index])
target = Image.open(self.targets[index])
if self.transform:
image, target = self.transform(image, target)
target = self.encode_target(target)
# unloader = transforms.ToPILImage()
#
# plt.figure()
# plt.imshow(unloader(image.cpu().clone()))
# plt.show()
#
# plt.figure()
# plt.imshow(target)
# plt.show()
#
# plt.figure()
# plt.imshow(target_true)
# plt.show()
#
# instance, counts = np.unique(target, False, False, True)
# print('target', instance, counts)
# instance, counts = np.unique(target_true, False, False, True)
# print('true', instance, counts)
# return image
return image, target
def __len__(self):
return len(self.images)
def _load_json(self, path):
with open(path, 'r') as file:
data = json.load(file)
return data
def _get_target_suffix(self, mode, target_type):
if target_type == 'instance':
return '{}_instanceIds.png'.format(mode)
elif target_type == 'semantic':
return '{}_labelIds.png'.format(mode)
elif target_type == 'color':
return '{}_color.png'.format(mode)
elif target_type == 'polygon':
return '{}_polygons.json'.format(mode)
elif target_type == 'depth':
return '{}_disparity.png'.format(mode) | 8,742 | 48.39548 | 168 | py |
RAML | RAML-master/incremental/network/_deeplab.py | import torch
from torch import nn
from torch.nn import functional as F
from .utils import _SimpleSegmentationModel, _SimpleSegmentationModel_embedding, _SimpleSegmentationModel_embedding_self_distillation, _SimpleSegmentationModel_Metric
__all__ = ["DeepLabV3"]
class DeepLabV3(_SimpleSegmentationModel):
"""
Implements DeepLabV3 model from
`"Rethinking Atrous Convolution for Semantic Image Segmentation"
<https://arxiv.org/abs/1706.05587>`_.
Arguments:
backbone (nn.Module): the network used to compute the features for the model.
The backbone should return an OrderedDict[Tensor], with the key being
"out" for the last feature map used, and "aux" if an auxiliary classifier
is used.
classifier (nn.Module): module that takes the "out" element returned from
the backbone and returns a dense prediction.
aux_classifier (nn.Module, optional): auxiliary classifier used during training
"""
pass
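# A minimal sketch of the contract documented above, assuming a backbone that
# returns an OrderedDict of feature maps and a compatible classifier head
# (names and shapes are illustrative, not this project's exact wiring):
#
#   features = backbone(x)             # e.g. {'low_level': ..., 'out': [N, C, h, w]}
#   logits   = classifier(features)    # dense per-pixel class scores
#   model    = DeepLabV3(backbone, classifier)
#   out      = model(torch.randn(1, 3, 513, 513))  # wrapper upsamples to input size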
class DeepLabV3_metric(_SimpleSegmentationModel_Metric):
pass
class DeepLabV3_embedding(_SimpleSegmentationModel_embedding):
"""
Implements DeepLabV3 model from
`"Rethinking Atrous Convolution for Semantic Image Segmentation"
<https://arxiv.org/abs/1706.05587>`_.
Arguments:
backbone (nn.Module): the network used to compute the features for the model.
The backbone should return an OrderedDict[Tensor], with the key being
"out" for the last feature map used, and "aux" if an auxiliary classifier
is used.
classifier (nn.Module): module that takes the "out" element returned from
the backbone and returns a dense prediction.
aux_classifier (nn.Module, optional): auxiliary classifier used during training
"""
pass
class DeepLabV3_embedding_self_distillation(_SimpleSegmentationModel_embedding_self_distillation):
"""
Implements DeepLabV3 model from
`"Rethinking Atrous Convolution for Semantic Image Segmentation"
<https://arxiv.org/abs/1706.05587>`_.
Arguments:
backbone (nn.Module): the network used to compute the features for the model.
The backbone should return an OrderedDict[Tensor], with the key being
"out" for the last feature map used, and "aux" if an auxiliary classifier
is used.
classifier (nn.Module): module that takes the "out" element returned from
the backbone and returns a dense prediction.
aux_classifier (nn.Module, optional): auxiliary classifier used during training
"""
pass
# class DeepLabHeadV3Plus(nn.Module):
# def __init__(self, in_channels, low_level_channels, num_classes, aspp_dilate=[12, 24, 36]):
# super(DeepLabHeadV3Plus, self).__init__()
# self.project = nn.Sequential(
# nn.Conv2d(low_level_channels, 48, 1, bias=False),
# nn.BatchNorm2d(48),
# nn.ReLU(inplace=True),
# )
#
# self.aspp = ASPP(in_channels, aspp_dilate)
#
# self.classifier = nn.Sequential(
# nn.Conv2d(304, 256, 3, padding=1, bias=False),
# nn.BatchNorm2d(256),
# nn.ReLU(inplace=True),
# nn.Conv2d(256, num_classes, 1)
# )
# self._init_weight()
#
# def forward(self, feature):
# low_level_feature = self.project(feature['low_level'])
# output_feature = self.aspp(feature['out'])
# output_feature = F.interpolate(output_feature, size=low_level_feature.shape[2:], mode='bilinear',
# align_corners=False)
# return self.classifier(torch.cat([low_level_feature, output_feature], dim=1))
#
# def _init_weight(self):
# for m in self.modules():
# if isinstance(m, nn.Conv2d):
# nn.init.kaiming_normal_(m.weight)
# elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
# nn.init.constant_(m.weight, 1)
# nn.init.constant_(m.bias, 0)
# class DeepLabHead(nn.Module):
# def __init__(self, in_channels, num_classes, aspp_dilate=[12, 24, 36]):
# super(DeepLabHead, self).__init__()
#
# self.classifier = nn.Sequential(
# ASPP(in_channels, aspp_dilate),
# nn.Conv2d(256, 256, 3, padding=1, bias=False),
# nn.BatchNorm2d(256),
# nn.ReLU(inplace=True),
# nn.Conv2d(256, num_classes, 1)
# )
# self._init_weight()
#
# def forward(self, feature):
# return self.classifier( feature['out'] )
#
# def _init_weight(self):
# for m in self.modules():
# if isinstance(m, nn.Conv2d):
# nn.init.kaiming_normal_(m.weight)
# elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
# nn.init.constant_(m.weight, 1)
# nn.init.constant_(m.bias, 0)
#
# class AtrousSeparableConvolution(nn.Module):
# """ Atrous Separable Convolution
# """
# def __init__(self, in_channels, out_channels, kernel_size,
# stride=1, padding=0, dilation=1, bias=True):
# super(AtrousSeparableConvolution, self).__init__()
# self.body = nn.Sequential(
# # Separable Conv
# nn.Conv2d( in_channels, in_channels, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation, bias=bias, groups=in_channels ),
# # PointWise Conv
# nn.Conv2d( in_channels, out_channels, kernel_size=1, stride=1, padding=0, bias=bias),
# )
#
# self._init_weight()
#
# def forward(self, x):
# return self.body(x)
#
# def _init_weight(self):
# for m in self.modules():
# if isinstance(m, nn.Conv2d):
# nn.init.kaiming_normal_(m.weight)
# elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
# nn.init.constant_(m.weight, 1)
# nn.init.constant_(m.bias, 0)
#
# class ASPPConv(nn.Sequential):
# def __init__(self, in_channels, out_channels, dilation):
# modules = [
# nn.Conv2d(in_channels, out_channels, 3, padding=dilation, dilation=dilation, bias=False),
# nn.BatchNorm2d(out_channels),
# nn.ReLU(inplace=True)
# ]
# super(ASPPConv, self).__init__(*modules)
#
# class ASPPPooling(nn.Sequential):
# def __init__(self, in_channels, out_channels):
# super(ASPPPooling, self).__init__(
# nn.AdaptiveAvgPool2d(1),
# nn.Conv2d(in_channels, out_channels, 1, bias=False),
# nn.BatchNorm2d(out_channels),
# nn.ReLU(inplace=True))
#
# def forward(self, x):
# size = x.shape[-2:]
# x = super(ASPPPooling, self).forward(x)
# return F.interpolate(x, size=size, mode='bilinear', align_corners=False)
#
# class ASPP(nn.Module):
# def __init__(self, in_channels, atrous_rates):
# super(ASPP, self).__init__()
# out_channels = 256
# modules = []
# modules.append(nn.Sequential(
# nn.Conv2d(in_channels, out_channels, 1, bias=False),
# nn.BatchNorm2d(out_channels),
# nn.ReLU(inplace=True)))
#
# rate1, rate2, rate3 = tuple(atrous_rates)
# modules.append(ASPPConv(in_channels, out_channels, rate1))
# modules.append(ASPPConv(in_channels, out_channels, rate2))
# modules.append(ASPPConv(in_channels, out_channels, rate3))
# modules.append(ASPPPooling(in_channels, out_channels))
#
# self.convs = nn.ModuleList(modules)
#
# self.project = nn.Sequential(
# nn.Conv2d(5 * out_channels, out_channels, 1, bias=False),
# nn.BatchNorm2d(out_channels),
# nn.ReLU(inplace=True),
# nn.Dropout(0.1),)
#
# def forward(self, x):
# res = []
# for conv in self.convs:
# res.append(conv(x))
# res = torch.cat(res, dim=1)
# return self.project(res)
#
#
#
# def convert_to_separable_conv(module):
# new_module = module
# if isinstance(module, nn.Conv2d) and module.kernel_size[0]>1:
# new_module = AtrousSeparableConvolution(module.in_channels,
# module.out_channels,
# module.kernel_size,
# module.stride,
# module.padding,
# module.dilation,
# module.bias)
# for name, child in module.named_children():
# new_module.add_module(name, convert_to_separable_conv(child))
# return new_module | 8,740 | 39.281106 | 165 | py |