# ==== File: MNC-master/tools/demo.py ====
#!/usr/bin/python
# --------------------------------------------------------
# Multitask Network Cascade
# Modified from py-faster-rcnn (https://github.com/rbgirshick/py-faster-rcnn)
# Copyright (c) 2016, Haozhi Qi
# Licensed under The MIT License [see LICENSE for details]
# --------------------------------------------------------
# Standard module
import os
import argparse
import time
import cv2
import numpy as np
# User-defined module
import _init_paths
import caffe
from mnc_config import cfg
from transform.bbox_transform import clip_boxes
from utils.blob import prep_im_for_blob, im_list_to_blob
from transform.mask_transform import gpu_mask_voting
import matplotlib.pyplot as plt
from utils.vis_seg import _convert_pred_to_image, _get_voc_color_map
from PIL import Image
# VOC 20 classes
CLASSES = ('aeroplane', 'bicycle', 'bird', 'boat',
'bottle', 'bus', 'car', 'cat', 'chair',
'cow', 'diningtable', 'dog', 'horse',
'motorbike', 'person', 'pottedplant',
'sheep', 'sofa', 'train', 'tvmonitor')
def parse_args():
"""Parse input arguments."""
parser = argparse.ArgumentParser(description='MNC demo')
parser.add_argument('--gpu', dest='gpu_id', help='GPU device id to use [0]',
default=0, type=int)
parser.add_argument('--cpu', dest='cpu_mode',
help='Use CPU mode (overrides --gpu)',
action='store_true')
parser.add_argument('--def', dest='prototxt',
help='prototxt file defining the network',
default='./models/VGG16/mnc_5stage/test.prototxt', type=str)
parser.add_argument('--net', dest='caffemodel',
help='model to test',
default='./data/mnc_model/mnc_model.caffemodel.h5', type=str)
args = parser.parse_args()
return args
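# Example invocation (the paths below are just the argparse defaults above;
# adjust them to wherever the prototxt/model actually live):
#   python tools/demo.py --gpu 0 \
#       --def ./models/VGG16/mnc_5stage/test.prototxt \
#       --net ./data/mnc_model/mnc_model.caffemodel.h5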
def prepare_mnc_args(im, net):
# Prepare image data blob
blobs = {'data': None}
processed_ims = []
im, im_scale_factors = \
prep_im_for_blob(im, cfg.PIXEL_MEANS, cfg.TEST.SCALES[0], cfg.TRAIN.MAX_SIZE)
processed_ims.append(im)
blobs['data'] = im_list_to_blob(processed_ims)
# Prepare image info blob
im_scales = [np.array(im_scale_factors)]
assert len(im_scales) == 1, 'Only single-image batch implemented'
im_blob = blobs['data']
blobs['im_info'] = np.array(
[[im_blob.shape[2], im_blob.shape[3], im_scales[0]]],
dtype=np.float32)
# Reshape network inputs and do forward
net.blobs['data'].reshape(*blobs['data'].shape)
net.blobs['im_info'].reshape(*blobs['im_info'].shape)
forward_kwargs = {
'data': blobs['data'].astype(np.float32, copy=False),
'im_info': blobs['im_info'].astype(np.float32, copy=False)
}
return forward_kwargs, im_scales
def im_detect(im, net):
forward_kwargs, im_scales = prepare_mnc_args(im, net)
blobs_out = net.forward(**forward_kwargs)
# output we need to collect:
# 1. output from phase 1
rois_phase1 = net.blobs['rois'].data.copy()
masks_phase1 = net.blobs['mask_proposal'].data[...]
scores_phase1 = net.blobs['seg_cls_prob'].data[...]
# 2. output from phase2
rois_phase2 = net.blobs['rois_ext'].data[...]
masks_phase2 = net.blobs['mask_proposal_ext'].data[...]
scores_phase2 = net.blobs['seg_cls_prob_ext'].data[...]
# Boxes are in resized space, we un-scale them back
rois_phase1 = rois_phase1[:, 1:5] / im_scales[0]
rois_phase2 = rois_phase2[:, 1:5] / im_scales[0]
rois_phase1, _ = clip_boxes(rois_phase1, im.shape)
rois_phase2, _ = clip_boxes(rois_phase2, im.shape)
# concatenate two stages to get final network output
masks = np.concatenate((masks_phase1, masks_phase2), axis=0)
boxes = np.concatenate((rois_phase1, rois_phase2), axis=0)
scores = np.concatenate((scores_phase1, scores_phase2), axis=0)
return boxes, masks, scores
def get_vis_dict(result_box, result_mask, img_name, cls_names, vis_thresh=0.5):
box_for_img = []
mask_for_img = []
cls_for_img = []
for cls_ind, cls_name in enumerate(cls_names):
det_for_img = result_box[cls_ind]
seg_for_img = result_mask[cls_ind]
keep_inds = np.where(det_for_img[:, -1] >= vis_thresh)[0]
for keep in keep_inds:
box_for_img.append(det_for_img[keep])
mask_for_img.append(seg_for_img[keep][0])
cls_for_img.append(cls_ind + 1)
res_dict = {'image_name': img_name,
'cls_name': cls_for_img,
'boxes': box_for_img,
'masks': mask_for_img}
return res_dict
if __name__ == '__main__':
args = parse_args()
test_prototxt = args.prototxt
test_model = args.caffemodel
if args.cpu_mode:
    caffe.set_mode_cpu()
else:
    caffe.set_mode_gpu()
    caffe.set_device(args.gpu_id)
cfg.GPU_ID = args.gpu_id
net = caffe.Net(test_prototxt, test_model, caffe.TEST)
# Warm up for the first two images
im = 128 * np.ones((300, 500, 3), dtype=np.float32)
for i in xrange(2):
_, _, _ = im_detect(im, net)
im_names = ['2008_000533.jpg', '2008_000910.jpg', '2008_001602.jpg',
'2008_001717.jpg', '2008_008093.jpg']
demo_dir = './data/demo'
for im_name in im_names:
print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
print 'Demo for data/demo/{}'.format(im_name)
gt_image = os.path.join(demo_dir, im_name)
im = cv2.imread(gt_image)
start = time.time()
boxes, masks, seg_scores = im_detect(im, net)
end = time.time()
print 'forward time %f' % (end-start)
result_mask, result_box = gpu_mask_voting(masks, boxes, seg_scores, len(CLASSES) + 1,
100, im.shape[1], im.shape[0])
pred_dict = get_vis_dict(result_box, result_mask, 'data/demo/' + im_name, CLASSES)
img_width = im.shape[1]
img_height = im.shape[0]
inst_img, cls_img = _convert_pred_to_image(img_width, img_height, pred_dict)
color_map = _get_voc_color_map()
target_cls_file = os.path.join(demo_dir, 'cls_' + im_name)
cls_out_img = np.zeros((img_height, img_width, 3))
for i in xrange(img_height):
for j in xrange(img_width):
cls_out_img[i][j] = color_map[cls_img[i][j]][::-1]
cv2.imwrite(target_cls_file, cls_out_img)
background = Image.open(gt_image)
mask = Image.open(target_cls_file)
background = background.convert('RGBA')
mask = mask.convert('RGBA')
superimpose_image = Image.blend(background, mask, 0.8)
superimpose_name = os.path.join(demo_dir, 'final_' + im_name)
superimpose_image.save(superimpose_name, 'JPEG')
im = cv2.imread(superimpose_name)
im = im[:, :, (2, 1, 0)]
fig, ax = plt.subplots(figsize=(12, 12))
ax.imshow(im, aspect='equal')
classes = pred_dict['cls_name']
for i in xrange(len(classes)):
score = pred_dict['boxes'][i][-1]
bbox = pred_dict['boxes'][i][:4]
cls_ind = classes[i] - 1
ax.text(bbox[0], bbox[1] - 8,
'{:s} {:.4f}'.format(CLASSES[cls_ind], score),
bbox=dict(facecolor='blue', alpha=0.5),
fontsize=14, color='white')
plt.axis('off')
plt.tight_layout()
plt.draw()
fig.savefig(os.path.join(demo_dir, im_name[:-4]+'.png'))
os.remove(superimpose_name)
os.remove(target_cls_file)
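# --- Illustrative aside (not part of the original demo.py) ----------------
# The per-pixel double loop above colorizes the label map with O(H*W) Python
# iterations. The same lookup can be done in one numpy fancy-indexing
# operation; a minimal self-contained sketch with synthetic data, assuming
# the color map is a (num_classes, 3) RGB array as _get_voc_color_map returns:
import numpy as np

def colorize_labels(cls_img, color_map):
    """Map an (H, W) integer label image to a BGR color image."""
    color_map = np.asarray(color_map, dtype=np.uint8)  # (num_classes, 3) RGB
    return color_map[cls_img][:, :, ::-1]              # lookup, then RGB->BGR

labels = np.random.randint(0, 21, size=(4, 6))
colors = np.random.randint(0, 256, size=(21, 3))
print(colorize_labels(labels, colors).shape)           # (4, 6, 3)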
# ==== File: MNC-master/tools/train_net.py ====
#!/usr/bin/env python
# --------------------------------------------------------
# Multitask Network Cascade
# Modified from py-faster-rcnn (https://github.com/rbgirshick/py-faster-rcnn)
# Copyright (c) 2016, Haozhi Qi
# Licensed under The MIT License [see LICENSE for details]
# --------------------------------------------------------
# Standard module
import argparse
import sys
import pprint
import numpy as np
# User-defined module
import _init_paths
from mnc_config import cfg, cfg_from_file, get_output_dir # config mnc
from db.roidb import attach_roidb
from db.maskdb import attach_maskdb
from caffeWrapper.SolverWrapper import SolverWrapper
import caffe
def parse_args():
""" Parse input arguments
"""
parser = argparse.ArgumentParser(description='Train a Fast R-CNN network')
parser.add_argument('--gpu', dest='gpu_id',
help='GPU device id to use [0]',
default=0, type=int)
parser.add_argument('--solver', dest='solver',
help='solver prototxt',
default=None, type=str)
parser.add_argument('--iters', dest='max_iters',
help='number of iterations to train',
default=40000, type=int)
parser.add_argument('--weights', dest='pretrained_model',
help='initialize with pretrained model weights',
default=None, type=str)
parser.add_argument('--cfg', dest='cfg_file',
help='optional config file',
default=None, type=str)
parser.add_argument('--imdb', dest='imdb_name',
help='dataset to train on',
default='voc_2007_trainval', type=str)
parser.add_argument('--rand', dest='randomize',
help='randomize (do not use a fixed seed)',
action='store_true')
parser.add_argument('--set', dest='set_cfgs',
help='set config keys', default=None,
nargs=argparse.REMAINDER)
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
return parser.parse_args()
if __name__ == '__main__':
args = parse_args()
print('Called with args:')
print(args)
if args.cfg_file is not None:
cfg_from_file(args.cfg_file)
cfg.GPU_ID = args.gpu_id
print('Using config:')
pprint.pprint(cfg)
caffe.set_mode_gpu()
caffe.set_device(args.gpu_id)
if not args.randomize:
# fix the random seeds (numpy and caffe) for reproducibility
np.random.seed(cfg.RNG_SEED)
caffe.set_random_seed(cfg.RNG_SEED)
# get imdb and roidb from specified imdb_name
imdb, roidb = attach_roidb(args.imdb_name)
# Faster R-CNN doesn't need a maskdb
if cfg.MNC_MODE or cfg.CFM_MODE:
imdb, maskdb = attach_maskdb(args.imdb_name)
else:
maskdb = None
print '{:d} roidb entries'.format(len(roidb))
output_dir = get_output_dir(imdb, None)
print 'Output will be saved to `{:s}`'.format(output_dir)
_solver = SolverWrapper(args.solver, roidb, maskdb, output_dir, imdb,
pretrained_model=args.pretrained_model)
print 'Solving...'
_solver.train_model(args.max_iters)
print 'done solving'
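# Example invocation (the flag names and the voc_2007_trainval/40000 defaults
# come from parse_args above; the solver and weights paths are illustrative):
#   python tools/train_net.py --gpu 0 \
#       --solver models/VGG16/mnc_5stage/solver.prototxt \
#       --weights data/imagenet_models/VGG16.caffemodel \
#       --imdb voc_2007_trainval --iters 40000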
# ==== File: MNC-master/lib/caffeWrapper/TesterWrapper.py ====
# --------------------------------------------------------
# Multitask Network Cascade
# Modified from py-faster-rcnn (https://github.com/rbgirshick/py-faster-rcnn)
# Copyright (c) 2016, Haozhi Qi
# Licensed under The MIT License [see LICENSE for details]
# --------------------------------------------------------
import os
import cPickle
import scipy
import numpy as np
import cv2
import heapq
import caffe
from utils.timer import Timer
from nms.nms_wrapper import apply_nms, apply_nms_mask_single
from mnc_config import cfg, get_output_dir
from utils.blob import prep_im_for_blob, im_list_to_blob, prep_im_for_blob_cfm, pred_rois_for_blob
from transform.bbox_transform import clip_boxes, bbox_transform_inv, filter_small_boxes
from transform.mask_transform import cpu_mask_voting, gpu_mask_voting
class TesterWrapper(object):
"""
A simple wrapper around Caffe's test forward
"""
def __init__(self, test_prototxt, imdb, test_model, task_name):
# Pre-processing: check whether the model is stored as a binary file or as npy files
self.net = caffe.Net(test_prototxt, test_model, caffe.TEST)
self.net.name = os.path.splitext(os.path.basename(test_model))[0]
self.imdb = imdb
self.output_dir = get_output_dir(imdb, self.net)
self.task_name = task_name
# We define some class variables here to avoid defining them many times in every method
self.num_images = len(self.imdb.image_index)
self.num_classes = self.imdb.num_classes
# heuristic: keep an average of 40 detections per class per image prior to NMS
self.max_per_set = 40 * self.num_images
# heuristic: keep at most 100 detections per class per image prior to NMS
self.max_per_image = 100
if not os.path.exists(self.output_dir):
os.makedirs(self.output_dir)
def get_result(self):
output_dir = self.output_dir
det_file = os.path.join(output_dir, 'res_boxes.pkl')
seg_file = os.path.join(output_dir, 'res_masks.pkl')
if self.task_name == 'det':
self.get_detection_result()
elif self.task_name == 'vis_seg':
self.vis_segmentation_result()
elif self.task_name == 'seg':
if os.path.isfile(det_file) and os.path.isfile(seg_file):
with open(det_file, 'rb') as f:
seg_box = cPickle.load(f)
with open(seg_file, 'rb') as f:
seg_mask = cPickle.load(f)
else:
seg_box, seg_mask = self.get_segmentation_result()
with open(det_file, 'wb') as f:
cPickle.dump(seg_box, f, cPickle.HIGHEST_PROTOCOL)
with open(seg_file, 'wb') as f:
cPickle.dump(seg_mask, f, cPickle.HIGHEST_PROTOCOL)
print 'Evaluating segmentation using MNC 5 stage inference'
self.imdb.evaluate_segmentation(seg_box, seg_mask, output_dir)
elif self.task_name == 'cfm':
if os.path.isfile(det_file) and os.path.isfile(seg_file):
with open(det_file, 'rb') as f:
cfm_boxes = cPickle.load(f)
with open(seg_file, 'rb') as f:
cfm_masks = cPickle.load(f)
else:
cfm_boxes, cfm_masks = self.get_cfm_result()
with open(det_file, 'wb') as f:
cPickle.dump(cfm_boxes, f, cPickle.HIGHEST_PROTOCOL)
with open(seg_file, 'wb') as f:
cPickle.dump(cfm_masks, f, cPickle.HIGHEST_PROTOCOL)
print 'Evaluating segmentation using convolutional feature masking'
self.imdb.evaluate_segmentation(cfm_boxes, cfm_masks, output_dir)
else:
print 'task name only supports \'det\', \'seg\', \'cfm\' and \'vis_seg\''
raise NotImplementedError
def get_detection_result(self):
output_dir = self.output_dir
# heuristic: keep an average of 40 detections per class per image prior to NMS
max_per_set = 40 * self.num_images
# heuristic: keep at most 100 detections per class per image prior to NMS
max_per_image = 100
# detection threshold for each class (this is adaptively set based on the
# max_per_set constraint)
thresh = -np.inf * np.ones(self.num_classes)
# top_scores will hold one min heap of scores per class (used to enforce
# the max_per_set constraint)
top_scores = [[] for _ in xrange(self.num_classes)]
# all detections are collected into:
# all_boxes[cls][image] = N x 5 array of detections in
# (x1, y1, x2, y2, score)
all_boxes = [[[] for _ in xrange(self.num_images)]
for _ in xrange(self.num_classes)]
_t = {'im_detect': Timer(), 'misc': Timer()}
for i in xrange(self.num_images):
im = cv2.imread(self.imdb.image_path_at(i))
_t['im_detect'].tic()
scores, boxes = self._detection_forward(im)
_t['im_detect'].toc()
for j in xrange(1, self.num_classes):
inds = np.where(scores[:, j] > thresh[j])[0]
cls_scores = scores[inds, j]
cls_boxes = boxes[inds, j*4:(j+1)*4]
top_inds = np.argsort(-cls_scores)[:max_per_image]
cls_scores = cls_scores[top_inds]
cls_boxes = cls_boxes[top_inds, :]
# push new scores onto the min heap
for val in cls_scores:
heapq.heappush(top_scores[j], val)
# if we've collected more than the max number of detections,
# then pop items off the min heap and update the class threshold
if len(top_scores[j]) > max_per_set:
while len(top_scores[j]) > max_per_set:
heapq.heappop(top_scores[j])
thresh[j] = top_scores[j][0]
all_boxes[j][i] = np.hstack((cls_boxes, cls_scores[:, np.newaxis]))\
.astype(np.float32, copy=False)
print 'process image %d/%d, forward average time %f' % (i, self.num_images,
_t['im_detect'].average_time)
for j in xrange(1, self.num_classes):
for i in xrange(self.num_images):
inds = np.where(all_boxes[j][i][:, -1] > thresh[j])[0]
all_boxes[j][i] = all_boxes[j][i][inds, :]
det_file = os.path.join(output_dir, 'detections.pkl')
with open(det_file, 'wb') as f:
cPickle.dump(all_boxes, f, cPickle.HIGHEST_PROTOCOL)
print 'Applying NMS to all detections'
nms_dets = apply_nms(all_boxes, cfg.TEST.NMS)
print 'Evaluating detections'
self.imdb.evaluate_detections(nms_dets, output_dir)
def vis_segmentation_result(self):
self.imdb.visualization_segmentation(self.output_dir)
def get_segmentation_result(self):
# detection threshold for each class
# (this is adaptively set based on the max_per_set constraint)
thresh = -np.inf * np.ones(self.num_classes)
# top_scores will hold one min heap of scores per class (used to enforce
# the max_per_set constraint)
top_scores = [[] for _ in xrange(self.num_classes)]
# all detections and segmentations are collected into lists,
# since the number of dets/segs varies per image
all_boxes = [[[] for _ in xrange(self.num_images)]
for _ in xrange(self.num_classes)]
all_masks = [[[] for _ in xrange(self.num_images)]
for _ in xrange(self.num_classes)]
_t = {'im_detect': Timer(), 'misc': Timer()}
for i in xrange(self.num_images):
im = cv2.imread(self.imdb.image_path_at(i))
_t['im_detect'].tic()
masks, boxes, seg_scores = self._segmentation_forward(im)
_t['im_detect'].toc()
if not cfg.TEST.USE_MASK_MERGE:
for j in xrange(1, self.num_classes):
inds = np.where(seg_scores[:, j] > thresh[j])[0]
cls_scores = seg_scores[inds, j]
cls_boxes = boxes[inds, :]
cls_masks = masks[inds, :]
top_inds = np.argsort(-cls_scores)[:self.max_per_image]
cls_scores = cls_scores[top_inds]
cls_boxes = cls_boxes[top_inds, :]
cls_masks = cls_masks[top_inds, :]
# push new scores onto the min heap
for val in cls_scores:
heapq.heappush(top_scores[j], val)
# if we've collected more than the max number of detections,
# then pop items off the min heap and update the class threshold
if len(top_scores[j]) > self.max_per_set:
while len(top_scores[j]) > self.max_per_set:
heapq.heappop(top_scores[j])
thresh[j] = top_scores[j][0]
# Add new boxes into record
box_before_nms = np.hstack((cls_boxes, cls_scores[:, np.newaxis]))\
.astype(np.float32, copy=False)
mask_before_nms = cls_masks.astype(np.float32, copy=False)
all_boxes[j][i], all_masks[j][i] = apply_nms_mask_single(box_before_nms, mask_before_nms, cfg.TEST.NMS)
else:
if cfg.TEST.USE_GPU_MASK_MERGE:
result_mask, result_box = gpu_mask_voting(masks, boxes, seg_scores, self.num_classes,
self.max_per_image, im.shape[1], im.shape[0])
else:
result_box, result_mask = cpu_mask_voting(masks, boxes, seg_scores, self.num_classes,
self.max_per_image, im.shape[1], im.shape[0])
# no need to create a min heap since the output will not exceed the max number of detections
for j in xrange(1, self.num_classes):
all_boxes[j][i] = result_box[j-1]
all_masks[j][i] = result_mask[j-1]
print 'process image %d/%d, forward average time %f' % (i, self.num_images,
_t['im_detect'].average_time)
for j in xrange(1, self.num_classes):
for i in xrange(self.num_images):
inds = np.where(all_boxes[j][i][:, -1] > thresh[j])[0]
all_boxes[j][i] = all_boxes[j][i][inds, :]
all_masks[j][i] = all_masks[j][i][inds]
return all_boxes, all_masks
def _detection_forward(self, im):
""" Detect object classes in an image given object proposals.
Arguments:
im (ndarray): color image to test (in BGR order)
Returns:
box_scores (ndarray): R x K array of object class scores (K includes
background as object category 0)
all_boxes (ndarray): R x (4*K) array of predicted bounding boxes
"""
forward_kwargs, im_scales = self._prepare_mnc_args(im)
blobs_out = self.net.forward(**forward_kwargs)
# There are some data we need to get:
# 1. ROIS (with bbox regression)
rois = self.net.blobs['rois'].data.copy()
# un-scale back to raw image space
boxes = rois[:, 1:5] / im_scales[0]
box_deltas = blobs_out['bbox_pred']
pred_boxes = bbox_transform_inv(boxes, box_deltas)
pred_boxes, _ = clip_boxes(pred_boxes, im.shape)
# 2. Detection score
scores = blobs_out['cls_prob']
return scores, pred_boxes
def _segmentation_forward(self, im):
forward_kwargs, im_scales = self._prepare_mnc_args(im)
blobs_out = self.net.forward(**forward_kwargs)
# output we need to collect:
# 1. output from phase 1
rois_phase1 = self.net.blobs['rois'].data.copy()
masks_phase1 = self.net.blobs['mask_proposal'].data[...]
scores_phase1 = self.net.blobs['seg_cls_prob'].data[...]
# 2. output from phase2
rois_phase2 = self.net.blobs['rois_ext'].data[...]
masks_phase2 = self.net.blobs['mask_proposal_ext'].data[...]
scores_phase2 = self.net.blobs['seg_cls_prob_ext'].data[...]
# Boxes are in resized space, we un-scale them back
rois_phase1 = rois_phase1[:, 1:5] / im_scales[0]
rois_phase2 = rois_phase2[:, 1:5] / im_scales[0]
rois_phase1, _ = clip_boxes(rois_phase1, im.shape)
rois_phase2, _ = clip_boxes(rois_phase2, im.shape)
# concatenate two stages to get final network output
masks = np.concatenate((masks_phase1, masks_phase2), axis=0)
boxes = np.concatenate((rois_phase1, rois_phase2), axis=0)
scores = np.concatenate((scores_phase1, scores_phase2), axis=0)
return masks, boxes, scores
def _prepare_mnc_args(self, im):
# Prepare image data blob
blobs = {'data': None}
processed_ims = []
im, im_scale_factors = \
prep_im_for_blob(im, cfg.PIXEL_MEANS, cfg.TEST.SCALES[0], cfg.TRAIN.MAX_SIZE)
processed_ims.append(im)
blobs['data'] = im_list_to_blob(processed_ims)
# Prepare image info blob
im_scales = [np.array(im_scale_factors)]
assert len(im_scales) == 1, 'Only single-image batch implemented'
im_blob = blobs['data']
blobs['im_info'] = np.array(
[[im_blob.shape[2], im_blob.shape[3], im_scales[0]]],
dtype=np.float32)
# Reshape network inputs and do forward
self.net.blobs['data'].reshape(*blobs['data'].shape)
self.net.blobs['im_info'].reshape(*blobs['im_info'].shape)
forward_kwargs = {
'data': blobs['data'].astype(np.float32, copy=False),
'im_info': blobs['im_info'].astype(np.float32, copy=False)
}
return forward_kwargs, im_scales
def get_cfm_result(self):
# detection threshold for each class
# (this is adaptively set based on the max_per_set constraint)
thresh = -np.inf * np.ones(self.num_classes)
# top_scores will hold one min heap of scores per class (used to enforce
# the max_per_set constraint)
top_scores = [[] for _ in xrange(self.num_classes)]
# all detections and segmentations are collected into lists,
# since the number of dets/segs varies per image
all_boxes = [[[] for _ in xrange(self.num_images)]
for _ in xrange(self.num_classes)]
all_masks = [[[] for _ in xrange(self.num_images)]
for _ in xrange(self.num_classes)]
_t = {'im_detect': Timer(), 'misc': Timer()}
for i in xrange(self.num_images):
_t['im_detect'].tic()
masks, boxes, seg_scores = self.cfm_network_forward(i)
for j in xrange(1, self.num_classes):
inds = np.where(seg_scores[:, j] > thresh[j])[0]
cls_scores = seg_scores[inds, j]
cls_boxes = boxes[inds, :]
cls_masks = masks[inds, :]
top_inds = np.argsort(-cls_scores)[:self.max_per_image]
cls_scores = cls_scores[top_inds]
cls_boxes = cls_boxes[top_inds, :]
cls_masks = cls_masks[top_inds, :]
# push new scores onto the min heap
for val in cls_scores:
heapq.heappush(top_scores[j], val)
# if we've collected more than the max number of detections,
# then pop items off the min heap and update the class threshold
if len(top_scores[j]) > self.max_per_set:
while len(top_scores[j]) > self.max_per_set:
heapq.heappop(top_scores[j])
thresh[j] = top_scores[j][0]
box_before_nms = np.hstack((cls_boxes, cls_scores[:, np.newaxis]))\
.astype(np.float32, copy=False)
mask_before_nms = cls_masks.astype(np.float32, copy=False)
all_boxes[j][i], all_masks[j][i] = apply_nms_mask_single(box_before_nms, mask_before_nms, cfg.TEST.NMS)
_t['im_detect'].toc()
print 'process image %d/%d, forward average time %f' % (i, self.num_images,
_t['im_detect'].average_time)
for j in xrange(1, self.num_classes):
for i in xrange(self.num_images):
inds = np.where(all_boxes[j][i][:, -1] > thresh[j])[0]
all_boxes[j][i] = all_boxes[j][i][inds, :]
all_masks[j][i] = all_masks[j][i][inds]
return all_boxes, all_masks
def cfm_network_forward(self, im_i):
im = cv2.imread(self.imdb.image_path_at(im_i))
roidb_cache = os.path.join('data/cache/voc_2012_val_mcg_maskdb/', self.imdb._image_index[im_i] + '.mat')
roidb = scipy.io.loadmat(roidb_cache)
boxes = roidb['boxes']
filter_keep = filter_small_boxes(boxes, min_size=16)
boxes = boxes[filter_keep, :]
masks = roidb['masks']
masks = masks[filter_keep, :, :]
assert boxes.shape[0] == masks.shape[0]
# Resize input mask, make it the same as CFM's input size
mask_resize = np.zeros((masks.shape[0], cfg.TEST.CFM_INPUT_MASK_SIZE, cfg.TEST.CFM_INPUT_MASK_SIZE))
for i in xrange(masks.shape[0]):
mask_resize[i, :, :] = cv2.resize(masks[i, :, :].astype(np.float),
(cfg.TEST.CFM_INPUT_MASK_SIZE, cfg.TEST.CFM_INPUT_MASK_SIZE))
masks = mask_resize
# Get top-k proposals from MCG
if cfg.TEST.USE_TOP_K_MCG:
num_keep = min(boxes.shape[0], cfg.TEST.USE_TOP_K_MCG)
boxes = boxes[:num_keep, :]
masks = masks[:num_keep, :, :]
assert boxes.shape[0] == masks.shape[0]
# deal with multi-scale testing:
# we group several adjacent scales into one forward pass
_, im_scale_factors = prep_im_for_blob_cfm(im, cfg.TEST.SCALES)
orig_boxes = boxes.copy()
boxes = pred_rois_for_blob(boxes, im_scale_factors)
num_scale_iter = int(np.ceil(len(cfg.TEST.SCALES) / float(cfg.TEST.GROUP_SCALE)))
LO_SCALE = 0
MAX_ROIS_GPU = cfg.TEST.MAX_ROIS_GPU
# set up return results
res_boxes = np.zeros((0, 4), dtype=np.float32)
res_masks = np.zeros((0, 1, cfg.MASK_SIZE, cfg.MASK_SIZE), dtype=np.float32)
res_seg_scores = np.zeros((0, self.num_classes), dtype=np.float32)
for scale_iter in xrange(num_scale_iter):
HI_SCALE = min(LO_SCALE + cfg.TEST.GROUP_SCALE, len(cfg.TEST.SCALES))
inds_this_scale = np.where((boxes[:, 0] >= LO_SCALE) & (boxes[:, 0] < HI_SCALE))[0]
if len(inds_this_scale) == 0:
LO_SCALE += cfg.TEST.GROUP_SCALE
continue
max_rois_this_scale = MAX_ROIS_GPU[scale_iter]
boxes_this_scale = boxes[inds_this_scale, :]
masks_this_scale = masks[inds_this_scale, :, :]
num_iter_this_scale = int(np.ceil(boxes_this_scale.shape[0] / float(max_rois_this_scale)))
# make the batch index of input box start from 0
boxes_this_scale[:, 0] -= min(boxes_this_scale[:, 0])
# re-prepare im blob for this_scale
input_blobs = {}
input_blobs['data'], _ = prep_im_for_blob_cfm(im, cfg.TEST.SCALES[LO_SCALE:HI_SCALE])
input_blobs['data'] = input_blobs['data'].astype(np.float32, copy=False)
input_start = 0
for test_iter in xrange(num_iter_this_scale):
input_end = min(input_start + max_rois_this_scale, boxes_this_scale.shape[0])
input_box = boxes_this_scale[input_start:input_end, :]
input_mask = masks_this_scale[input_start:input_end, :, :]
input_blobs['rois'] = input_box.astype(np.float32, copy=False)
input_blobs['masks'] = input_mask.reshape(input_box.shape[0], 1,
cfg.TEST.CFM_INPUT_MASK_SIZE, cfg.TEST.CFM_INPUT_MASK_SIZE
).astype(np.float32, copy=False)
input_blobs['masks'] = (input_blobs['masks'] >= cfg.BINARIZE_THRESH).astype(np.float32, copy=False)
self.net.blobs['data'].reshape(*input_blobs['data'].shape)
self.net.blobs['rois'].reshape(*input_blobs['rois'].shape)
self.net.blobs['masks'].reshape(*input_blobs['masks'].shape)
blobs_out = self.net.forward(**input_blobs)
output_mask = blobs_out['mask_prob'].copy()
output_score = blobs_out['seg_cls_prob'].copy()
res_masks = np.vstack((res_masks,
output_mask.reshape(
input_box.shape[0], 1, cfg.MASK_SIZE, cfg.MASK_SIZE
).astype(np.float32, copy=False)))
res_seg_scores = np.vstack((res_seg_scores, output_score))
input_start += max_rois_this_scale
res_boxes = np.vstack((res_boxes, orig_boxes[inds_this_scale, :]))
LO_SCALE += cfg.TEST.GROUP_SCALE
return res_masks, res_boxes, res_seg_scores
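# --- Illustrative aside (not part of the original TesterWrapper.py) -------
# get_detection_result / get_segmentation_result keep one min-heap of the
# best max_per_set scores per class; the heap root then acts as an adaptive
# score threshold. A self-contained sketch of that bookkeeping with
# synthetic scores (the real code does this per class, across images):
import heapq
import numpy as np

def adaptive_threshold(score_batches, max_per_set):
    top_scores = []          # min-heap of the retained scores
    thresh = -np.inf         # accept everything until the heap fills up
    for scores in score_batches:
        for val in scores:
            heapq.heappush(top_scores, val)
        if len(top_scores) > max_per_set:
            while len(top_scores) > max_per_set:
                heapq.heappop(top_scores)      # drop the weakest entries
            thresh = top_scores[0]             # min of the retained scores
    return thresh

batches = [np.random.rand(300) for _ in range(20)]
print(adaptive_threshold(batches, max_per_set=1000))   # roughly 5/6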
# ==== File: MNC-master/lib/caffeWrapper/SolverWrapper.py ====
# --------------------------------------------------------
# Multitask Network Cascade
# Modified from py-faster-rcnn (https://github.com/rbgirshick/py-faster-rcnn)
# Copyright (c) 2016, Haozhi Qi
# Licensed under The MIT License [see LICENSE for details]
# --------------------------------------------------------
import os
import numpy as np
from utils.timer import Timer
from mnc_config import cfg
from db.roidb import add_bbox_regression_targets, compute_mcg_mean_std
import caffe
from caffe.proto import caffe_pb2
import google.protobuf as pb2
class SolverWrapper(object):
""" A simple wrapper around Caffe's solver.
This wrapper gives us control over the snapshotting process, which we
use to unnormalize the learned bounding-box regression weights.
"""
def __init__(self, solver_prototxt, roidb, maskdb, output_dir, imdb,
pretrained_model=None):
self.output_dir = output_dir
if (cfg.TRAIN.HAS_RPN and cfg.TRAIN.BBOX_REG and
cfg.TRAIN.BBOX_NORMALIZE_TARGETS):
# RPN can only use precomputed normalization because there are no
# fixed statistics to compute a priori
assert cfg.TRAIN.BBOX_NORMALIZE_TARGETS_PRECOMPUTED
if cfg.TRAIN.BBOX_REG:
if not cfg.CFM_MODE:
print 'Computing bounding-box regression targets...'
self.bbox_means, self.bbox_stds = add_bbox_regression_targets(roidb)
print 'done'
else:
# Pre-defined mcg bbox_mean and bbox_std
# We cache them on disk to avoid doing disk-level IO
# multiple times (the MCG boxes are stored on disk)
mean_cache = './data/cache/mcg_bbox_mean.npy'
std_cache = './data/cache/mcg_bbox_std.npy'
roidb_dir = imdb._roidb_path
if os.path.exists(mean_cache) and os.path.exists(std_cache):
self.bbox_means = np.load(mean_cache)
self.bbox_stds = np.load(std_cache)
else:
self.bbox_means, self.bbox_stds = compute_mcg_mean_std(roidb_dir, imdb.num_classes)
self.solver = caffe.SGDSolver(solver_prototxt)
if pretrained_model is not None:
print 'Loading pretrained model weights from {:s}'.format(pretrained_model)
self.solver.net.copy_from(pretrained_model)
self.solver_param = caffe_pb2.SolverParameter()
with open(solver_prototxt, 'rt') as f:
pb2.text_format.Merge(f.read(), self.solver_param)
if not cfg.CFM_MODE:
self.solver.net.layers[0].set_roidb(roidb)
if cfg.MNC_MODE:
self.solver.net.layers[0].set_maskdb(maskdb)
else:
self.solver.net.layers[0].set_image_info(imdb, self.bbox_means, self.bbox_stds)
def snapshot(self):
""" Take a snapshot of the network after unnormalizing the learned
bounding-box regression weights. This enables easy use at test-time.
"""
net = self.solver.net
# I'm wondering whether I still need to keep it if only faster-RCNN is needed
scale_bbox_params = (cfg.TRAIN.BBOX_REG and
cfg.TRAIN.BBOX_NORMALIZE_TARGETS and
'bbox_pred' in net.params)
if scale_bbox_params:
# save original values
orig_0 = net.params['bbox_pred'][0].data.copy()
orig_1 = net.params['bbox_pred'][1].data.copy()
if cfg.CFM_MODE:
cfm_mean = self.bbox_means.ravel()
cfm_std = self.bbox_stds.ravel()
net.params['bbox_pred'][0].data[...] = \
(net.params['bbox_pred'][0].data * cfm_std[:, np.newaxis])
net.params['bbox_pred'][1].data[...] = \
(net.params['bbox_pred'][1].data * cfm_std + cfm_mean)
else:
# scale and shift with transform reg unnormalization; then save snapshot
net.params['bbox_pred'][0].data[...] = \
(net.params['bbox_pred'][0].data *
self.bbox_stds[:, np.newaxis])
net.params['bbox_pred'][1].data[...] = \
(net.params['bbox_pred'][1].data *
self.bbox_stds + self.bbox_means)
if not os.path.exists(self.output_dir):
os.makedirs(self.output_dir)
# If we specify an infix in the configuration
infix = ('_' + cfg.TRAIN.SNAPSHOT_INFIX
if cfg.TRAIN.SNAPSHOT_INFIX != '' else '')
filename = (self.solver_param.snapshot_prefix + infix +
'_iter_{:d}'.format(self.solver.iter) + '.caffemodel')
# For the snapshot caffemodel: since MNC uses shared parameters
# but caffe saves parameters according to layer names instead of
# parameter names, the file size would exceed 2GB and crash the program.
# Luckily, we can save it to HDF5 to avoid this issue.
if not cfg.MNC_MODE:
filename = os.path.join(self.output_dir, filename)
net.save(str(filename))
else:
filename = os.path.join(self.output_dir, filename + '.h5')
net.save_to_hdf5(str(filename), False)
print 'Wrote snapshot to: {:s}'.format(filename)
if scale_bbox_params:
# restore net to original state
net.params['bbox_pred'][0].data[...] = orig_0
net.params['bbox_pred'][1].data[...] = orig_1
def train_model(self, max_iters):
last_snapshot_iter = -1
timer = Timer()
while self.solver.iter < max_iters:
timer.tic()
self.solver.step(1)
timer.toc()
if self.solver.iter % (10 * self.solver_param.display) == 0:
print 'speed: {:.3f}s / iter'.format(timer.average_time)
if self.solver.iter % cfg.TRAIN.SNAPSHOT_ITERS == 0:
last_snapshot_iter = self.solver.iter
self.snapshot()
if last_snapshot_iter != self.solver.iter:
self.snapshot()
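# --- Illustrative aside (not part of the original SolverWrapper.py) -------
# Why snapshot() multiplies the bbox_pred weights by the target std and
# shifts the bias by the mean: training regresses normalized targets
# t_norm = (t - mean) / std, so the raw layer output approximates t_norm.
# Folding the statistics into the layer (W' = W * std, b' = b * std + mean)
# makes the snapshot predict unnormalized targets directly. A numpy check:
import numpy as np

rng = np.random.RandomState(0)
W, b, x = rng.randn(4, 8), rng.randn(4), rng.randn(8)
mean, std = rng.randn(4), rng.rand(4) + 0.5

t_norm = W.dot(x) + b                       # network output (normalized)
W_fold = W * std[:, np.newaxis]             # fold std into the weights
b_fold = b * std + mean                     # fold std/mean into the bias
print(np.allclose(t_norm * std + mean,      # post-hoc unnormalization...
                  W_fold.dot(x) + b_fold))  # ...equals the folded layer: True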
# ==== File: MNC-master/lib/pylayer/proposal_layer.py ====
# --------------------------------------------------------
# Multitask Network Cascade
# Modified from py-faster-rcnn (https://github.com/rbgirshick/py-faster-rcnn)
# Copyright (c) 2016, Haozhi Qi
# Licensed under The MIT License [see LICENSE for details]
# --------------------------------------------------------
import caffe
import numpy as np
import yaml
from mnc_config import cfg
from transform.anchors import generate_anchors
from transform.bbox_transform import clip_boxes, bbox_transform_inv, filter_small_boxes
from nms.nms_wrapper import nms
DEBUG = False
PRINT_GRADIENT = 1
class ProposalLayer(caffe.Layer):
"""
Outputs object detection proposals by applying estimated bounding-box
transformations to a set of regular boxes (called "anchors").
"""
def setup(self, bottom, top):
layer_params = yaml.load(self.param_str_)
self._feat_stride = layer_params['feat_stride']
self._anchors = generate_anchors()
self._num_anchors = self._anchors.shape[0]
self._use_clip = layer_params.get('use_clip', 0)
self._clip_denominator = float(layer_params.get('clip_base', 256))
self._clip_thresh = 1.0 / self._clip_denominator
# rois blob: holds R regions of interest, each is a 5-tuple
# (n, x1, y1, x2, y2) specifying an image batch index n and a
# rectangle (x1, y1, x2, y2)
self._top_name_map = {}
top[0].reshape(1, 5)
self._top_name_map['rois'] = 0
# For MNC, we force the output proposals to also be used to train RPN;
# this is achieved by passing proposal_index to the anchor_target_layer
if str(self.phase) == 'TRAIN':
if cfg.TRAIN.MIX_INDEX:
top[1].reshape(1, 1)
self._top_name_map['proposal_index'] = 1
def reshape(self, bottom, top):
"""Reshaping happens during the call to forward."""
pass
def forward(self, bottom, top):
# Algorithm:
#
# for each (H, W) location i
# generate A anchor boxes centered on cell i
# apply predicted transform deltas at cell i to each of the A anchors
# clip predicted boxes to image
# remove predicted boxes with either height or width < threshold
# sort all (proposal, score) pairs by score from highest to lowest
# take top pre_nms_topN proposals before NMS
# apply NMS with threshold 0.7 to remaining proposals
# take after_nms_topN proposals after NMS
# return the top proposals (-> RoIs top, scores top)
assert bottom[0].data.shape[0] == 1, 'Only single item batches are supported'
cfg_key = str(self.phase) # either 'TRAIN' or 'TEST'
pre_nms_topN = cfg[cfg_key].RPN_PRE_NMS_TOP_N
post_nms_topN = cfg[cfg_key].RPN_POST_NMS_TOP_N
nms_thresh = cfg[cfg_key].RPN_NMS_THRESH
min_size = cfg[cfg_key].RPN_MIN_SIZE
# the first set of _num_anchors channels are bg probs
# the second set are the fg probs, which we want
scores = bottom[0].data[:, self._num_anchors:, :, :]
bbox_deltas = bottom[1].data
im_info = bottom[2].data[0, :]
# 1. Generate proposals from transform deltas and shifted anchors
height, width = scores.shape[-2:]
self._height = height
self._width = width
# Enumerate all shifts
shift_x = np.arange(0, self._width) * self._feat_stride
shift_y = np.arange(0, self._height) * self._feat_stride
shift_x, shift_y = np.meshgrid(shift_x, shift_y)
shifts = np.vstack((shift_x.ravel(), shift_y.ravel(),
shift_x.ravel(), shift_y.ravel())).transpose()
# Enumerate all shifted anchors:
#
# add A anchors (1, A, 4) to
# cell K shifts (K, 1, 4) to get
# shift anchors (K, A, 4)
# reshape to (K*A, 4) shifted anchors
A = self._num_anchors
K = shifts.shape[0]
anchors = self._anchors.reshape((1, A, 4)) + \
shifts.reshape((1, K, 4)).transpose((1, 0, 2))
anchors = anchors.reshape((K * A, 4))
_, keep = clip_boxes(anchors, im_info[:2])
self._anchor_index_before_clip = keep
# Transpose and reshape the predicted transformations to get them
# into the same order as the anchors:
#
# transform deltas will be (1, 4 * A, H, W) format
# transpose to (1, H, W, 4 * A)
# reshape to (1 * H * W * A, 4) where rows are ordered by (h, w, a)
# in slowest to fastest order
bbox_deltas = bbox_deltas.transpose((0, 2, 3, 1)).reshape((-1, 4))
# Same story for the scores:
#
# scores are (1, A, H, W) format
# transpose to (1, H, W, A)
# reshape to (1 * H * W * A, 1) where rows are ordered by (h, w, a)
scores = scores.transpose((0, 2, 3, 1)).reshape((-1, 1))
# Convert anchors into proposals via the predicted transformations
proposals = bbox_transform_inv(anchors, bbox_deltas)
# 2. clip predicted boxes to image
proposals, keep = clip_boxes(proposals, im_info[:2])
# Record the corresponding indices before and after clipping.
# This step doesn't need unmapping;
# we need it to decide whether to do back propagation
self._proposal_index_before_clip = keep
# 3. remove predicted boxes with either height or width < threshold
# (NOTE: convert min_size to input image scale stored in im_info[2])
keep = filter_small_boxes(proposals, min_size * im_info[2])
proposals = proposals[keep, :]
scores = scores[keep]
self._ind_after_filter = keep
# 4. sort all (proposal, score) pairs by score from highest to lowest
# 5. take top pre_nms_topN (e.g. 6000)
order = scores.ravel().argsort()[::-1]
if pre_nms_topN > 0:
order = order[:pre_nms_topN]
proposals = proposals[order, :]
scores = scores[order]
self._ind_after_sort = order
# 6. apply nms (e.g. threshold = 0.7)
# 7. take after_nms_topN (e.g. 300)
# 8. return the top proposals (-> RoIs top)
keep = nms(np.hstack((proposals, scores)), nms_thresh)
if post_nms_topN > 0:
keep = keep[:post_nms_topN]
proposals = proposals[keep, :]
scores = scores[keep]
# Output rois blob
# Our RPN implementation only supports a single input image, so all
# batch inds are 0
batch_inds = np.zeros((proposals.shape[0], 1), dtype=np.float32)
proposals = np.hstack((batch_inds, proposals.astype(np.float32, copy=False)))
self._proposal_index = keep
blobs = {
'rois': proposals
}
if str(self.phase) == 'TRAIN':
if cfg.TRAIN.MIX_INDEX:
all_rois_index = self._ind_after_filter[self._ind_after_sort[self._proposal_index]].reshape(1, len(keep))
blobs['proposal_index'] = all_rois_index
# Copy data to forward to top layer
for blob_name, blob in blobs.iteritems():
top[self._top_name_map[blob_name]].reshape(*blob.shape)
top[self._top_name_map[blob_name]].data[...] = blob.astype(np.float32, copy=False)
def backward(self, top, propagate_down, bottom):
if propagate_down[1]:
bottom[1].diff.fill(0.0)
# only consider non-zero top gradients to accelerate the computation
top_non_zero_ind = np.unique(np.where(abs(top[0].diff[:, :]) > 0)[0])
proposal_index = np.asarray(self._proposal_index)
# unmap indexes to the original scale
unmap_val = self._ind_after_filter[self._ind_after_sort[proposal_index[top_non_zero_ind]]]
# not back propagate gradient if proposals/anchors are out of image boundary
# this is a 0/1 mask so we just multiply them when calculating bottom gradient
weight_out_proposal = np.in1d(unmap_val, self._proposal_index_before_clip)
weight_out_anchor = np.in1d(unmap_val, self._anchor_index_before_clip)
# unmap_val are arranged as (H * W * A) as stated in forward comment
# with A as the fastest dimension (which is different from caffe)
c = unmap_val % self._num_anchors
w = (unmap_val / self._num_anchors) % self._width
h = (unmap_val / self._num_anchors / self._width) % self._height
# width and height should be in feature map scale
anchor_w = (self._anchors[c, 2] - self._anchors[c, 0])
anchor_h = (self._anchors[c, 3] - self._anchors[c, 1])
dfdx1 = top[0].diff[top_non_zero_ind, 1]
dfdy1 = top[0].diff[top_non_zero_ind, 2]
dfdx2 = top[0].diff[top_non_zero_ind, 3]
dfdy2 = top[0].diff[top_non_zero_ind, 4]
dfdxc = dfdx1 + dfdx2
dfdyc = dfdy1 + dfdy2
dfdw = 0.5 * (dfdx2 - dfdx1)
dfdh = 0.5 * (dfdy2 - dfdy1)
bottom[1].diff[0, 4*c, h, w] = \
dfdxc * anchor_w * weight_out_proposal * weight_out_anchor
bottom[1].diff[0, 4*c+1, h, w] = \
dfdyc * anchor_h * weight_out_proposal * weight_out_anchor
bottom[1].diff[0, 4*c+2, h, w] = \
dfdw * np.exp(bottom[1].data[0, 4*c+2, h, w]) * anchor_w * weight_out_proposal * weight_out_anchor
bottom[1].diff[0, 4*c+3, h, w] = \
dfdh * np.exp(bottom[1].data[0, 4*c+3, h, w]) * anchor_h * weight_out_proposal * weight_out_anchor
# if use gradient clip, constraint gradient inside [-thresh, thresh]
if self._use_clip:
bottom[1].diff[0, 4*c, h, w] = np.minimum(np.maximum(
bottom[1].diff[0, 4*c, h, w], -self._clip_thresh), self._clip_thresh)
bottom[1].diff[0, 4*c+1, h, w] = np.minimum(np.maximum(
bottom[1].diff[0, 4*c+1, h, w], -self._clip_thresh), self._clip_thresh)
bottom[1].diff[0, 4*c+2, h, w] = np.minimum(np.maximum(
bottom[1].diff[0, 4*c+2, h, w], -self._clip_thresh), self._clip_thresh)
bottom[1].diff[0, 4*c+3, h, w] = np.minimum(np.maximum(
bottom[1].diff[0, 4*c+3, h, w], -self._clip_thresh), self._clip_thresh)
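# --- Illustrative aside (not part of the original proposal_layer.py) ------
# The forward pass materializes all K*A anchors by broadcasting the A base
# anchors over K feature-map shifts. A self-contained sketch of that
# enumeration; the two toy base anchors stand in for generate_anchors(),
# whose actual output is defined elsewhere in the repo:
import numpy as np

def enumerate_shifted_anchors(base_anchors, feat_stride, height, width):
    shift_x = np.arange(0, width) * feat_stride
    shift_y = np.arange(0, height) * feat_stride
    shift_x, shift_y = np.meshgrid(shift_x, shift_y)
    shifts = np.vstack((shift_x.ravel(), shift_y.ravel(),
                        shift_x.ravel(), shift_y.ravel())).transpose()
    A, K = base_anchors.shape[0], shifts.shape[0]
    # (1, A, 4) + (K, 1, 4) broadcasts to (K, A, 4): every anchor, every cell
    anchors = (base_anchors.reshape((1, A, 4)) +
               shifts.reshape((1, K, 4)).transpose((1, 0, 2)))
    return anchors.reshape((K * A, 4))

toy_anchors = np.array([[-8, -8, 8, 8], [-16, -8, 16, 8]])
print(enumerate_shifted_anchors(toy_anchors, 16, 3, 4).shape)  # (24, 4)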
# ==== File: MNC-master/lib/pylayer/mnc_data_layer.py ====
# --------------------------------------------------------
# Multitask Network Cascade
# Modified from py-faster-rcnn (https://github.com/rbgirshick/py-faster-rcnn)
# Copyright (c) 2016, Haozhi Qi
# Licensed under The MIT License [see LICENSE for details]
# --------------------------------------------------------
import cv2
import numpy as np
import yaml
import caffe
from mnc_config import cfg
from utils.blob import prep_im_for_blob, im_list_to_blob
class MNCDataLayer(caffe.Layer):
"""
Provide image, image w/h/scale, gt boxes/masks and mask info to upper layers
"""
def setup(self, bottom, top):
layer_params = yaml.load(self.param_str_)
self._num_classes = layer_params['num_classes']
self._name_to_top_map = {}
# data blob: holds a batch of N images, each with 3 channels
top[0].reshape(cfg.TRAIN.IMS_PER_BATCH, 3, max(cfg.TRAIN.SCALES), cfg.TRAIN.MAX_SIZE)
self._name_to_top_map['data'] = 0
assert cfg.TRAIN.HAS_RPN, 'Use RPN for this project'
# Just a pseudo setup; actual shapes are set during forward
top[1].reshape(1, 3)
self._name_to_top_map['im_info'] = 1
top[2].reshape(1, 4)
self._name_to_top_map['gt_boxes'] = 2
if cfg.MNC_MODE:
top[3].reshape(1, 21, 21)
self._name_to_top_map['gt_masks'] = 3
top[4].reshape(1, 3)
self._name_to_top_map['mask_info'] = 4
assert len(top) == len(self._name_to_top_map)
def reshape(self, bottom, top):
"""Reshaping happens during the call to forward."""
pass
def forward(self, bottom, top):
"""Get blobs and copy them into this layer's top blob vector."""
blobs = self._get_next_minibatch()
for blob_name, blob in blobs.iteritems():
top_ind = self._name_to_top_map[blob_name]
# Reshape net's input blobs
top[top_ind].reshape(*blob.shape)
# Copy data into net's input blobs
top[top_ind].data[...] = blob.astype(np.float32, copy=False)
def backward(self, top, propagate_down, bottom):
"""This layer does not propagate gradients."""
pass
def set_roidb(self, roidb):
"""Set the roidb to be used by this layer during training."""
self._roidb = roidb
self._shuffle_roidb_inds()
def set_maskdb(self, maskdb):
self._maskdb = maskdb
self._shuffle_roidb_inds()
def _shuffle_roidb_inds(self):
"""Randomly permute the training roidb."""
if cfg.TRAIN.ASPECT_GROUPING:
widths = np.array([r['width'] for r in self._roidb])
heights = np.array([r['height'] for r in self._roidb])
horz = (widths >= heights)
vert = np.logical_not(horz)
horz_inds = np.where(horz)[0]
vert_inds = np.where(vert)[0]
inds = np.hstack((
np.random.permutation(horz_inds),
np.random.permutation(vert_inds)))
inds = np.reshape(inds, (-1, 2))
row_perm = np.random.permutation(np.arange(inds.shape[0]))
inds = np.reshape(inds[row_perm, :], (-1,))
self._perm = inds
else:
self._perm = np.random.permutation(np.arange(len(self._roidb)))
self._cur = 0
def _get_image_blob(self, roidb, scale_inds):
"""Builds an input blob from the images in the roidb at the specified
scales.
"""
num_images = 1 # len(roidb)
processed_ims = []
im_scales = []
for i in xrange(num_images):
im = cv2.imread(roidb['image'])
if roidb['flipped']:
im = im[:, ::-1, :]
target_size = cfg.TRAIN.SCALES[scale_inds[i]]
im, im_scale = prep_im_for_blob(im, cfg.PIXEL_MEANS, target_size,
cfg.TRAIN.MAX_SIZE)
im_scales.append(im_scale)
processed_ims.append(im)
# Create a blob to hold the input images
blob = im_list_to_blob(processed_ims)
return blob, im_scales
def _get_next_minibatch(self):
"""
Return the blobs to be used for the next minibatch.
"""
assert cfg.TRAIN.IMS_PER_BATCH == 1, 'Only single batch forwarding is supported'
if self._cur + cfg.TRAIN.IMS_PER_BATCH >= len(self._roidb):
self._shuffle_roidb_inds()
db_inds = self._perm[self._cur]
self._cur += 1
roidb = self._roidb[db_inds]
random_scale_inds = np.random.randint(0, high=len(cfg.TRAIN.SCALES), size=1)
im_blob, im_scales = self._get_image_blob(roidb, random_scale_inds)
gt_label = np.where(roidb['gt_classes'] != 0)[0]
gt_boxes = np.hstack((roidb['boxes'][gt_label, :] * im_scales[0],
roidb['gt_classes'][gt_label, np.newaxis])).astype(np.float32)
blobs = {
'data': im_blob,
'gt_boxes': gt_boxes,
'im_info': np.array([[im_blob.shape[2], im_blob.shape[3], im_scales[0]]], dtype=np.float32)
}
if cfg.MNC_MODE:
maskdb = self._maskdb[db_inds]
mask_list = maskdb['gt_masks']
mask_max_x = maskdb['mask_max'][0]
mask_max_y = maskdb['mask_max'][1]
gt_masks = np.zeros((len(mask_list), mask_max_y, mask_max_x))
mask_info = np.zeros((len(mask_list), 2))
for j in xrange(len(mask_list)):
mask = mask_list[j]
mask_x = mask.shape[1]
mask_y = mask.shape[0]
gt_masks[j, 0:mask_y, 0:mask_x] = mask
mask_info[j, 0] = mask_y
mask_info[j, 1] = mask_x
blobs['gt_masks'] = gt_masks
blobs['mask_info'] = mask_info
return blobs
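# --- Illustrative aside (not part of the original mnc_data_layer.py) ------
# _shuffle_roidb_inds with ASPECT_GROUPING pairs images of like orientation
# so adjacent samples share an aspect ratio (inherited from py-faster-rcnn).
# A self-contained sketch, assuming each orientation group has even size:
import numpy as np

def aspect_grouped_permutation(widths, heights, rng):
    horz = widths >= heights
    horz_inds = np.where(horz)[0]
    vert_inds = np.where(np.logical_not(horz))[0]
    inds = np.hstack((rng.permutation(horz_inds),
                      rng.permutation(vert_inds)))
    inds = inds.reshape((-1, 2))               # adjacent same-orientation pairs
    row_perm = rng.permutation(np.arange(inds.shape[0]))
    return inds[row_perm, :].reshape((-1,))    # shuffle the pairs, then flatten

rng = np.random.RandomState(0)
w = np.array([640, 640, 640, 640, 480, 480])
h = np.array([480, 480, 480, 480, 640, 640])
print(aspect_grouped_permutation(w, h, rng))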
# ==== File: MNC-master/lib/pylayer/proposal_target_layer.py ====
# --------------------------------------------------------
# Multitask Network Cascade
# Modified from py-faster-rcnn (https://github.com/rbgirshick/py-faster-rcnn)
# Copyright (c) 2016, Haozhi Qi
# Licensed under The MIT License [see LICENSE for details]
# --------------------------------------------------------
import caffe
import yaml
import numpy as np
import numpy.random as npr
from mnc_config import cfg
from transform.bbox_transform import \
bbox_transform, bbox_compute_targets, \
scale_boxes, get_bbox_regression_label
from transform.anchors import generate_anchors
from transform.mask_transform import intersect_mask
from utils.cython_bbox import bbox_overlaps
class ProposalTargetLayer(caffe.Layer):
"""
Assign object detection proposals to ground-truth targets. Produces proposal
classification labels and bounding-box regression targets.
"""
def setup(self, bottom, top):
layer_params = yaml.load(self.param_str_)
self._anchors = generate_anchors()
self._num_anchors = self._anchors.shape[0]
self._num_classes = layer_params['num_classes']
self._bp_all = layer_params.get('bp_all', True)
self._top_name_map = {}
top[0].reshape(1, 5)
self._top_name_map['rois'] = 0
top[1].reshape(1, 1)
self._top_name_map['labels'] = 1
top[2].reshape(1, self._num_classes * 4)
self._top_name_map['bbox_targets'] = 2
top[3].reshape(1, self._num_classes * 4)
self._top_name_map['bbox_inside_weights'] = 3
top[4].reshape(1, self._num_classes * 4)
self._top_name_map['bbox_outside_weights'] = 4
# Add mask-related information
if cfg.MNC_MODE:
top[5].reshape(1, 1, cfg.MASK_SIZE, cfg.MASK_SIZE)
self._top_name_map['mask_targets'] = 5
top[6].reshape(1, 1, cfg.MASK_SIZE, cfg.MASK_SIZE)
self._top_name_map['mask_weight'] = 6
top[7].reshape(1, 4)
self._top_name_map['gt_masks_info'] = 7
if cfg.TRAIN.MIX_INDEX:
top[8].reshape(1, 4)
self._top_name_map['fg_inds'] = 8
top[9].reshape(1, 4)
self._top_name_map['bg_inds'] = 9
def reshape(self, bottom, top):
"""Reshaping happens during the call to forward."""
pass
def forward(self, bottom, top):
# Proposal ROIs (0, x1, y1, x2, y2) coming from RPN
# (i.e., rpn.proposal_layer.ProposalLayer), or any other source
all_rois = bottom[0].data
# GT boxes (x1, y1, x2, y2, label)
gt_boxes = bottom[1].data
im_info = bottom[2].data[0, :]
im_scale = im_info[2]
# get original masks
if cfg.MNC_MODE:
gt_masks = bottom[3].data
mask_info = bottom[4].data
else:
gt_masks = None
mask_info = None
# Include ground-truth boxes in the set of candidate rois
zeros = np.zeros((gt_boxes.shape[0], 1), dtype=gt_boxes.dtype)
all_rois = np.vstack(
(all_rois, np.hstack((zeros, gt_boxes[:, :-1])))
)
# Sanity check: single batch only
assert np.all(all_rois[:, 0] == 0), \
'Only single item batches are supported'
num_images = 1
rois_per_image = cfg.TRAIN.BATCH_SIZE / num_images
# Sample rois with classification labels and bounding box regression targets
blobs, fg_inds, bg_inds, keep_inds = _sample_rois(
all_rois, gt_boxes, rois_per_image, self._num_classes, gt_masks, im_scale, mask_info)
self._keep_ind = keep_inds if self._bp_all else fg_inds
for blob_name, blob in blobs.iteritems():
top[self._top_name_map[blob_name]].reshape(*blob.shape)
top[self._top_name_map[blob_name]].data[...] = blob.astype(np.float32, copy=False)
if cfg.TRAIN.MIX_INDEX:
all_rois_index = bottom[5].data
fg_inds = fg_inds[fg_inds < all_rois_index.shape[1]].astype(int)
fg_inds = all_rois_index[0, fg_inds]
bg_inds = all_rois_index[0, bg_inds.astype(int)]
top[self._top_name_map['fg_inds']].reshape(*fg_inds.shape)
top[self._top_name_map['fg_inds']].data[...] = fg_inds
top[self._top_name_map['bg_inds']].reshape(*bg_inds.shape)
top[self._top_name_map['bg_inds']].data[...] = bg_inds
def backward(self, top, propagate_down, bottom):
if propagate_down[0]:
bottom[0].diff.fill(0.)
# Eliminate gt_inds from the keep inds
valid_inds = np.where(self._keep_ind < bottom[0].diff.shape[0])[0]
valid_bot_inds = self._keep_ind[valid_inds].astype(int)
bottom[0].diff[valid_bot_inds, :] = top[0].diff[valid_inds, :]
def _sample_rois(all_rois, gt_boxes, rois_per_image, num_classes, gt_masks, im_scale, mask_info):
"""
Generate a random sample of RoIs comprising
foreground and background examples.
"""
# overlaps: (rois x gt_boxes)
overlaps = bbox_overlaps(
np.ascontiguousarray(all_rois[:, 1:5], dtype=np.float),
np.ascontiguousarray(gt_boxes[:, :4], dtype=np.float))
gt_assignment = overlaps.argmax(axis=1)
max_overlaps = overlaps.max(axis=1)
labels = gt_boxes[gt_assignment, 4]
# Sample foreground indexes
fg_inds = []
for i in xrange(len(cfg.TRAIN.FG_FRACTION)):
cur_inds = np.where((max_overlaps >= cfg.TRAIN.FG_THRESH_LO[i]) &
(max_overlaps <= cfg.TRAIN.FG_THRESH_HI[i]))[0]
cur_rois_this_image = int(min(cur_inds.size, np.round(rois_per_image * cfg.TRAIN.FG_FRACTION[i])))  # npr.choice needs an int size
if cur_inds.size > 0:
cur_inds = npr.choice(cur_inds, size=cur_rois_this_image, replace=False)
fg_inds = np.hstack((fg_inds, cur_inds))
fg_inds = np.unique(fg_inds)
fg_rois_per_image = fg_inds.size
# Sample background indexes according to number of foreground
bg_rois_per_this_image = rois_per_image - fg_rois_per_image
bg_inds = []
for i in xrange(len(cfg.TRAIN.BG_FRACTION)):
cur_inds = np.where((max_overlaps >= cfg.TRAIN.BG_THRESH_LO[i]) &
(max_overlaps <= cfg.TRAIN.BG_THRESH_HI[i]))[0]
cur_rois_this_image = int(min(cur_inds.size, np.round(bg_rois_per_this_image * cfg.TRAIN.BG_FRACTION[i])))  # npr.choice needs an int size
if cur_inds.size > 0:
cur_inds = npr.choice(cur_inds, size=cur_rois_this_image, replace=False)
bg_inds = np.hstack((bg_inds, cur_inds))
bg_inds = np.unique(bg_inds)
# The indices that we're selecting (both fg and bg)
keep_inds = np.append(fg_inds, bg_inds).astype(int)
# Select sampled values from various arrays:
labels = labels[keep_inds]
# Clamp labels for the background RoIs to 0
labels[fg_rois_per_image:] = 0
rois = all_rois[keep_inds]
bbox_target_data = bbox_compute_targets(
rois[:, 1:5], gt_boxes[gt_assignment[keep_inds], :4], normalize=True)
bbox_target_data = np.hstack((labels[:, np.newaxis], bbox_target_data))\
.astype(np.float32, copy=False)
bbox_targets, bbox_inside_weights = get_bbox_regression_label(
bbox_target_data, num_classes)
bbox_outside_weights = np.array(bbox_inside_weights > 0).astype(np.float32)
blobs = {
'rois': rois,
'labels': labels,
'bbox_targets': bbox_targets,
'bbox_inside_weights': bbox_inside_weights,
'bbox_outside_weights': bbox_outside_weights
}
if cfg.MNC_MODE:
scaled_rois = rois[:, 1:5] / float(im_scale)
# map to original image space
scaled_gt_boxes = gt_boxes[:, :4] / float(im_scale)
pos_masks = np.zeros((len(keep_inds), 1, cfg.MASK_SIZE, cfg.MASK_SIZE))
top_mask_info = np.zeros((len(keep_inds), 12))
top_mask_info[len(fg_inds):, :] = -1
for i, val in enumerate(fg_inds):
gt_box = scaled_gt_boxes[gt_assignment[val]]
gt_box = np.around(gt_box).astype(int)
ex_box = np.around(scaled_rois[i]).astype(int)
gt_mask = gt_masks[gt_assignment[val]]
gt_mask_info = mask_info[gt_assignment[val]]
gt_mask = gt_mask[0:gt_mask_info[0], 0:gt_mask_info[1]]
# calculate mask regression targets
# (intersection of bounding box and gt mask)
ex_mask = intersect_mask(ex_box, gt_box, gt_mask)
pos_masks[i, ...] = ex_mask
top_mask_info[i, 0] = gt_assignment[val]
top_mask_info[i, 1] = gt_mask_info[0]
top_mask_info[i, 2] = gt_mask_info[1]
top_mask_info[i, 3] = labels[i]
top_mask_info[i, 4:8] = ex_box
top_mask_info[i, 8:12] = gt_box
mask_weight = np.zeros((rois.shape[0], 1, cfg.MASK_SIZE, cfg.MASK_SIZE))
# only assign box-level foreground as positive mask regression
mask_weight[0:len(fg_inds), :, :, :] = 1
blobs['mask_targets'] = pos_masks
blobs['mask_weight'] = mask_weight
blobs['gt_masks_info'] = top_mask_info
return blobs, fg_inds, bg_inds, keep_inds
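# --- Illustrative aside (not part of the original file) -------------------
# _sample_rois draws foreground/background RoIs from several max-overlap
# buckets, taking fractions[i] of the quota from bucket [lo[i], hi[i]]
# (mirroring cfg.TRAIN.FG_FRACTION / FG_THRESH_LO / FG_THRESH_HI and the
# BG_* counterparts). A self-contained sketch with synthetic overlaps:
import numpy as np

def bucket_sample(max_overlaps, lo, hi, fractions, quota, rng):
    picked = []
    for i in range(len(fractions)):
        cur = np.where((max_overlaps >= lo[i]) & (max_overlaps <= hi[i]))[0]
        n = int(min(cur.size, np.round(quota * fractions[i])))
        if cur.size > 0:
            picked.append(rng.choice(cur, size=n, replace=False))
    if not picked:
        return np.array([], dtype=int)
    return np.unique(np.hstack(picked)).astype(int)

rng = np.random.RandomState(0)
overlaps = rng.rand(300)                       # stand-in for IoU with gt
fg = bucket_sample(overlaps, [0.5], [1.0], [0.25], 128, rng)
bg = bucket_sample(overlaps, [0.0], [0.5], [1.0], 128 - fg.size, rng)
print('{} fg, {} bg'.format(fg.size, bg.size))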
# ==== File: MNC-master/lib/pylayer/mask_layer.py ====
# --------------------------------------------------------
# Multitask Network Cascade
# Written by Haozhi Qi
# Copyright (c) 2016, Haozhi Qi
# Licensed under The MIT License [see LICENSE for details]
# --------------------------------------------------------
import caffe
import cv2
import numpy as np
from transform.mask_transform import mask_overlap
from mnc_config import cfg
class MaskLayer(caffe.Layer):
"""
This layer takes sigmoid-predicted masks as input and assigns
a label to each mask for the segmentation classifier according
to its region overlap with the ground truth
"""
def setup(self, bottom, top):
self._phase = str(self.phase)
self._top_name_map = {}
top[0].reshape(1, 1, cfg.MASK_SIZE, cfg.MASK_SIZE)
self._top_name_map['mask_proposal'] = 0
if self._phase == 'TRAIN':
top[1].reshape(1, 1)
self._top_name_map['mask_proposal_label'] = 1
def reshape(self, bottom, top):
"""
Reshaping happens during the call to forward
"""
pass
def forward(self, bottom, top):
if str(self.phase) == 'TRAIN':
blobs = self.forward_train(bottom, top)
elif str(self.phase) == 'TEST':
blobs = self.forward_test(bottom, top)
else:
print 'Unrecognized phase'
raise NotImplementedError
for blob_name, blob in blobs.iteritems():
top[self._top_name_map[blob_name]].reshape(*blob.shape)
top[self._top_name_map[blob_name]].data[...] = blob.astype(np.float32, copy=False)
def backward(self, top, propagate_down, bottom):
if propagate_down[0]:
bottom[0].diff.fill(0.)
top_grad = top[0].diff.reshape(top[0].diff.shape[0], cfg.MASK_SIZE * cfg.MASK_SIZE)
bottom[0].diff[self.pos_sample, :] = top_grad[self.pos_sample, :]
def forward_train(self, bottom, top):
# Take sigmoid prediction as input
mask_pred = bottom[0].data
# get ground truth mask and labels
gt_masks = bottom[1].data
gt_masks_info = bottom[2].data
num_mask_pred = mask_pred.shape[0]
top_label = np.zeros((gt_masks_info.shape[0], 1))
# Calculate region overlap:
# since the target gt masks may have different sizes,
# we need to resize each predicted mask accordingly
mask_size = cfg.MASK_SIZE
for i in xrange(num_mask_pred):
# if the bounding box is itself background
if gt_masks_info[i][0] == -1:
top_label[i][0] = 0
continue
else:
info = gt_masks_info[i]
gt_mask = gt_masks[info[0]][0:info[1], 0:info[2]]
ex_mask = mask_pred[i].reshape((mask_size, mask_size))
ex_box = np.round(info[4:8]).astype(int)
gt_box = np.round(info[8:12]).astype(int)
# resize to the (larger) gt mask scale; note cv2.resize takes (width, height)
ex_mask = cv2.resize(ex_mask.astype(np.float32), (ex_box[2] - ex_box[0] + 1,
ex_box[3] - ex_box[1] + 1))
ex_mask = ex_mask >= cfg.BINARIZE_THRESH
top_label[i][0] = 0 if mask_overlap(ex_box, gt_box, ex_mask, gt_mask) < cfg.TRAIN.FG_SEG_THRESH else info[3]
# output continuous mask for MNC
resized_mask_pred = mask_pred.reshape((num_mask_pred, 1, cfg.MASK_SIZE, cfg.MASK_SIZE))
self.pos_sample = np.where(top_label > 0)[0]
blobs = {
'mask_proposal': resized_mask_pred,
'mask_proposal_label': top_label
}
return blobs
def forward_test(self, bottom, top):
mask_pred = bottom[0].data
num_mask_pred = mask_pred.shape[0]
resized_mask_pred = mask_pred.reshape((num_mask_pred, 1, cfg.MASK_SIZE, cfg.MASK_SIZE))
blobs = {
'mask_proposal': resized_mask_pred
}
return blobs
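# --- Illustrative aside (not part of the original mask_layer.py) ----------
# forward_train resizes each predicted mask to its box, binarizes it, and
# keeps the class label only when its overlap with the ground-truth mask
# exceeds cfg.TRAIN.FG_SEG_THRESH. A plain-numpy stand-in for the imported
# mask_overlap (the real implementation lives in transform.mask_transform):
import numpy as np

def mask_iou(box_a, box_b, mask_a, mask_b):
    """IoU of two binary masks, each defined inside its own integer box
    [x1, y1, x2, y2] (inclusive coordinates)."""
    x1, y1 = max(box_a[0], box_b[0]), max(box_a[1], box_b[1])
    x2, y2 = min(box_a[2], box_b[2]), min(box_a[3], box_b[3])
    if x1 > x2 or y1 > y2:
        return 0.0                              # boxes do not intersect
    # crop both masks to the intersection window, in each mask's own frame
    inter_a = mask_a[y1 - box_a[1]:y2 - box_a[1] + 1,
                     x1 - box_a[0]:x2 - box_a[0] + 1]
    inter_b = mask_b[y1 - box_b[1]:y2 - box_b[1] + 1,
                     x1 - box_b[0]:x2 - box_b[0] + 1]
    inter = np.logical_and(inter_a, inter_b).sum()
    union = mask_a.sum() + mask_b.sum() - inter
    return float(inter) / max(union, 1)

a = np.ones((5, 5), dtype=bool)
b = np.ones((5, 5), dtype=bool)
print(mask_iou([0, 0, 4, 4], [2, 2, 6, 6], a, b))  # 9 / 41 ~= 0.22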
# ==== File: MNC-master/lib/pylayer/stage_bridge_layer.py ====
# --------------------------------------------------------
# Multitask Network Cascade
# Written by Haozhi Qi
# Copyright (c) 2016, Haozhi Qi
# Licensed under The MIT License [see LICENSE for details]
# --------------------------------------------------------
import caffe
import numpy as np
import yaml
from transform.bbox_transform import \
bbox_transform_inv, bbox_compute_targets, \
clip_boxes, get_bbox_regression_label
from transform.mask_transform import intersect_mask
from mnc_config import cfg
from utils.cython_bbox import bbox_overlaps
class StageBridgeLayer(caffe.Layer):
"""
This layer takes bounding-box predictions as input
and outputs a set of new rois after applying the transformation.
It also provides mask/bbox regression targets
during the training phase
"""
def setup(self, bottom, top):
layer_params = yaml.load(self.param_str_)
# bottom 0 is ~ n ROIs to train Fast RCNN
# bottom 1 is ~ n * 4(1+c) bbox prediction
# bottom 2 is ~ n * (1+c) bbox scores (seg classification)
self._phase = str(self.phase)
if self._phase == 'TRAIN':
self._use_clip = layer_params['use_clip']
self._clip_denominator = float(layer_params.get('clip_base', 64))
self._clip_thresh = 1.0 / self._clip_denominator
self._feat_stride = layer_params['feat_stride']
self._num_classes = layer_params['num_classes']
# meaning of top blobs speak for themselves
self._top_name_map = {}
if self._phase == 'TRAIN':
top[0].reshape(1, 5)
self._top_name_map['rois'] = 0
top[1].reshape(1, 1)
self._top_name_map['labels'] = 1
top[2].reshape(1, 1, cfg.MASK_SIZE, cfg.MASK_SIZE)
self._top_name_map['mask_targets'] = 2
top[3].reshape(1, 1, cfg.MASK_SIZE, cfg.MASK_SIZE)
self._top_name_map['mask_weight'] = 3
top[4].reshape(1, 4)
self._top_name_map['gt_mask_info'] = 4
top[5].reshape(1, self._num_classes * 4)
self._top_name_map['bbox_targets'] = 5
top[6].reshape(1, self._num_classes * 4)
self._top_name_map['bbox_inside_weights'] = 6
top[7].reshape(1, self._num_classes * 4)
self._top_name_map['bbox_outside_weights'] = 7
elif self._phase == 'TEST':
top[0].reshape(1, 5)
self._top_name_map['rois'] = 0
else:
print 'Unrecognized phase'
raise NotImplementedError
def reshape(self, bottom, top):
# reshape happens during forward
pass
def forward(self, bottom, top):
if str(self.phase) == 'TRAIN':
blobs = self.forward_train(bottom, top)
elif str(self.phase) == 'TEST':
blobs = self.forward_test(bottom, top)
else:
print 'Unrecognized phase'
raise NotImplementedError
for blob_name, blob in blobs.iteritems():
top[self._top_name_map[blob_name]].reshape(*blob.shape)
top[self._top_name_map[blob_name]].data[...] = blob.astype(np.float32, copy=False)
def backward(self, top, propagate_down, bottom):
"""
        Description:
            Back-propagate gradients to two bottoms: the input ROIs
            (bottom[0]) and the bbox deltas (bottom[1]). The top diff is
            w.r.t. the transformed boxes (x_new, y_new, w_new, h_new).
"""
deltas = bottom[1].data
dfdxc = top[0].diff[:, 1]
dfdyc = top[0].diff[:, 2]
dfdw = top[0].diff[:, 3]
dfdh = top[0].diff[:, 4]
W_old = bottom[0].data[:, 2] - bottom[0].data[:, 0]
H_old = bottom[0].data[:, 3] - bottom[0].data[:, 1]
if propagate_down[0]:
bottom[0].diff.fill(0.)
for ind, i in enumerate(self._keep_inds):
if i >= bottom[0].diff.shape[0] or self._bbox_reg_labels[i] == 0:
continue
delta_x = deltas[i, 4*self._bbox_reg_labels[i]]
delta_y = deltas[i, 4*self._bbox_reg_labels[i]+1]
delta_w = deltas[i, 4*self._bbox_reg_labels[i]+2]
delta_h = deltas[i, 4*self._bbox_reg_labels[i]+3]
bottom[0].diff[i, 1] = dfdxc[ind]
bottom[0].diff[i, 2] = dfdyc[ind]
bottom[0].diff[i, 3] = dfdw[ind] * (delta_x + np.exp(delta_w))
bottom[0].diff[i, 4] = dfdh[ind] * (delta_y + np.exp(delta_h))
if propagate_down[1]:
bottom[1].diff.fill(0.)
for ind, i in enumerate(self._keep_inds):
if i >= bottom[1].diff.shape[0] or i not in self._clip_keep or self._bbox_reg_labels[i] == 0:
continue
delta_w = deltas[i, 4*self._bbox_reg_labels[i]+2]
delta_h = deltas[i, 4*self._bbox_reg_labels[i]+3]
bottom[1].diff[i, 4*self._bbox_reg_labels[i]] = dfdxc[ind] * W_old[i]
bottom[1].diff[i, 4*self._bbox_reg_labels[i]+1] = dfdyc[ind] * H_old[i]
bottom[1].diff[i, 4*self._bbox_reg_labels[i]+2] = dfdw[ind] * np.exp(delta_w) * W_old[i]
bottom[1].diff[i, 4*self._bbox_reg_labels[i]+3] = dfdh[ind] * np.exp(delta_h) * H_old[i]
if self._use_clip:
bottom[1].diff[i, 4*self._bbox_reg_labels[i]] = np.minimum(np.maximum(
bottom[1].diff[i, 4*self._bbox_reg_labels[i]], -self._clip_thresh), self._clip_thresh)
bottom[1].diff[i, 4*self._bbox_reg_labels[i]+1] = np.minimum(np.maximum(
bottom[1].diff[i, 4*self._bbox_reg_labels[i]+1], -self._clip_thresh), self._clip_thresh)
bottom[1].diff[i, 4*self._bbox_reg_labels[i]+2] = np.minimum(np.maximum(
bottom[1].diff[i, 4*self._bbox_reg_labels[i]+2], -self._clip_thresh), self._clip_thresh)
bottom[1].diff[i, 4*self._bbox_reg_labels[i]+3] = np.minimum(np.maximum(
bottom[1].diff[i, 4*self._bbox_reg_labels[i]+3], -self._clip_thresh), self._clip_thresh)
def forward_train(self, bottom, top):
"""
During forward, we need to do several things:
        1. Apply the bounding-box regression output with the highest
        classification score to the proposed ROIs
        2. Sample ROIs based on their current overlaps and assign
        labels to them
        3. Make mask regression targets and positive/negative weights,
        just like the proposal_target_layer
"""
rois = bottom[0].data
bbox_deltas = bottom[1].data
# Apply bounding box regression according to maximum segmentation score
seg_scores = bottom[2].data
self._bbox_reg_labels = seg_scores[:, 1:].argmax(axis=1) + 1
gt_boxes = bottom[3].data
gt_masks = bottom[4].data
im_info = bottom[5].data[0, :]
mask_info = bottom[6].data
        # select bbox_deltas according to the class with the maximum
        # segmentation score
artificial_deltas = np.zeros((rois.shape[0], 4))
for i in xrange(rois.shape[0]):
artificial_deltas[i, :] = bbox_deltas[i, 4*self._bbox_reg_labels[i]:4*(self._bbox_reg_labels[i]+1)]
artificial_deltas[self._bbox_reg_labels == 0, :] = 0
all_rois = np.zeros((rois.shape[0], 5))
all_rois[:, 0] = 0
all_rois[:, 1:5] = bbox_transform_inv(rois[:, 1:5], artificial_deltas)
zeros = np.zeros((gt_boxes.shape[0], 1), dtype=gt_boxes.dtype)
all_rois = np.vstack(
(all_rois, np.hstack((zeros, gt_boxes[:, :-1])))
)
all_rois[:, 1:5], self._clip_keep = clip_boxes(all_rois[:, 1:5], im_info[:2])
labels, rois_out, fg_inds, keep_inds, mask_targets, top_mask_info, bbox_targets, bbox_inside_weights = \
self._sample_output(all_rois, gt_boxes, im_info[2], gt_masks, mask_info)
bbox_outside_weights = np.array(bbox_inside_weights > 0).astype(np.float32)
self._keep_inds = keep_inds
mask_weight = np.zeros((rois_out.shape[0], 1, cfg.MASK_SIZE, cfg.MASK_SIZE))
mask_weight[0:len(fg_inds), :, :, :] = 1
blobs = {
'rois': rois_out,
'labels': labels,
'mask_targets': mask_targets,
'mask_weight': mask_weight,
'gt_mask_info': top_mask_info,
'bbox_targets': bbox_targets,
'bbox_inside_weights': bbox_inside_weights,
'bbox_outside_weights': bbox_outside_weights
}
return blobs
def _sample_output(self, all_rois, gt_boxes, im_scale, gt_masks, mask_info):
overlaps = bbox_overlaps(
np.ascontiguousarray(all_rois[:, 1:5], dtype=np.float),
np.ascontiguousarray(gt_boxes[:, :4], dtype=np.float))
gt_assignment = overlaps.argmax(axis=1)
max_overlaps = overlaps.max(axis=1)
labels = gt_boxes[gt_assignment, 4]
# Sample foreground indexes
fg_inds = np.where(max_overlaps >= cfg.TRAIN.BBOX_THRESH)[0]
bg_inds = np.where(max_overlaps < cfg.TRAIN.BBOX_THRESH)[0]
keep_inds = np.append(fg_inds, bg_inds).astype(int)
# Select sampled values from various arrays:
labels = labels[keep_inds]
# Clamp labels for the background RoIs to 0
labels[len(fg_inds):] = 0
rois = all_rois[keep_inds]
bbox_target_data = bbox_compute_targets(
rois[:, 1:5], gt_boxes[gt_assignment[keep_inds], :4], normalize=True)
bbox_target_data = np.hstack((labels[:, np.newaxis], bbox_target_data))\
.astype(np.float32, copy=False)
bbox_targets, bbox_inside_weights = get_bbox_regression_label(
bbox_target_data, self._num_classes)
scaled_rois = rois[:, 1:5] / float(im_scale)
scaled_gt_boxes = gt_boxes[:, :4] / float(im_scale)
pos_masks = np.zeros((len(keep_inds), 1, cfg.MASK_SIZE, cfg.MASK_SIZE))
top_mask_info = np.zeros((len(keep_inds), 12))
top_mask_info[len(fg_inds):, :] = -1
for i, val in enumerate(fg_inds):
gt_box = scaled_gt_boxes[gt_assignment[val]]
gt_box = np.around(gt_box).astype(int)
ex_box = np.around(scaled_rois[i]).astype(int)
gt_mask = gt_masks[gt_assignment[val]]
gt_mask_info = mask_info[gt_assignment[val]]
gt_mask = gt_mask[0:gt_mask_info[0], 0:gt_mask_info[1]]
            # the regression target is the intersection of the bounding box
            # and the gt mask
ex_mask = intersect_mask(ex_box, gt_box, gt_mask)
pos_masks[i, ...] = ex_mask
top_mask_info[i, 0] = gt_assignment[val]
top_mask_info[i, 1] = gt_mask_info[0]
top_mask_info[i, 2] = gt_mask_info[1]
top_mask_info[i, 3] = labels[i]
top_mask_info[i, 4:8] = ex_box
top_mask_info[i, 8:12] = gt_box
return labels, rois, fg_inds, keep_inds, pos_masks, top_mask_info, bbox_targets, bbox_inside_weights
def forward_test(self, bottom, top):
rois = bottom[0].data
bbox_deltas = bottom[1].data
# get ~ n * 4(1+c) new rois
all_rois = bbox_transform_inv(rois[:, 1:5], bbox_deltas)
scores = bottom[2].data
im_info = bottom[3].data
# get highest scored category's bounding box regressor
score_max = scores.argmax(axis=1)
rois_out = np.zeros((rois.shape[0], 5))
        # single-image batch, so the batch index is 0
rois_out[:, 0] = 0
for i in xrange(len(score_max)):
rois_out[i, 1:5] = all_rois[i, 4*score_max[i]:4*(score_max[i]+1)]
rois_out[:, 1:5], _ = clip_boxes(rois_out[:, 1:5], im_info[0, :2])
blobs = {
'rois': rois_out
}
return blobs
| 11,685 | 44.648438 | 112 | py |
MNC | MNC-master/lib/pylayer/anchor_target_layer.py | # --------------------------------------------------------
# Multitask Network Cascade
# Modified from py-faster-rcnn (https://github.com/rbgirshick/py-faster-rcnn)
# Copyright (c) 2016, Haozhi Qi
# Licensed under The MIT License [see LICENSE for details]
# --------------------------------------------------------
import yaml
import numpy as np
import caffe
from transform.anchors import generate_anchors
from utils.cython_bbox import bbox_overlaps
from utils.unmap import unmap
from mnc_config import cfg
from transform.bbox_transform import bbox_transform
class AnchorTargetLayer(caffe.Layer):
"""
Assign anchors to ground-truth targets. Produces anchor classification
labels and bounding-box regression targets.
"""
def setup(self, bottom, top):
self._anchors = generate_anchors()
self._num_anchors = self._anchors.shape[0]
layer_params = yaml.load(self.param_str_)
self._feat_stride = layer_params['feat_stride']
# allow boxes to sit over the edge by a small amount
self._allowed_border = layer_params.get('allowed_border', 0)
height, width = bottom[0].data.shape[-2:]
A = self._num_anchors
# labels
top[0].reshape(1, 1, A * height, width)
# bbox_targets
top[1].reshape(1, A * 4, height, width)
# bbox_inside_weights
top[2].reshape(1, A * 4, height, width)
# bbox_outside_weights
top[3].reshape(1, A * 4, height, width)
def reshape(self, bottom, top):
"""Reshaping happens during the call to forward"""
pass
def forward(self, bottom, top):
        # Algorithm:
        #
        # for each (H, W) location i
        #     generate 9 anchor boxes centered on cell i
        # filter out-of-image anchors
        # measure overlap with ground-truth boxes
        #
        # Output the labels and regression targets referenced to each anchor
height, width = bottom[0].data.shape[-2:]
assert bottom[0].data.shape[0] == 1, 'Only single item batches are supported'
gt_boxes = bottom[1].data
im_info = bottom[2].data[0, :]
# 1. Generate proposals from shifted anchors
        # note: unlike the proposal layer, no predicted deltas are applied at this stage
shift_x = np.arange(0, width) * self._feat_stride
shift_y = np.arange(0, height) * self._feat_stride
shift_x, shift_y = np.meshgrid(shift_x, shift_y)
shifts = np.vstack((shift_x.ravel(), shift_y.ravel(),
shift_x.ravel(), shift_y.ravel())).transpose()
# add A anchors (1, A, 4) to
# cell K shifts (K, 1, 4) to get
# shift anchors (K, A, 4)
# reshape to (K*A, 4) shifted anchors
A = self._num_anchors
K = shifts.shape[0]
all_anchors = (self._anchors.reshape((1, A, 4)) +
shifts.reshape((1, K, 4)).transpose((1, 0, 2)))
all_anchors = all_anchors.reshape((K * A, 4))
total_anchors = int(K * A)
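        # Shape bookkeeping (illustrative numbers, not cfg values): with A = 9
        # and a 2x3 feature map, K = 6, so (1, 9, 4) + (6, 1, 4) broadcasts to
        # (6, 9, 4) and reshapes to (54, 4) -- one box per (shift, anchor) pair.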
# only keep anchors inside the image
inds_inside = np.where(
(all_anchors[:, 0] >= -self._allowed_border) &
(all_anchors[:, 1] >= -self._allowed_border) &
(all_anchors[:, 2] < im_info[1] + self._allowed_border) & # width
(all_anchors[:, 3] < im_info[0] + self._allowed_border) # height
)[0]
# 2. For each anchor, we assign positive or negative
anchors = all_anchors[inds_inside, :]
# label: 1 is positive, 0 is negative, -1 is don't care
labels = np.empty((len(inds_inside), ), dtype=np.float32)
labels.fill(-1)
# overlaps between the anchors and the gt boxes
# overlaps (ex, gt)
overlaps = bbox_overlaps(
np.ascontiguousarray(anchors, dtype=np.float),
np.ascontiguousarray(gt_boxes, dtype=np.float))
argmax_overlaps = overlaps.argmax(axis=1)
max_overlaps = overlaps[np.arange(len(inds_inside)), argmax_overlaps]
gt_argmax_overlaps = overlaps.argmax(axis=0)
gt_max_overlaps = overlaps[gt_argmax_overlaps,
np.arange(overlaps.shape[1])]
gt_argmax_overlaps = np.where(overlaps == gt_max_overlaps)[0]
if not cfg.TRAIN.RPN_CLOBBER_POSITIVES:
# assign bg labels first so that positive labels can clobber them
labels[max_overlaps < cfg.TRAIN.RPN_NEGATIVE_OVERLAP] = 0
        # We assign two types of anchors as positive
# fg label: for each gt, anchor with highest overlap
labels[gt_argmax_overlaps] = 1
# fg label: above threshold IOU
labels[max_overlaps >= cfg.TRAIN.RPN_POSITIVE_OVERLAP] = 1
if cfg.TRAIN.RPN_CLOBBER_POSITIVES:
# assign bg labels last so that negative labels can clobber positives
labels[max_overlaps < cfg.TRAIN.RPN_NEGATIVE_OVERLAP] = 0
num_fg = int(cfg.TRAIN.RPN_FG_FRACTION * cfg.TRAIN.RPN_BATCHSIZE)
fg_inds = np.where(labels == 1)[0]
if len(fg_inds) > num_fg:
disable_inds = np.random.choice(
fg_inds, size=(len(fg_inds) - num_fg), replace=False)
labels[disable_inds] = -1
# subsample negative labels if we have too many
num_bg = cfg.TRAIN.RPN_BATCHSIZE - np.sum(labels == 1)
bg_inds = np.where(labels == 0)[0]
if len(bg_inds) > num_bg:
disable_inds = np.random.choice(
bg_inds, size=(len(bg_inds) - num_bg), replace=False)
labels[disable_inds] = -1
if cfg.TRAIN.MIX_INDEX:
bottom_fg = bottom[3].data
bottom_bg = bottom[4].data
unmapped_fg_ind = []
unmapped_bg_ind = []
for i in list(bottom_fg):
zal = np.where(i == inds_inside)[0]
if len(zal) > 0:
unmapped_fg_ind.append(zal[0])
for i in list(bottom_bg):
zal = np.where(i == inds_inside)[0]
if len(zal) > 0:
unmapped_bg_ind.append(zal[0])
labels[unmapped_bg_ind] = 0
labels[unmapped_fg_ind] = 1
bbox_targets = np.zeros((len(inds_inside), 4), dtype=np.float32)
bbox_targets = _compute_targets(anchors, gt_boxes[argmax_overlaps, :])
bbox_inside_weights = np.zeros((len(inds_inside), 4), dtype=np.float32)
bbox_inside_weights[labels == 1, :] = np.array(cfg.TRAIN.RPN_BBOX_INSIDE_WEIGHTS)
bbox_outside_weights = np.zeros((len(inds_inside), 4), dtype=np.float32)
if cfg.TRAIN.RPN_POSITIVE_WEIGHT < 0:
# uniform weighting of examples (given non-uniform sampling)
num_examples = np.sum(labels >= 0)
positive_weights = np.ones((1, 4)) * 1.0 / num_examples
negative_weights = np.ones((1, 4)) * 1.0 / num_examples
else:
assert ((cfg.TRAIN.RPN_POSITIVE_WEIGHT > 0) &
(cfg.TRAIN.RPN_POSITIVE_WEIGHT < 1))
positive_weights = (cfg.TRAIN.RPN_POSITIVE_WEIGHT /
np.sum(labels == 1))
negative_weights = ((1.0 - cfg.TRAIN.RPN_POSITIVE_WEIGHT) /
np.sum(labels == 0))
bbox_outside_weights[labels == 1, :] = positive_weights
bbox_outside_weights[labels == 0, :] = negative_weights
# Currently all the indices are in the clipped index space
# we map up to original set of anchors
# In this process, we need to set clipped boxes as label -1, weights 0
labels = unmap(labels, total_anchors, inds_inside, fill=-1)
bbox_targets = unmap(bbox_targets, total_anchors, inds_inside, fill=0)
bbox_inside_weights = unmap(bbox_inside_weights, total_anchors, inds_inside, fill=0)
bbox_outside_weights = unmap(bbox_outside_weights, total_anchors, inds_inside, fill=0)
# labels
labels = labels.reshape((1, height, width, A)).transpose(0, 3, 1, 2)
labels = labels.reshape((1, 1, A * height, width))
top[0].reshape(*labels.shape)
top[0].data[...] = labels
# bbox_targets
bbox_targets = bbox_targets \
.reshape((1, height, width, A * 4)).transpose(0, 3, 1, 2)
top[1].reshape(*bbox_targets.shape)
top[1].data[...] = bbox_targets
# bbox_inside_weights
bbox_inside_weights = bbox_inside_weights \
.reshape((1, height, width, A * 4)).transpose(0, 3, 1, 2)
assert bbox_inside_weights.shape[2] == height
assert bbox_inside_weights.shape[3] == width
top[2].reshape(*bbox_inside_weights.shape)
top[2].data[...] = bbox_inside_weights
# bbox_outside_weights
bbox_outside_weights = bbox_outside_weights \
.reshape((1, height, width, A * 4)).transpose(0, 3, 1, 2)
assert bbox_outside_weights.shape[2] == height
assert bbox_outside_weights.shape[3] == width
top[3].reshape(*bbox_outside_weights.shape)
top[3].data[...] = bbox_outside_weights
def backward(self, top, propagate_down, bottom):
"""This layer does not propagate gradients."""
pass
def _compute_targets(ex_rois, gt_rois):
"""
Compute bounding-box regression targets for an image.
Parameters:
-----------
ex_rois: ROIs from external source (selective search or RPN)
gt_rois: ground truth rois
Returns:
---------
        The regression targets (deltas) for each anchor, to be combined with
        the anchors when generating proposals
"""
assert ex_rois.shape[0] == gt_rois.shape[0]
assert ex_rois.shape[1] == 4
assert gt_rois.shape[1] == 5
return bbox_transform(ex_rois, gt_rois[:, :4]).astype(np.float32, copy=False) | 9,757 | 40.879828 | 94 | py |
MNC | MNC-master/lib/pylayer/cfm_data_layer.py | # --------------------------------------------------------
# Multitask Network Cascade
# Written by Haozhi Qi
# Copyright (c) 2016, Haozhi Qi
# Licensed under The MIT License [see LICENSE for details]
# --------------------------------------------------------
import cv2
import yaml
import scipy
import numpy as np
import numpy.random as npr
import caffe
from mnc_config import cfg
from utils.blob import prep_im_for_blob, im_list_to_blob
from transform.bbox_transform import get_bbox_regression_label, bbox_compute_targets
class CFMDataLayer(caffe.Layer):
"""
Provide image, image w/h/scale, gt boxes/masks and mask info to upper layers
"""
def setup(self, bottom, top):
layer_params = yaml.load(self.param_str_)
self._num_classes = layer_params['num_classes']
self._name_to_top_map = {}
self.input_mz = cfg.TEST.CFM_INPUT_MASK_SIZE
        # For the CFM architecture, all inputs and targets are provided here,
        # since there is no intermediate layer
top[0].reshape(cfg.TRAIN.IMS_PER_BATCH, 3, max(cfg.TRAIN.SCALES), cfg.TRAIN.MAX_SIZE)
self._name_to_top_map['data'] = 0
top[1].reshape(1, 4)
self._name_to_top_map['rois'] = 1
top[2].reshape(1, 1, self.input_mz, self.input_mz)
self._name_to_top_map['masks'] = 2
top[3].reshape(1, 1)
self._name_to_top_map['box_label'] = 3
top[4].reshape(1, 1)
self._name_to_top_map['mask_label'] = 4
top[5].reshape(1, self._num_classes * 4)
self._name_to_top_map['bbox_targets'] = 5
top[6].reshape(1, 1, cfg.MASK_SIZE, cfg.MASK_SIZE)
self._name_to_top_map['mask_targets'] = 6
top[7].reshape(1, self._num_classes * 4)
self._name_to_top_map['bbox_inside_weights'] = 7
top[8].reshape(1, self._num_classes * 4)
self._name_to_top_map['bbox_outside_weights'] = 8
top[9].reshape(1, 1, cfg.MASK_SIZE, cfg.MASK_SIZE)
self._name_to_top_map['mask_weight'] = 9
assert len(top) == len(self._name_to_top_map)
def reshape(self, bottom, top):
"""Reshaping happens during the call to forward."""
pass
def forward(self, bottom, top):
"""Get blobs and copy them into this layer's top blob vector."""
blobs = self._get_next_minibatch()
for blob_name, blob in blobs.iteritems():
top_ind = self._name_to_top_map[blob_name]
# Reshape net's input blobs
top[top_ind].reshape(*blob.shape)
# Copy data into net's input blobs
top[top_ind].data[...] = blob.astype(np.float32, copy=False)
def backward(self, top, propagate_down, bottom):
"""This layer does not propagate gradients."""
pass
def set_image_info(self, imdb, mean, std):
self.imdb = imdb
self._mean = mean
self._std = std
self._shuffle_roidb_inds()
def _shuffle_roidb_inds(self):
"""Randomly permute the training roidb."""
if cfg.TRAIN.ASPECT_GROUPING:
import PIL
num_images = len(self.imdb.image_index)
            width_r = [PIL.Image.open(self.imdb.image_path_at(i)).size[0] for i in xrange(num_images)]
            height_r = [PIL.Image.open(self.imdb.image_path_at(i)).size[1] for i in xrange(num_images)]
widths = np.array([width_r[i] for i in xrange(len(width_r))])
heights = np.array([height_r[i] for i in xrange(len(height_r))])
horz = (widths >= heights)
vert = np.logical_not(horz)
horz_inds = np.where(horz)[0]
vert_inds = np.where(vert)[0]
inds = np.hstack((
np.random.permutation(horz_inds),
np.random.permutation(vert_inds)))
inds = np.reshape(np.hstack((inds, inds+num_images)), (-1, 2))
row_perm = np.random.permutation(np.arange(inds.shape[0]))
inds = np.reshape(inds[row_perm, :], (-1,))
self._perm = inds
else:
self._perm = np.random.permutation(np.arange(len(self._roidb)))
self._cur = 0
def _get_image_blob(self, roidb, scale_inds, im_names):
"""Builds an input blob from the images in the roidb at the specified
scales.
"""
num_images = len(roidb)
processed_ims = []
im_scales = []
for i in xrange(num_images):
im = cv2.imread(im_names[i])
            # here [0][0] is needed because scipy.io.savemat
            # stores True/False as [[1]] or [[0]] with shape (1, 1),
            # so we have to read the flip flag in this unusual way
if roidb[i]['Flip'][0][0]:
im = im[:, ::-1, :]
target_size = cfg.TRAIN.SCALES[scale_inds[i]]
im, im_scale = prep_im_for_blob(im, cfg.PIXEL_MEANS, target_size,
cfg.TRAIN.MAX_SIZE)
im_scales.append(im_scale)
processed_ims.append(im)
# Create a blob to hold the input images
blob = im_list_to_blob(processed_ims)
return blob, im_scales
def _get_next_minibatch(self):
"""
Return the blobs to be used for the next minibatch.
"""
if self._cur + cfg.TRAIN.IMS_PER_BATCH >= len(self._perm):
self._shuffle_roidb_inds()
db_inds = self._perm[self._cur:self._cur + cfg.TRAIN.IMS_PER_BATCH]
self._cur += cfg.TRAIN.IMS_PER_BATCH
total_imgs = self.imdb.num_images
roidbs = []
img_names = []
for db_ind in list(db_inds):
cache_dir = self.imdb.roidb_path_at(db_ind)
roidb = scipy.io.loadmat(cache_dir)
roidbs.append(roidb)
img_names.append(self.imdb.image_path_at(db_ind % total_imgs))
blobs = self._sample_blobs(roidbs, img_names)
return blobs
def _sample_blobs(self, roidbs, img_names):
random_scale_inds = np.random.randint(0, high=len(cfg.TRAIN.SCALES), size=cfg.TRAIN.IMS_PER_BATCH)
im_blob, im_scales = self._get_image_blob(roidbs, random_scale_inds, img_names)
rois_per_img = cfg.TRAIN.BATCH_SIZE / cfg.TRAIN.IMS_PER_BATCH
rois_blob = np.zeros((0, 5), dtype=np.float32)
masks_blob = np.zeros((0, 1, self.input_mz, self.input_mz))
box_labels_blob = np.zeros((0, 1))
mask_labels_blob = np.zeros((0, 1))
bbox_targets_blob = np.zeros((0, self._num_classes * 4))
mask_targets_blob = np.zeros((0, 1, cfg.MASK_SIZE, cfg.MASK_SIZE))
bbox_inside_weights_blob = np.zeros((0, self._num_classes * 4))
bbox_outside_weights_blob = np.zeros((0, self._num_classes * 4))
mask_weights_blob = np.zeros((0, 1, cfg.MASK_SIZE, cfg.MASK_SIZE))
for im_i, roidb in enumerate(roidbs):
# Sample positive/negative using box-level overlap
det_overlap = roidb['det_overlap']
num_gt = len(roidb['gt_classes'])
fg_det_inds = np.where(det_overlap >= cfg.TRAIN.FG_DET_THRESH)
keep_inds = []
for i in xrange(len(cfg.TRAIN.FRACTION_SAMPLE)):
cur_keep_inds = np.where((det_overlap >= cfg.TRAIN.THRESH_LO_SAMPLE[i]) &
(det_overlap <= cfg.TRAIN.THRESH_HI_SAMPLE[i]))[0]
cur_rois_this_image = np.round(rois_per_img * cfg.TRAIN.FRACTION_SAMPLE[i])
cur_rois_this_image = min(cur_rois_this_image, len(cur_keep_inds))
if cur_keep_inds.size > 0:
cur_keep_inds = npr.choice(cur_keep_inds, size=cur_rois_this_image, replace=False)
if i == 0:
keep_inds = cur_keep_inds
else:
keep_inds = np.unique(np.hstack((keep_inds, cur_keep_inds)))
fg_inds_det = keep_inds[np.in1d(keep_inds, fg_det_inds)]
bg_inds_det = keep_inds[np.in1d(keep_inds, fg_det_inds, invert=True)]
keep_inds = np.append(fg_inds_det, bg_inds_det).astype(int)
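            # keep_inds is now a stratified sample over the overlap bins,
            # reordered so detection-foreground ROIs come first; the slices
            # below such as [0:len(fg_inds_det)] rely on this ordering to
            # address only the positives.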
# Assign box-level label and mask-level label
input_box_labels = roidb['output_label'][keep_inds]
input_box_labels[len(fg_inds_det):] = 0
seg_overlap = roidb['seg_overlap'][keep_inds]
bg_inds_seg = np.where(seg_overlap < cfg.TRAIN.FG_SEG_THRESH)[0]
input_mask_labels = input_box_labels.copy()
input_mask_labels[bg_inds_seg] = 0
gt_classes = roidb['gt_classes']
input_masks = roidb['masks'][keep_inds, :, :]
input_boxes = roidb['boxes'][keep_inds, :] * im_scales[im_i]
mask_target = roidb['mask_targets']
mask_target = mask_target[keep_inds, :, :]
mask_resize = np.zeros((input_masks.shape[0], self.input_mz, self.input_mz))
for i in xrange(mask_target.shape[0]):
mask_resize[i, :, :] = cv2.resize(input_masks[i, :, :].astype(np.float), (self.input_mz, self.input_mz))
mask_resize = mask_resize >= cfg.BINARIZE_THRESH
mask_target_weights = np.zeros(mask_target.shape)
mask_target_weights[0:len(fg_inds_det), :, :] = 1
gt_boxes = roidb['boxes'][0:num_gt, :] * im_scales[im_i]
gt_assignment = roidb['gt_assignment'][:, 0]
bbox_target_data = bbox_compute_targets(input_boxes, gt_boxes[gt_assignment[keep_inds], :4], False)
# normalize targets
bbox_target_data = np.hstack((input_box_labels, bbox_target_data))\
.astype(np.float32, copy=False)
bbox_targets, bbox_inside_weights = get_bbox_regression_label(
bbox_target_data, self._num_classes)
for i in xrange(len(fg_inds_det)):
cls = gt_classes[gt_assignment[fg_inds_det[i]]][0]
if cls == 0:
continue
mean = self._mean
std = self._std
bbox_targets[i, cls*4:cls*4+4] -= mean[cls, :]
bbox_targets[i, cls*4:cls*4+4] /= std[cls, :]
bbox_outside_weights = np.array(bbox_inside_weights > 0).astype(np.float32)
input_boxes = np.hstack((im_i * np.ones((input_boxes.shape[0], 1)), input_boxes))
bz = input_boxes.shape[0]
rois_blob = np.vstack((rois_blob, input_boxes))
masks_blob = np.concatenate((masks_blob,
mask_resize.reshape(bz, 1, self.input_mz, self.input_mz)), axis=0)
box_labels_blob = np.concatenate((box_labels_blob, input_box_labels), axis=0)
mask_labels_blob = np.concatenate((mask_labels_blob, input_mask_labels), axis=0)
bbox_targets_blob = np.concatenate((bbox_targets_blob, bbox_targets), axis=0)
mask_targets_blob = np.concatenate((mask_targets_blob,
mask_target.reshape(bz, 1, cfg.MASK_SIZE, cfg.MASK_SIZE)), axis=0)
bbox_inside_weights_blob = np.concatenate((bbox_inside_weights_blob, bbox_inside_weights), axis=0)
bbox_outside_weights_blob = np.concatenate((bbox_outside_weights_blob, bbox_outside_weights), axis=0)
mask_weights_blob = np.concatenate((mask_weights_blob,
mask_target_weights.reshape(bz, 1, cfg.MASK_SIZE, cfg.MASK_SIZE)), axis=0)
return {
'data': im_blob,
'rois': rois_blob,
'masks': masks_blob,
'box_label': box_labels_blob,
'mask_label': mask_labels_blob,
'bbox_targets': bbox_targets_blob,
'mask_targets': mask_targets_blob,
'bbox_inside_weights': bbox_inside_weights_blob,
'bbox_outside_weights': bbox_outside_weights_blob,
'mask_weight': mask_weights_blob
}
| 11,892 | 44.39313 | 122 | py |
flowseq | flowseq-master/flownmt/utils.py | import logging
import sys
from typing import Tuple, List
import torch
from torch._six import inf
def get_logger(name, level=logging.INFO, handler=sys.stdout,
formatter='%(asctime)s - %(name)s - %(levelname)s - %(message)s'):
logger = logging.getLogger(name)
    logger.setLevel(level)
formatter = logging.Formatter(formatter)
stream_handler = logging.StreamHandler(handler)
stream_handler.setLevel(level)
stream_handler.setFormatter(formatter)
logger.addHandler(stream_handler)
return logger
def norm(p: torch.Tensor, dim: int):
"""Computes the norm over all dimensions except dim"""
if dim is None:
return p.norm()
elif dim == 0:
output_size = (p.size(0),) + (1,) * (p.dim() - 1)
return p.contiguous().view(p.size(0), -1).norm(dim=1).view(*output_size)
elif dim == p.dim() - 1:
output_size = (1,) * (p.dim() - 1) + (p.size(-1),)
return p.contiguous().view(-1, p.size(-1)).norm(dim=0).view(*output_size)
else:
return norm(p.transpose(0, dim), 0).transpose(0, dim)
def exponentialMovingAverage(original, shadow, decay_rate, init=False):
params = dict()
for name, param in shadow.named_parameters():
params[name] = param
for name, param in original.named_parameters():
shadow_param = params[name]
if init:
shadow_param.data.copy_(param.data)
else:
shadow_param.data.add_((1 - decay_rate) * (param.data - shadow_param.data))
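# The in-place update above is the standard EMA recurrence,
#   shadow <- decay_rate * shadow + (1 - decay_rate) * param
# A minimal scalar sketch of the same recurrence (illustrative only, not used
# by the training code):
def _ema_scalar_demo(values, decay_rate):
    shadow = values[0]
    for v in values[1:]:
        # identical update rule to exponentialMovingAverage, on scalars
        shadow += (1 - decay_rate) * (v - shadow)
    return shadow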
def logPlusOne(x):
"""
compute log(x + 1) for small x
Args:
x: Tensor
Returns: Tensor
log(x+1)
"""
eps = 1e-4
mask = x.abs().le(eps).type_as(x)
return x.mul(x.mul(-0.5) + 1.0) * mask + (x + 1.0).log() * (1.0 - mask)
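# For |x| <= eps, logPlusOne uses the second-order Taylor expansion
#   log(1 + x) = x - x**2 / 2 + O(x**3) = x * (1 - x / 2),
# which matches x.mul(x.mul(-0.5) + 1.0) above and avoids the precision loss
# of evaluating (x + 1.0).log() for tiny x.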
def gate(x1, x2):
return x1 * x2.sigmoid_()
def total_grad_norm(parameters, norm_type=2):
if isinstance(parameters, torch.Tensor):
parameters = [parameters]
parameters = list(filter(lambda p: p.grad is not None, parameters))
norm_type = float(norm_type)
if norm_type == inf:
total_norm = max(p.grad.data.abs().max() for p in parameters)
else:
total_norm = 0
for p in parameters:
param_norm = p.grad.data.norm(norm_type)
total_norm += param_norm.item() ** norm_type
total_norm = total_norm ** (1. / norm_type)
return total_norm
def squeeze(x: torch.Tensor, mask: torch.Tensor, factor: int = 2) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Args:
x: Tensor
input tensor [batch, length, features]
mask: Tensor
mask tensor [batch, length]
factor: int
squeeze factor (default 2)
Returns: Tensor1, Tensor2
squeezed x [batch, length // factor, factor * features]
squeezed mask [batch, length // factor]
"""
assert factor >= 1
    if factor == 1:
        return x, mask
batch, length, features = x.size()
assert length % factor == 0
# [batch, length // factor, factor * features]
x = x.contiguous().view(batch, length // factor, factor * features)
mask = mask.view(batch, length // factor, factor).sum(dim=2).clamp(max=1.0)
return x, mask
def unsqueeze(x: torch.Tensor, factor: int = 2) -> torch.Tensor:
"""
Args:
x: Tensor
input tensor [batch, length, features]
factor: int
unsqueeze factor (default 2)
Returns: Tensor
        unsqueezed tensor [batch, length * factor, features // factor]
"""
assert factor >= 1
if factor == 1:
return x
batch, length, features = x.size()
assert features % factor == 0
# [batch, length * factor, features // factor]
x = x.view(batch, length * factor, features // factor)
return x
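# Round-trip sketch for squeeze/unsqueeze (illustrative only, not used by the
# model code): squeeze halves the length and doubles the feature size, and
# unsqueeze inverts it.
def _squeeze_roundtrip_demo():
    x = torch.zeros(4, 10, 8)
    mask = torch.ones(4, 10)
    y, m = squeeze(x, mask, factor=2)
    assert y.size() == (4, 5, 16) and m.size() == (4, 5)
    assert unsqueeze(y, factor=2).size() == x.size()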
def split(x: torch.Tensor, z1_features) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Args:
x: Tensor
input tensor [batch, length, features]
z1_features: int
the number of features of z1
Returns: Tensor, Tensor
split tensors [batch, length, z1_features], [batch, length, features-z1_features]
"""
z1 = x[:, :, :z1_features]
z2 = x[:, :, z1_features:]
return z1, z2
def unsplit(xs: List[torch.Tensor]) -> torch.Tensor:
"""
Args:
xs: List[Tensor]
tensors to be combined
Returns: Tensor
combined tensor
"""
return torch.cat(xs, dim=2)
def make_positions(tensor, padding_idx):
"""Replace non-padding symbols with their position numbers.
    Position numbers begin at 1; padding positions are set to 0.
"""
mask = tensor.ne(padding_idx).long()
return torch.cumsum(mask, dim=1) * mask
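# Example: with padding_idx = 0, make_positions(torch.tensor([[7, 4, 0, 0]]), 0)
# returns [[1, 2, 0, 0]] -- the running count of non-padding symbols, with
# padding positions zeroed out by the final multiplication.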
# def prepare_rnn_seq(rnn_input, lengths, batch_first=False):
# '''
# Args:
# rnn_input: [seq_len, batch, input_size]: tensor containing the features of the input sequence.
# lengths: [batch]: tensor containing the lengthes of the input sequence
# batch_first: If True, then the input and output tensors are provided as [batch, seq_len, feature].
# Returns:
# '''
#
# def check_decreasing(lengths):
# lens, order = torch.sort(lengths, dim=0, descending=True)
# if torch.ne(lens, lengths).sum() == 0:
# return None
# else:
# _, rev_order = torch.sort(order)
# return lens, order, rev_order
#
# check_res = check_decreasing(lengths)
#
# if check_res is None:
# lens = lengths
# rev_order = None
# else:
# lens, order, rev_order = check_res
# batch_dim = 0 if batch_first else 1
# rnn_input = rnn_input.index_select(batch_dim, order)
# lens = lens.tolist()
# seq = pack_padded_sequence(rnn_input, lens, batch_first=batch_first)
# return seq, rev_order
#
# def recover_rnn_seq(seq, rev_order, batch_first=False, total_length=None):
# output, _ = pad_packed_sequence(seq, batch_first=batch_first, total_length=total_length)
# if rev_order is not None:
# batch_dim = 0 if batch_first else 1
# output = output.index_select(batch_dim, rev_order)
# return output
#
#
# def recover_order(tensors, rev_order):
# if rev_order is None:
# return tensors
# recovered_tensors = [tensor.index_select(0, rev_order) for tensor in tensors]
# return recovered_tensors
#
#
# def decreasing_order(lengths, tensors):
# def check_decreasing(lengths):
# lens, order = torch.sort(lengths, dim=0, descending=True)
# if torch.ne(lens, lengths).sum() == 0:
# return None
# else:
# _, rev_order = torch.sort(order)
# return lens, order, rev_order
#
# check_res = check_decreasing(lengths)
#
# if check_res is None:
# lens = lengths
# rev_order = None
# ordered_tensors = tensors
# else:
# lens, order, rev_order = check_res
# ordered_tensors = [tensor.index_select(0, order) for tensor in tensors]
#
# return lens, ordered_tensors, rev_order
| 7,058 | 30.513393 | 108 | py |
flowseq | flowseq-master/flownmt/flownmt.py | import os
import json
import math
from typing import Dict, Tuple
import torch
import torch.nn as nn
import torch.distributed as dist
from apex.parallel import DistributedDataParallel
from apex.parallel.distributed import flat_dist_call
from flownmt.modules import Encoder
from flownmt.modules import Posterior
from flownmt.modules import Decoder
from flownmt.modules import Prior
class FlowNMTCore(nn.Module):
"""
    Core module for the FlowNMT model.
"""
def __init__(self, encoder: Encoder, prior: Prior, posterior: Posterior, decoder: Decoder):
super(FlowNMTCore, self).__init__()
self.encoder = encoder
self.prior = prior
self.posterior = posterior
self.decoder = decoder
def sync(self):
self.prior.sync()
def init(self, src_sents, tgt_sents, src_masks, tgt_masks, init_scale=1.0):
src_enc = self.encoder.init(src_sents, masks=src_masks, init_scale=init_scale)
z, _ = self.posterior.init(tgt_sents, tgt_masks, src_enc, src_masks,
init_scale=init_scale, init_mu=True, init_var=True)
self.prior.init(z, tgt_masks, src_enc, src_masks, init_scale=init_scale)
self.decoder.init(z, tgt_masks, src_enc, src_masks, init_scale=init_scale)
def init_posterior(self, src_sents, tgt_sents, src_masks, tgt_masks, init_scale=1.0):
src_enc = self.encoder.init(src_sents, masks=src_masks, init_scale=init_scale)
z, _ = self.posterior.init(tgt_sents, tgt_masks, src_enc, src_masks,
init_scale=init_scale, init_mu=True, init_var=False)
self.decoder.init(z, tgt_masks, src_enc, src_masks, init_scale=init_scale)
def init_prior(self, src_sents, tgt_sents, src_masks, tgt_masks, init_scale=1.0):
with torch.no_grad():
src_enc, _ = self.encoder(src_sents, masks=src_masks)
z, _ = self.posterior.init(tgt_sents, tgt_masks, src_enc, src_masks,
init_scale=init_scale, init_mu=False, init_var=True)
self.prior.init(z.squeeze(1), tgt_masks, src_enc, src_masks, init_scale=init_scale)
def sample_from_prior(self, src_sents: torch.Tensor, src_masks: torch.Tensor,
nlengths: int = 1, nsamples: int = 1, tau: float = 0.0,
include_zero=False) \
-> Tuple[Tuple[torch.Tensor, torch.Tensor, torch.Tensor], Tuple[torch.Tensor, torch.Tensor], Tuple[torch.Tensor, torch.Tensor, torch.Tensor]]:
"""
sampling from prior distribution
Args:
src_sents: Tensor [batch, src_length]
tensor for source sentences
src_masks: Tensor [batch, src_length] or None
tensor for source masks
nlengths: int (default 1)
number of length candidates
nsamples: int (default 1)
number of samples per src per length candidate
tau: float (default 0.0)
temperature
        Returns: (Tensor1, Tensor2, Tensor3), (Tensor4, Tensor5), (Tensor6, Tensor7, Tensor8)
Tensor1: samples from the prior [batch * nlengths * nsamples, tgt_length, nz]
Tensor2: log probabilities [batch * nlengths * nsamples]
Tensor3: target masks [batch * nlengths * nsamples, tgt_length]
Tensor4: lengths [batch * nlengths]
Tensor5: log probabilities of lengths [batch * nlengths]
Tensor6: source encoding with shape [batch * nlengths * nsamples, src_length, hidden_size]
Tensor7: tensor for global state [batch * nlengths * nsamples, hidden_size]
Tensor8: source masks with shape [batch * nlengths * nsamples, src_length]
"""
src_enc, ctx = self.encoder(src_sents, masks=src_masks)
# [batch, nsamples, tgt_length, nz]
return self.prior.sample(nlengths, nsamples, src_enc, ctx, src_masks, tau=tau,
include_zero=include_zero)
    def sample_from_posterior(self, tgt_sents: torch.Tensor, tgt_masks: torch.Tensor,
src_enc: torch.Tensor, src_masks: torch.Tensor,
nsamples: int = 1, random=True) -> Tuple[torch.Tensor, torch.Tensor]:
"""
sampling from posterior distribution
Args:
tgt_sents: Tensor [batch, tgt_length]
tensor for target sentences
tgt_masks: Tensor [batch, tgt_length]
tensor for target masks
src_enc: Tensor [batch, src_length, hidden_size]
tensor for source encoding
src_masks: Tensor [batch, src_length] or None
tensor for source masks
nsamples: int
number of samples
random: bool
if True, perform random sampling. Otherwise, return mean.
Returns: Tensor1, Tensor2
Tensor1: samples from the posterior [batch, nsamples, tgt_length, nz]
Tensor2: log probabilities [batch, nsamples]
"""
return self.posterior.sample(tgt_sents, tgt_masks, src_enc, src_masks, nsamples=nsamples, random=random)
def reconstruct(self, src_sents: torch.Tensor, tgt_sents: torch.Tensor,
src_masks: torch.Tensor, tgt_masks: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
src_enc, ctx = self.encoder(src_sents, masks=src_masks)
z, _ = self.sample_from_posterior(tgt_sents, tgt_masks, src_enc, src_masks, random=False)
z = z.squeeze(1)
recon, _ = self.decoder.decode(z, tgt_masks, src_enc, src_masks)
recon_err = self.decoder.loss(z, tgt_sents, tgt_masks, src_enc, src_masks)
loss_length = self.prior.length_loss(ctx, src_masks, tgt_masks)
lengths, log_probs = self.prior.predict_length(ctx, src_masks, topk=1)
return recon, recon_err, loss_length, lengths.squeeze(1), log_probs.squeeze(1) * -1.
def translate_argmax(self, src_sents: torch.Tensor, src_masks: torch.Tensor,
n_tr: int = 1, tau: float = 0.0) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Args:
src_sents: Tensor [batch, src_length]
tensor for source sentences
src_masks: Tensor [batch, src_length] or None
tensor for source masks
n_tr: int (default 1)
number of translations per sentence per length candidate
tau: float (default 0.0)
temperature
Returns: Tensor1, Tensor2
Tensor1: tensor for translations [batch, tgt_length]
Tensor2: lengths [batch]
"""
batch = src_sents.size(0)
# [batch * n_tr, tgt_length, nz]
(z, log_probs, tgt_masks), (lengths, _), (src, _, _) = self.sample_from_prior(src_sents, src_masks, nlengths=1, nsamples=n_tr, tau=tau)
if n_tr > 1:
nbatch, length, nz = z.size()
# [batch, n_tr, tgt_length, nz]
z = z.view(batch, n_tr, length, nz)
# [batch, n_tr]
log_probs = log_probs.view(batch, n_tr)
# [batch, n_tr, tgt_length]
tgt_masks = tgt_masks.view(batch, n_tr, length)
# [batch, n_tr, src_length, dim]
src = src.view(batch, n_tr, *src.size()[1:])
# [batch]
idx = log_probs.argmax(dim=1)
batch_idx = torch.arange(0, batch).long().to(idx.device)
# [batch, tgt_length, nz]
z = z[batch_idx, idx]
# [batch, tgt_length]
tgt_masks = tgt_masks[batch_idx, idx]
            # [batch, src_length, dim]
src = src[:, 0]
# [batch, tgt_length]
trans, _ = self.decoder.decode(z, tgt_masks, src, src_masks)
return trans, lengths
def translate_iw(self, src_sents: torch.Tensor, src_masks: torch.Tensor,
n_len: int = 1, n_tr: int = 1,
tau: float = 0.0, k: int = 1) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Args:
src_sents: Tensor [batch, src_length]
tensor for source sentences
src_masks: Tensor [batch, src_length] or None
tensor for source masks
n_len: int (default 1)
number of length candidates
n_tr: int (default 1)
number of translations per sentence per length candidate
tau: float (default 0.0)
temperature
k: int (default 1)
number of samples for importance weighted sampling
Returns: Tensor1, Tensor2
Tensor1: tensor for translations [batch, tgt_length]
Tensor2: lengths [batch]
"""
batch = src_sents.size(0)
# [batch * n_len * n_tr, tgt_length, nz]
(z, _, tgt_masks), \
(lengths, log_probs_length), \
(src, ctx, src_masks) = self.sample_from_prior(src_sents, src_masks,
nlengths=n_len, nsamples=n_tr,
tau=tau, include_zero=True)
# [batch, n_len]
lengths = lengths.view(batch, n_len)
log_probs_length = log_probs_length.view(batch, n_len)
# [batch * n_len * n_tr, tgt_length]
trans, _ = self.decoder.decode(z, tgt_masks, src, src_masks)
# [batch, n_len * n_tr, tgt_length]
trans_org = trans.view(batch, n_len * n_tr, trans.size(1))
# [batch * n_len * n_tr, k, tgt_length, nz]
z, log_probs_posterior = self.sample_from_posterior(trans, tgt_masks, src, src_masks, nsamples=k, random=True)
nbatch, _, length, nz = z.size()
if k > 1:
# [batch * n_len * n_tr, k, src_length, hidden_size]
src = src.unsqueeze(1) + src.new_zeros(nbatch, k, *src.size()[1:])
# [batch * n_len * n_tr * k, src_length, hidden_size]
src = src.view(nbatch * k, *src.size()[2:])
# [batch * n_len * n_tr, k, hidden_size]
ctx = ctx.unsqueeze(1) + ctx.new_zeros(nbatch, k, ctx.size(1))
# [batch * n_len * n_tr * k, hidden_size]
ctx = ctx.view(nbatch * k, ctx.size(2))
# [batch * n_len * n_tr, k, src_length]
src_masks = src_masks.unsqueeze(1) + src_masks.new_zeros(nbatch, k, src_masks.size(1))
# [batch * n_len * n_tr * k, src_length]
src_masks = src_masks.view(nbatch * k, src_masks.size(2))
# [batch * n_len * n_tr, k, tgt_length]
tgt_masks = tgt_masks.unsqueeze(1) + tgt_masks.new_zeros(nbatch, k, tgt_masks.size(1))
            # [batch * n_len * n_tr * k, tgt_length]
tgt_masks = tgt_masks.view(nbatch * k, tgt_masks.size(2))
# [batch * n_len * n_tr, k, tgt_length]
trans = trans.unsqueeze(1) + trans.new_zeros(nbatch, k, trans.size(1))
# [batch * n_len * n_tr * k, tgt_length]
trans = trans.view(nbatch * k, trans.size(2))
# [batch * n_len * n_tr * k, tgt_length, nz]
z = z.view(-1, length, nz)
# [batch * n_len * n_tr * k]
log_probs_prior, _ = self.prior.log_probability(z, tgt_masks, src, ctx, src_masks, length_loss=False)
# [batch * n_len * n_tr, k]
log_probs_prior = log_probs_prior.view(nbatch, k)
minus_log_prob_decode = self.decoder.loss(z, trans, tgt_masks, src, src_masks).view(nbatch, k)
log_iw = log_probs_prior - minus_log_prob_decode - log_probs_posterior
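        # Importance-weighted estimate of the marginal likelihood:
        #   log p(y|x) ~ logsumexp_k[ log p(z_k|x) + log p(y|z_k, x) - log q(z_k|y, x) ] - log k
        # log_iw holds the bracketed term for each of the k posterior samples.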
# [batch, n_len, n_tr]
nlprobs = math.log(k) - torch.logsumexp(log_iw, dim=1).view(batch, n_len, n_tr)
# [batch, n_len, n_tr]
nlprobs = nlprobs - log_probs_length.unsqueeze(2)
nlprobs = nlprobs / lengths.unsqueeze(2).float()
idx = nlprobs.view(batch, -1).argmin(dim=1)
batch_idx = torch.arange(0, batch).long().to(idx.device)
trans = trans_org[batch_idx, idx]
lengths = lengths[batch_idx, idx.div(n_tr)]
return trans, lengths
def translate_sample(self, src_sents: torch.Tensor, src_masks: torch.Tensor,
n_len: int = 1, n_tr: int = 1, tau: float = 0.0) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Args:
src_sents: Tensor [batch, src_length]
tensor for source sentences
src_masks: Tensor [batch, src_length] or None
tensor for source masks
n_len: int (default 1)
number of length candidates
n_tr: int (default 1)
number of translations per sentence per length candidate
tau: float (default 0.0)
temperature
Returns: Tensor1, Tensor2
Tensor1: tensor for translations [batch * n_len * n_tr, tgt_length]
Tensor2: lengths [batch * n_len * n_tr]
"""
batch = src_sents.size(0)
# [batch * n_len * n_tr, tgt_length, nz]
(z, _, tgt_masks), \
(lengths, _), \
(src, _, src_masks) = self.sample_from_prior(src_sents, src_masks,
nlengths=n_len, nsamples=n_tr,
tau=tau, include_zero=False)
# [batch * n_len * n_tr, tgt_length]
trans, _ = self.decoder.decode(z, tgt_masks, src, src_masks)
# [batch, n_len]
lengths = lengths.view(batch, n_len, 1).expand(batch, n_len, n_tr).contiguous()
lengths = lengths.view(batch * n_len * n_tr)
return trans, lengths
    def reconstruct_loss(self, src_sents: torch.Tensor, tgt_sents: torch.Tensor,
src_masks: torch.Tensor, tgt_masks: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Args:
src_sents: Tensor [batch, src_length]
tensor for source sentences
tgt_sents: Tensor [batch, tgt_length]
tensor for target sentences
src_masks: Tensor [batch, src_length] or None
tensor for source masks
tgt_masks: Tensor [batch, tgt_length] or None
tensor for target masks
Returns: Tensor1, Tensor2
Tensor1: reconstruction error [batch]
Tensor2: length loss [batch]
"""
src_enc, ctx = self.encoder(src_sents, masks=src_masks)
z, _ = self.sample_from_posterior(tgt_sents, tgt_masks, src_enc, src_masks, random=False)
# [batch, tgt_length, nz]
z = z.squeeze(1)
loss_length = self.prior.length_loss(ctx, src_masks, tgt_masks)
recon_err = self.decoder.loss(z, tgt_sents, tgt_masks, src_enc, src_masks)
return recon_err, loss_length
    def translate_loss(self, src_sents: torch.Tensor, tgt_sents: torch.Tensor,
src_masks: torch.Tensor, tgt_masks: torch.Tensor,
nsamples: int = 1) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
"""
Args:
src_sents: Tensor [batch, src_length]
tensor for source sentences
tgt_sents: Tensor [batch, tgt_length]
tensor for target sentences
src_masks: Tensor [batch, src_length] or None
tensor for source masks
tgt_masks: Tensor [batch, tgt_length] or None
tensor for target masks
nsamples: int
number of samples
Returns: Tensor1, Tensor2, Tensor3
Tensor1: reconstruction error [batch]
Tensor2: KL [batch]
Tensor3: length loss [batch]
"""
src_enc, ctx = self.encoder(src_sents, masks=src_masks)
z, log_probs_posterior = self.sample_from_posterior(tgt_sents, tgt_masks, src_enc, src_masks,
nsamples=nsamples, random=True)
batch, _, length, nz = z.size()
if nsamples > 1:
# [batch, nsamples, src_length, hidden_size]
src_enc = src_enc.unsqueeze(1) + src_enc.new_zeros(batch, nsamples, *src_enc.size()[1:])
# [batch * nsamples, src_length, hidden_size]
src_enc = src_enc.view(batch * nsamples, *src_enc.size()[2:])
# [batch, nsamples, hidden_size]
ctx = ctx.unsqueeze(1) + ctx.new_zeros(batch, nsamples, ctx.size(1))
ctx = ctx.view(batch * nsamples, ctx.size(2))
# [batch, nsamples, src_length]
src_masks = src_masks.unsqueeze(1) + src_masks.new_zeros(batch, nsamples, src_masks.size(1))
# [batch * nsamples, src_length]
src_masks = src_masks.view(batch * nsamples, src_masks.size(2))
# [batch, nsamples, tgt_length]
tgt_masks = tgt_masks.unsqueeze(1) + tgt_masks.new_zeros(batch, nsamples, tgt_masks.size(1))
            # [batch * nsamples, tgt_length]
tgt_masks = tgt_masks.view(batch * nsamples, tgt_masks.size(2))
# [batch, nsamples, tgt_length]
tgt_sents = tgt_sents.unsqueeze(1) + tgt_sents.new_zeros(batch, nsamples, tgt_sents.size(1))
tgt_sents = tgt_sents.view(batch * nsamples, tgt_sents.size(2))
# [batch * nsamples, tgt_length, nz]
z = z.view(-1, length, nz)
# [batch * nsamples] -> [batch, nsamples]
log_probs_prior, loss_length = self.prior.log_probability(z, tgt_masks, src_enc, ctx, src_masks, length_loss=True)
log_probs_prior = log_probs_prior.view(batch, nsamples)
loss_length = loss_length.view(batch, nsamples)
# [batch]
KL = (log_probs_posterior - log_probs_prior).mean(dim=1)
loss_length = loss_length.mean(dim=1)
# [batch * nsamples] -> [batch, nsamples] -> [batch]
recon_err = self.decoder.loss(z, tgt_sents, tgt_masks, src_enc, src_masks).view(batch, nsamples).mean(dim=1)
return recon_err, KL, loss_length
    def forward(self, src_sents: torch.Tensor, tgt_sents: torch.Tensor, src_masks: torch.Tensor, tgt_masks: torch.Tensor,
nsamples: int = 1, only_recon_loss=False):
if only_recon_loss:
return self.reconstruct_loss(src_sents, tgt_sents, src_masks, tgt_masks)
else:
return self.translate_loss(src_sents, tgt_sents, src_masks, tgt_masks, nsamples=nsamples)
class FlowNMT(nn.Module):
"""
NMT model with Generative Flow.
"""
def __init__(self, core: FlowNMTCore):
super(FlowNMT, self).__init__()
self.core = core
self.length_unit = self.core.prior.length_unit
self.distribured_enabled = False
def _get_core(self):
return self.core.module if self.distribured_enabled else self.core
def sync(self):
core = self._get_core()
core.prior.sync()
def init(self, src_sents, tgt_sents, src_masks, tgt_masks, init_scale=1.0):
core = self._get_core()
core.init(src_sents, tgt_sents, src_masks, tgt_masks, init_scale=init_scale)
def init_posterior(self, src_sents, tgt_sents, src_masks, tgt_masks, init_scale=1.0):
core = self._get_core()
core.init_posterior(src_sents, tgt_sents, src_masks, tgt_masks, init_scale=init_scale)
def init_prior(self, src_sents, tgt_sents, src_masks, tgt_masks, init_scale=1.0):
core = self._get_core()
core.init_prior(src_sents, tgt_sents, src_masks, tgt_masks, init_scale=init_scale)
def reconstruct(self, src_sents: torch.Tensor, tgt_sents: torch.Tensor,
src_masks: torch.Tensor, tgt_masks: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
return self._get_core().reconstruct(src_sents, tgt_sents, src_masks, tgt_masks)
def translate_argmax(self, src_sents: torch.Tensor, src_masks: torch.Tensor,
n_tr: int = 1, tau: float = 0.0) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Args:
src_sents: Tensor [batch, src_length]
tensor for source sentences
src_masks: Tensor [batch, src_length] or None
tensor for source masks
n_tr: int (default 1)
number of translations per sentence per length candidate
tau: float (default 0.0)
temperature
Returns: Tensor1, Tensor2
Tensor1: tensor for translations [batch, tgt_length]
Tensor2: lengths [batch]
"""
return self._get_core().translate_argmax(src_sents, src_masks, n_tr=n_tr, tau=tau)
def translate_iw(self, src_sents: torch.Tensor, src_masks: torch.Tensor,
n_len: int = 1, n_tr: int = 1,
tau: float = 0.0, k: int = 1) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Args:
src_sents: Tensor [batch, src_length]
tensor for source sentences
src_masks: Tensor [batch, src_length] or None
tensor for source masks
n_len: int (default 1)
number of length candidates
n_tr: int (default 1)
number of translations per sentence per length candidate
tau: float (default 0.0)
temperature
k: int (default 1)
number of samples for importance weighted sampling
Returns: Tensor1, Tensor2
Tensor1: tensor for translations [batch, tgt_length]
Tensor2: lengths [batch]
"""
return self._get_core().translate_iw(src_sents, src_masks, n_len=n_len, n_tr=n_tr,
tau=tau, k=k)
def translate_sample(self, src_sents: torch.Tensor, src_masks: torch.Tensor,
n_len: int = 1, n_tr: int = 1, tau: float = 0.0) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Args:
src_sents: Tensor [batch, src_length]
tensor for source sentences
src_masks: Tensor [batch, src_length] or None
tensor for source masks
n_len: int (default 1)
number of length candidates
n_tr: int (default 1)
number of translations per sentence per length candidate
tau: float (default 0.0)
temperature
Returns: Tensor1, Tensor2
Tensor1: tensor for translations [batch * n_len * n_tr, tgt_length]
Tensor2: lengths [batch * n_len * n_tr]
"""
return self._get_core().translate_sample(src_sents, src_masks, n_len=n_len, n_tr=n_tr, tau=tau)
    def reconstruct_error(self, src_sents: torch.Tensor, tgt_sents: torch.Tensor,
src_masks: torch.Tensor, tgt_masks: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Args:
src_sents: Tensor [batch, src_length]
tensor for source sentences
tgt_sents: Tensor [batch, tgt_length]
tensor for target sentences
src_masks: Tensor [batch, src_length] or None
tensor for source masks
tgt_masks: Tensor [batch, tgt_length] or None
tensor for target masks
Returns: Tensor1, Tensor2
Tensor1: reconstruction error [batch]
Tensor2: length loss [batch]
"""
return self.core(src_sents, tgt_sents, src_masks, tgt_masks, only_recon_loss=True)
    def loss(self, src_sents: torch.Tensor, tgt_sents: torch.Tensor,
src_masks: torch.Tensor, tgt_masks: torch.Tensor,
nsamples: int = 1, eval=False) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
"""
Args:
src_sents: Tensor [batch, src_length]
tensor for source sentences
tgt_sents: Tensor [batch, tgt_length]
tensor for target sentences
src_masks: Tensor [batch, src_length] or None
tensor for source masks
tgt_masks: Tensor [batch, tgt_length] or None
tensor for target masks
nsamples: int
number of samples
eval: bool
if eval, turn off distributed mode
Returns: Tensor1, Tensor2, Tensor3
Tensor1: reconstruction error [batch]
Tensor2: KL [batch]
Tensor3: length loss [batch]
"""
core = self._get_core() if eval else self.core
return core(src_sents, tgt_sents, src_masks, tgt_masks, nsamples=nsamples)
def init_distributed(self, rank, local_rank):
assert not self.distribured_enabled
self.distribured_enabled = True
print("Initializing Distributed, rank {}, local rank {}".format(rank, local_rank))
dist.init_process_group(backend='nccl', rank=rank)
torch.cuda.set_device(local_rank)
self.core = DistributedDataParallel(self.core)
def sync_params(self):
assert self.distribured_enabled
core = self._get_core()
flat_dist_call([param.data for param in core.parameters()], dist.all_reduce)
self.core.needs_refresh = True
def enable_allreduce(self):
assert self.distribured_enabled
self.core.enable_allreduce()
def disable_allreduce(self):
assert self.distribured_enabled
self.core.disable_allreduce()
def save(self, model_path):
model = {'core': self._get_core().state_dict()}
model_name = os.path.join(model_path, 'model.pt')
torch.save(model, model_name)
def save_core(self, path):
core = self._get_core()
model = {'prior': core.prior.state_dict(),
'encoder': core.encoder.state_dict(),
'decoder': core.decoder.state_dict(),
'posterior': core.posterior.state_dict()}
torch.save(model, path)
def load_core(self, path, device, load_prior=True):
model = torch.load(path, map_location=device)
core = self._get_core()
core.posterior.load_state_dict(model['posterior'])
core.encoder.load_state_dict(model['encoder'])
core.decoder.load_state_dict(model['decoder'])
if load_prior:
core.prior.load_state_dict(model['prior'])
@classmethod
def load(cls, model_path, device):
params = json.load(open(os.path.join(model_path, 'config.json'), 'r'))
flownmt = FlowNMT.from_params(params).to(device)
model_name = os.path.join(model_path, 'model.pt')
model = torch.load(model_name, map_location=device)
flownmt.core.load_state_dict(model['core'])
return flownmt
@classmethod
def from_params(cls, params: Dict) -> "FlowNMT":
src_vocab_size = params.pop('src_vocab_size')
tgt_vocab_size = params.pop('tgt_vocab_size')
embed_dim = params.pop('embed_dim')
latent_dim = params.pop('latent_dim')
hidden_size = params.pop('hidden_size')
max_src_length = params.pop('max_src_length')
max_tgt_length = params.pop('max_tgt_length')
src_pad_idx = params.pop('src_pad_idx')
tgt_pad_idx = params.pop('tgt_pad_idx')
share_embed = params.pop('share_embed')
tie_weights = params.pop('tie_weights')
# prior
prior_params = params.pop('prior')
prior_params['flow']['features'] = latent_dim
prior_params['flow']['src_features'] = latent_dim
prior_params['length_predictor']['features'] = latent_dim
prior_params['length_predictor']['max_src_length'] = max_src_length
prior = Prior.by_name(prior_params.pop('type')).from_params(prior_params)
        # encoder
encoder_params = params.pop('encoder')
encoder_params['vocab_size'] = src_vocab_size
encoder_params['embed_dim'] = embed_dim
encoder_params['padding_idx'] = src_pad_idx
encoder_params['latent_dim'] = latent_dim
encoder_params['hidden_size'] = hidden_size
encoder = Encoder.by_name(encoder_params.pop('type')).from_params(encoder_params)
# posterior
posterior_params = params.pop('posterior')
posterior_params['vocab_size'] = tgt_vocab_size
posterior_params['embed_dim'] = embed_dim
posterior_params['padding_idx'] = tgt_pad_idx
posterior_params['latent_dim'] = latent_dim
posterior_params['hidden_size'] = hidden_size
_shared_embed = encoder.embed if share_embed else None
posterior_params['_shared_embed'] = _shared_embed
posterior = Posterior.by_name(posterior_params.pop('type')).from_params(posterior_params)
# decoder
decoder_params = params.pop('decoder')
decoder_params['vocab_size'] = tgt_vocab_size
decoder_params['latent_dim'] = latent_dim
decoder_params['hidden_size'] = hidden_size
_shared_weight = posterior.tgt_embed.weight if tie_weights else None
decoder_params['_shared_weight'] = _shared_weight
decoder = Decoder.by_name(decoder_params.pop('type')).from_params(decoder_params)
return FlowNMT(FlowNMTCore(encoder, prior, posterior, decoder))
| 29,121 | 44.432137 | 154 | py |
flowseq | flowseq-master/flownmt/modules/decoders/simple.py | from overrides import overrides
from typing import Dict, Tuple
import torch
import torch.nn as nn
import torch.nn.functional as F
from flownmt.modules.decoders.decoder import Decoder
from flownmt.nnet.attention import GlobalAttention
class SimpleDecoder(Decoder):
"""
Simple Decoder to predict translations from latent z
"""
def __init__(self, vocab_size, latent_dim, hidden_size, dropout=0.0, label_smoothing=0., _shared_weight=None):
super(SimpleDecoder, self).__init__(vocab_size, latent_dim,
label_smoothing=label_smoothing,
_shared_weight=_shared_weight)
self.attn = GlobalAttention(latent_dim, latent_dim, latent_dim, hidden_features=hidden_size)
ctx_features = latent_dim * 2
self.ctx_proj = nn.Sequential(nn.Linear(ctx_features, latent_dim), nn.ELU())
self.dropout = dropout
@overrides
def forward(self, z, src, src_mask):
ctx = self.attn(z, src, key_mask=src_mask.eq(0))
ctx = F.dropout(self.ctx_proj(torch.cat([ctx, z], dim=2)), p=self.dropout, training=self.training)
return self.readout(ctx)
@overrides
def init(self, z, mask, src, src_mask, init_scale=1.0):
with torch.no_grad():
self(z, src, src_mask)
@overrides
def decode(self, z: torch.Tensor, mask: torch.Tensor, src: torch.Tensor, src_mask: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Args:
z: Tensor
latent code [batch, length, hidden_size]
mask: Tensor
mask [batch, length]
src: Tensor
src encoding [batch, src_length, hidden_size]
src_mask: Tensor
source mask [batch, src_length]
Returns: Tensor1, Tensor2
Tenser1: decoded word index [batch, length]
Tensor2: log probabilities of decoding [batch]
"""
# [batch, length, vocab_size]
log_probs = F.log_softmax(self(z, src, src_mask), dim=2)
# [batch, length]
log_probs, dec = log_probs.max(dim=2)
dec = dec * mask.long()
# [batch]
log_probs = log_probs.mul(mask).sum(dim=1)
return dec, log_probs
@overrides
def loss(self, z: torch.Tensor, target: torch.Tensor, mask: torch.Tensor,
src: torch.Tensor, src_mask: torch.Tensor) -> torch.Tensor:
"""
Args:
z: Tensor
latent codes [batch, length, hidden_size]
target: LongTensor
target translations [batch, length]
mask: Tensor
masks for target sentence [batch, length]
src: Tensor
src encoding [batch, src_length, hidden_size]
src_mask: Tensor
source mask [batch, src_length]
Returns: Tensor
tensor for loss [batch]
"""
# [batch, length, vocab_size] -> [batch, vocab_size, length]
logits = self(z, src, src_mask).transpose(1, 2)
# [batch, length]
loss = self.criterion(logits, target).mul(mask)
return loss.sum(dim=1)
@classmethod
def from_params(cls, params: Dict) -> "SimpleDecoder":
return SimpleDecoder(**params)
SimpleDecoder.register('simple')
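if __name__ == '__main__':
    # Minimal smoke test (illustrative, not part of the original file); it
    # assumes the flownmt package is importable so GlobalAttention resolves.
    torch.manual_seed(0)
    decoder = SimpleDecoder(vocab_size=100, latent_dim=16, hidden_size=32)
    z = torch.randn(2, 5, 16)       # latent codes [batch, length, latent_dim]
    mask = torch.ones(2, 5)
    src = torch.randn(2, 7, 16)     # src encoding [batch, src_length, latent_dim]
    src_mask = torch.ones(2, 7)
    dec, log_probs = decoder.decode(z, mask, src, src_mask)
    print(dec.size(), log_probs.size())  # expected: [2, 5] and [2]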
| 3,347 | 33.875 | 138 | py |
flowseq | flowseq-master/flownmt/modules/decoders/transformer.py | from overrides import overrides
from typing import Dict, Tuple
import torch
import torch.nn as nn
import torch.nn.functional as F
from flownmt.modules.decoders.decoder import Decoder
from flownmt.nnet.attention import MultiHeadAttention
from flownmt.nnet.transformer import TransformerDecoderLayer
from flownmt.nnet.positional_encoding import PositionalEncoding
class TransformerDecoder(Decoder):
"""
Decoder with Transformer
"""
def __init__(self, vocab_size, latent_dim, num_layers, hidden_size, heads, label_smoothing=0.,
dropout=0.0, dropword=0.0, max_length=100, _shared_weight=None):
super(TransformerDecoder, self).__init__(vocab_size, latent_dim,
label_smoothing=label_smoothing,
_shared_weight=_shared_weight)
self.pos_enc = PositionalEncoding(latent_dim, None, max_length + 1)
self.pos_attn = MultiHeadAttention(latent_dim, heads, dropout=dropout)
layers = [TransformerDecoderLayer(latent_dim, hidden_size, heads, dropout=dropout) for _ in range(num_layers)]
self.layers = nn.ModuleList(layers)
self.dropword = dropword # drop entire tokens
def forward(self, z, mask, src, src_mask):
z = F.dropout2d(z, p=self.dropword, training=self.training)
# [batch, length, latent_dim]
pos_enc = self.pos_enc(z) * mask.unsqueeze(2)
key_mask = mask.eq(0)
ctx = self.pos_attn(pos_enc, z, z, key_mask)
src_mask = src_mask.eq(0)
for layer in self.layers:
ctx = layer(ctx, key_mask, src, src_mask)
return self.readout(ctx)
@overrides
def init(self, z, mask, src, src_mask, init_scale=1.0):
with torch.no_grad():
return self(z, mask, src, src_mask)
@overrides
def decode(self, z: torch.Tensor, mask: torch.Tensor, src: torch.Tensor, src_mask: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Args:
z: Tensor
latent code [batch, length, hidden_size]
mask: Tensor
mask [batch, length]
src: Tensor
src encoding [batch, src_length, hidden_size]
src_mask: Tensor
source mask [batch, src_length]
Returns: Tensor1, Tensor2
            Tensor1: decoded word index [batch, length]
Tensor2: log probabilities of decoding [batch]
"""
# [batch, length, vocab_size]
log_probs = F.log_softmax(self(z, mask, src, src_mask), dim=2)
# [batch, length]
log_probs, dec = log_probs.max(dim=2)
dec = dec * mask.long()
# [batch]
log_probs = log_probs.mul(mask).sum(dim=1)
return dec, log_probs
@overrides
def loss(self, z: torch.Tensor, target: torch.Tensor, mask: torch.Tensor,
src: torch.Tensor, src_mask: torch.Tensor) -> torch.Tensor:
"""
Args:
z: Tensor
latent codes [batch, length, hidden_size]
target: LongTensor
target translations [batch, length]
mask: Tensor
masks for target sentence [batch, length]
src: Tensor
src encoding [batch, src_length, hidden_size]
src_mask: Tensor
source mask [batch, src_length]
Returns: Tensor
tensor for loss [batch]
"""
# [batch, length, vocab_size] -> [batch, vocab_size, length]
logits = self(z, mask, src, src_mask).transpose(1, 2)
# [batch, length]
loss = self.criterion(logits, target).mul(mask)
return loss.sum(dim=1)
@classmethod
def from_params(cls, params: Dict) -> "TransformerDecoder":
return TransformerDecoder(**params)
TransformerDecoder.register('transformer')
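if __name__ == '__main__':
    # Minimal smoke test (illustrative, not part of the original file); it
    # assumes heads divides latent_dim evenly, which the multi-head attention
    # (not shown in this file) presumably requires.
    torch.manual_seed(0)
    decoder = TransformerDecoder(vocab_size=100, latent_dim=16, num_layers=2,
                                 hidden_size=32, heads=4)
    z = torch.randn(2, 5, 16)
    mask = torch.ones(2, 5)
    src = torch.randn(2, 7, 16)
    src_mask = torch.ones(2, 7)
    target = torch.randint(0, 100, (2, 5))
    loss = decoder.loss(z, target, mask, src, src_mask)
    print(loss.size())  # expected: [2], one loss value per sentence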
| 3,889 | 35.018519 | 138 | py |
flowseq | flowseq-master/flownmt/modules/decoders/decoder.py | from typing import Dict, Tuple
import torch
import torch.nn as nn
from flownmt.nnet.criterion import LabelSmoothedCrossEntropyLoss
class Decoder(nn.Module):
"""
Decoder to predict translations from latent z
"""
_registry = dict()
def __init__(self, vocab_size, latent_dim, label_smoothing=0., _shared_weight=None):
super(Decoder, self).__init__()
self.readout = nn.Linear(latent_dim, vocab_size, bias=True)
if _shared_weight is not None:
self.readout.weight = _shared_weight
nn.init.constant_(self.readout.bias, 0.)
else:
self.reset_parameters(latent_dim)
if label_smoothing < 1e-5:
self.criterion = nn.CrossEntropyLoss(reduction='none')
        elif label_smoothing < 1.0:
self.criterion = LabelSmoothedCrossEntropyLoss(label_smoothing)
else:
raise ValueError('label smoothing should be less than 1.0.')
def reset_parameters(self, dim):
# nn.init.normal_(self.readout.weight, mean=0, std=dim ** -0.5)
nn.init.uniform_(self.readout.weight, -0.1, 0.1)
nn.init.constant_(self.readout.bias, 0.)
def init(self, z, mask, src, src_mask, init_scale=1.0):
raise NotImplementedError
def decode(self, z: torch.Tensor, mask: torch.Tensor, src: torch.Tensor, src_mask: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Args:
z: Tensor
latent code [batch, length, hidden_size]
mask: Tensor
mask [batch, length]
src: Tensor
src encoding [batch, src_length, hidden_size]
src_mask: Tensor
source mask [batch, src_length]
Returns: Tensor1, Tensor2
            Tensor1: decoded word index [batch, length]
Tensor2: log probabilities of decoding [batch]
"""
raise NotImplementedError
def loss(self, z: torch.Tensor, target: torch.Tensor, mask: torch.Tensor,
src: torch.Tensor, src_mask: torch.Tensor) -> torch.Tensor:
"""
Args:
z: Tensor
latent codes [batch, length, hidden_size]
target: LongTensor
target translations [batch, length]
mask: Tensor
masks for target sentence [batch, length]
src: Tensor
src encoding [batch, src_length, hidden_size]
src_mask: Tensor
source mask [batch, src_length]
Returns: Tensor
tensor for loss [batch]
"""
raise NotImplementedError
@classmethod
def register(cls, name: str):
Decoder._registry[name] = cls
@classmethod
def by_name(cls, name: str):
return Decoder._registry[name]
@classmethod
def from_params(cls, params: Dict) -> "Decoder":
raise NotImplementedError
Decoder.register('simple')
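# Illustrative note (not part of the original file): concrete decoders call
# Decoder.register('<name>') at import time, and factory code resolves the
# class from the 'type' key of a config dict, e.g.
# dec_params = {'type': 'rnn', 'vocab_size': 100, ...}  # hypothetical values
# decoder = Decoder.by_name(dec_params.pop('type')).from_params(dec_params)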
| 2,946 | 30.351064 | 138 | py |
flowseq | flowseq-master/flownmt/modules/decoders/rnn.py | from overrides import overrides
from typing import Dict, Tuple
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.utils.rnn import pad_packed_sequence, pack_padded_sequence
from flownmt.modules.decoders.decoder import Decoder
from flownmt.nnet.attention import GlobalAttention
class RecurrentDecoder(Decoder):
"""
Decoder with Recurrent Neural Networks
"""
def __init__(self, vocab_size, latent_dim, rnn_mode, num_layers, hidden_size, bidirectional=True,
dropout=0.0, dropword=0.0, label_smoothing=0., _shared_weight=None):
super(RecurrentDecoder, self).__init__(vocab_size, latent_dim,
label_smoothing=label_smoothing,
_shared_weight=_shared_weight)
if rnn_mode == 'RNN':
RNN = nn.RNN
elif rnn_mode == 'LSTM':
RNN = nn.LSTM
elif rnn_mode == 'GRU':
RNN = nn.GRU
else:
raise ValueError('Unknown RNN mode: %s' % rnn_mode)
assert hidden_size % 2 == 0
# RNN for processing latent variables zs
if bidirectional:
self.rnn = RNN(latent_dim, hidden_size // 2, num_layers=num_layers, batch_first=True, bidirectional=True)
else:
self.rnn = RNN(latent_dim, hidden_size, num_layers=num_layers, batch_first=True, bidirectional=False)
self.attn = GlobalAttention(latent_dim, hidden_size, latent_dim, hidden_features=hidden_size)
self.ctx_proj = nn.Sequential(nn.Linear(latent_dim + hidden_size, latent_dim), nn.ELU())
self.dropout = dropout
self.dropout2d = nn.Dropout2d(dropword) if dropword > 0. else None # drop entire tokens
def forward(self, z, mask, src, src_mask):
lengths = mask.sum(dim=1).long()
if self.dropout2d is not None:
z = self.dropout2d(z)
packed_z = pack_padded_sequence(z, lengths, batch_first=True, enforce_sorted=False)
packed_enc, _ = self.rnn(packed_z)
enc, _ = pad_packed_sequence(packed_enc, batch_first=True, total_length=mask.size(1))
ctx = self.attn(enc, src, key_mask=src_mask.eq(0))
ctx = torch.cat([ctx, enc], dim=2)
ctx = F.dropout(self.ctx_proj(ctx), p=self.dropout, training=self.training)
return self.readout(ctx)
@overrides
def init(self, z, mask, src, src_mask, init_scale=1.0):
with torch.no_grad():
return self(z, mask, src, src_mask)
@overrides
def decode(self, z: torch.Tensor, mask: torch.Tensor, src: torch.Tensor, src_mask: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Args:
z: Tensor
latent code [batch, length, hidden_size]
mask: Tensor
mask [batch, length]
src: Tensor
src encoding [batch, src_length, hidden_size]
src_mask: Tensor
source mask [batch, src_length]
Returns: Tensor1, Tensor2
            Tensor1: decoded word index [batch, length]
Tensor2: log probabilities of decoding [batch]
"""
# [batch, length, vocab_size]
log_probs = F.log_softmax(self(z, mask, src, src_mask), dim=2)
# [batch, length]
log_probs, dec = log_probs.max(dim=2)
dec = dec * mask.long()
# [batch]
log_probs = log_probs.mul(mask).sum(dim=1)
return dec, log_probs
@overrides
def loss(self, z: torch.Tensor, target: torch.Tensor, mask: torch.Tensor,
src: torch.Tensor, src_mask: torch.Tensor) -> torch.Tensor:
"""
Args:
z: Tensor
latent codes [batch, length, hidden_size]
target: LongTensor
target translations [batch, length]
mask: Tensor
masks for target sentence [batch, length]
src: Tensor
src encoding [batch, src_length, hidden_size]
src_mask: Tensor
source mask [batch, src_length]
Returns: Tensor
tensor for loss [batch]
"""
# [batch, length, vocab_size] -> [batch, vocab_size, length]
logits = self(z, mask, src, src_mask).transpose(1, 2)
# [batch, length]
loss = self.criterion(logits, target).mul(mask)
return loss.sum(dim=1)
@classmethod
def from_params(cls, params: Dict) -> "RecurrentDecoder":
return RecurrentDecoder(**params)
RecurrentDecoder.register('rnn')
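if __name__ == '__main__':
    # Minimal smoke test (illustrative, not part of the original file); note
    # that pack_padded_sequence(..., enforce_sorted=False) lets the batch mix
    # unsorted, variable-length target sequences.
    torch.manual_seed(0)
    decoder = RecurrentDecoder(vocab_size=100, latent_dim=16, rnn_mode='LSTM',
                               num_layers=1, hidden_size=32)
    z = torch.randn(2, 5, 16)
    mask = torch.tensor([[1., 1., 1., 0., 0.],
                         [1., 1., 1., 1., 1.]])  # lengths 3 and 5
    src = torch.randn(2, 7, 16)
    src_mask = torch.ones(2, 7)
    dec, log_probs = decoder.decode(z, mask, src, src_mask)
    print(dec.size(), log_probs.size())  # expected: [2, 5] and [2]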
| 4,558 | 36.368852 | 138 | py |
flowseq | flowseq-master/flownmt/modules/priors/prior.py | import math
from typing import Dict, Tuple, Union
import torch
import torch.nn as nn
from flownmt.flows.nmt import NMTFlow
from flownmt.modules.priors.length_predictors import LengthPredictor
class Prior(nn.Module):
"""
    class for Prior with an NMTFlow inside
"""
_registry = dict()
def __init__(self, flow: NMTFlow, length_predictor: LengthPredictor):
super(Prior, self).__init__()
assert flow.inverse, 'prior flow should have inverse mode'
self.flow = flow
self.length_unit = max(2, 2 ** (self.flow.levels - 1))
self.features = self.flow.features
self._length_predictor = length_predictor
self._length_predictor.set_length_unit(self.length_unit)
def sync(self):
self.flow.sync()
def predict_length(self, ctx: torch.Tensor, src_mask: torch.Tensor, topk: int = 1) -> Tuple[torch.LongTensor, torch.Tensor]:
"""
Args:
ctx: Tensor
tensor [batch, features]
src_mask: Tensor
tensor for source mask [batch, src_length]
topk: int (default 1)
return top k length candidates for each src sentence
Returns: LongTensor1, Tensor2
LongTensor1: tensor for lengths [batch, topk]
Tensor2: log probs for each length [batch, topk]
"""
return self._length_predictor.predict(ctx, src_mask, topk=topk)
def length_loss(self, ctx: torch.Tensor, src_mask: torch.Tensor, tgt_mask: torch.Tensor) -> torch.Tensor:
"""
Args:
ctx: Tensor
tensor [batch, features]
src_mask: Tensor
tensor for source mask [batch, src_length]
tgt_mask: Tensor
tensor for target mask [batch, tgt_length]
Returns: Tensor
tensor for loss [batch]
"""
return self._length_predictor.loss(ctx, src_mask, tgt_mask)
def decode(self, epsilon: torch.Tensor, tgt_mask: torch.Tensor,
src: torch.Tensor, src_mask: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Args:
epsilon: Tensor
                epsilon [batch, tgt_length, nz]
tgt_mask: Tensor
tensor of target masks [batch, tgt_length]
src: Tensor
source encoding [batch, src_length, hidden_size]
src_mask: Tensor
tensor of source masks [batch, src_length]
Returns: Tensor1, Tensor2
Tensor1: decoded latent code z [batch, tgt_length, nz]
Tensor2: log probabilities [batch]
"""
# [batch, tgt_length, nz]
z, logdet = self.flow.fwdpass(epsilon, tgt_mask, src, src_mask)
# [batch, tgt_length, nz]
log_probs = epsilon.mul(epsilon) + math.log(math.pi * 2.0)
# apply mask
log_probs = log_probs.mul(tgt_mask.unsqueeze(2))
# [batch]
log_probs = log_probs.view(z.size(0), -1).sum(dim=1).mul(-0.5) + logdet
return z, log_probs
def sample(self, nlengths: int, nsamples: int, src: torch.Tensor,
ctx: torch.Tensor, src_mask: torch.Tensor,
tau=0.0, include_zero=False) -> Tuple[Tuple[torch.Tensor, torch.Tensor, torch.Tensor], Tuple[torch.Tensor, torch.Tensor], Tuple[torch.Tensor, torch.Tensor, torch.Tensor]]:
"""
Args:
nlengths: int
number of lengths per sentence
nsamples: int
number of samples per sentence per length
src: Tensor
source encoding [batch, src_length, hidden_size]
ctx: Tensor
tensor for global state [batch, hidden_size]
src_mask: Tensor
tensor of masks [batch, src_length]
tau: float (default 0.0)
temperature of density
include_zero: bool (default False)
include zero sample
Returns: (Tensor1, Tensor2, Tensor3), (Tensor4, Tensor5), (Tensor6, Tensor7, Tensor8)
Tensor1: samples from the prior [batch * nlengths * nsamples, tgt_length, nz]
Tensor2: log probabilities [batch * nlengths * nsamples]
Tensor3: target masks [batch * nlengths * nsamples, tgt_length]
Tensor4: lengths [batch * nlengths]
Tensor5: log probabilities of lengths [batch * nlengths]
Tensor6: source encoding with shape [batch * nlengths * nsamples, src_length, hidden_size]
Tensor7: tensor for global state [batch * nlengths * nsamples, hidden_size]
Tensor8: source masks with shape [batch * nlengths * nsamples, src_length]
"""
batch = src.size(0)
batch_nlen = batch * nlengths
# [batch, nlenths]
lengths, log_probs_length = self.predict_length(ctx, src_mask, topk=nlengths)
# [batch * nlengths]
log_probs_length = log_probs_length.view(-1)
lengths = lengths.view(-1)
max_length = lengths.max().item()
# [batch * nlengths, max_length]
tgt_mask = torch.arange(max_length).to(src.device).unsqueeze(0).expand(batch_nlen, max_length).lt(lengths.unsqueeze(1)).float()
# [batch * nlengths, nsamples, tgt_length, nz]
epsilon = src.new_empty(batch_nlen, nsamples, max_length, self.features).normal_()
epsilon = epsilon.mul(tgt_mask.view(batch_nlen, 1, max_length, 1)) * tau
if include_zero:
epsilon[:, 0].zero_()
# [batch * nlengths * nsamples, tgt_length, nz]
epsilon = epsilon.view(-1, max_length, self.features)
if nsamples * nlengths > 1:
# [batch, nlengths * nsamples, src_length, hidden_size]
src = src.unsqueeze(1) + src.new_zeros(batch, nlengths * nsamples, *src.size()[1:])
# [batch * nlengths * nsamples, src_length, hidden_size]
src = src.view(batch_nlen * nsamples, *src.size()[2:])
# [batch, nlengths * nsamples, hidden_size]
ctx = ctx.unsqueeze(1) + ctx.new_zeros(batch, nlengths * nsamples, ctx.size(1))
# [batch * nlengths * nsamples, hidden_size]
ctx = ctx.view(batch_nlen * nsamples, ctx.size(2))
# [batch, nlengths * nsamples, src_length]
src_mask = src_mask.unsqueeze(1) + src_mask.new_zeros(batch, nlengths * nsamples, src_mask.size(1))
# [batch * nlengths * nsamples, src_length]
src_mask = src_mask.view(batch_nlen * nsamples, src_mask.size(2))
# [batch * nlengths, nsamples, tgt_length]
tgt_mask = tgt_mask.unsqueeze(1) + tgt_mask.new_zeros(batch_nlen, nsamples, tgt_mask.size(1))
# [batch * nlengths * nsamples, tgt_length]
tgt_mask = tgt_mask.view(batch_nlen * nsamples, tgt_mask.size(2))
        # [batch * nlengths * nsamples, tgt_length, nz]
z, log_probs = self.decode(epsilon, tgt_mask, src, src_mask)
return (z, log_probs, tgt_mask), (lengths, log_probs_length), (src, ctx, src_mask)
def log_probability(self, z: torch.Tensor, tgt_mask: torch.Tensor,
src: torch.Tensor, ctx: torch.Tensor, src_mask: torch.Tensor,
length_loss: bool = True) -> Tuple[torch.Tensor, Union[torch.Tensor, None]]:
"""
Args:
z: Tensor
tensor of latent code [batch, length, nz]
tgt_mask: Tensor
tensor of target masks [batch, length]
src: Tensor
source encoding [batch, src_length, hidden_size]
ctx: Tensor
tensor for global state [batch, hidden_size]
src_mask: Tensor
tensor of source masks [batch, src_length]
length_loss: bool (default True)
compute loss of length
Returns: Tensor1, Tensor2
Tensor1: log probabilities of z [batch]
Tensor2: length loss [batch]
"""
# [batch]
loss_length = self.length_loss(ctx, src_mask, tgt_mask) if length_loss else None
# [batch, length, nz]
epsilon, logdet = self.flow.bwdpass(z, tgt_mask, src, src_mask)
# [batch, tgt_length, nz]
log_probs = epsilon.mul(epsilon) + math.log(math.pi * 2.0)
# apply mask
log_probs = log_probs.mul(tgt_mask.unsqueeze(2))
log_probs = log_probs.view(z.size(0), -1).sum(dim=1).mul(-0.5) + logdet
return log_probs, loss_length
def init(self, z, tgt_mask, src, src_mask, init_scale=1.0):
return self.flow.bwdpass(z, tgt_mask, src, src_mask, init=True, init_scale=init_scale)
@classmethod
def register(cls, name: str):
Prior._registry[name] = cls
@classmethod
def by_name(cls, name: str):
return Prior._registry[name]
@classmethod
def from_params(cls, params: Dict) -> "Prior":
flow_params = params.pop('flow')
flow = NMTFlow.from_params(flow_params)
predictor_params = params.pop('length_predictor')
length_predictor = LengthPredictor.by_name(predictor_params.pop('type')).from_params(predictor_params)
return Prior(flow, length_predictor)
Prior.register('normal')
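if __name__ == '__main__':
    # Numeric sanity check (illustrative, not part of the original file): the
    # standard-normal log-density used in decode() and log_probability() above,
    # -0.5 * sum(eps^2 + log(2*pi)), matches torch.distributions on an
    # unmasked tensor.
    import torch.distributions as D
    eps = torch.randn(3, 4, 5)
    ref = D.Normal(0., 1.).log_prob(eps).view(3, -1).sum(dim=1)
    ours = (eps.mul(eps) + math.log(math.pi * 2.0)).view(3, -1).sum(dim=1).mul(-0.5)
    print(torch.allclose(ref, ours, atol=1e-5))  # expected: True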
| 9,219 | 41.293578 | 186 | py |
flowseq | flowseq-master/flownmt/modules/priors/length_predictors/diff_softmax.py | from overrides import overrides
from typing import Dict, Tuple
import torch
import torch.nn as nn
import torch.nn.functional as F
from flownmt.modules.priors.length_predictors.predictor import LengthPredictor
from flownmt.nnet.criterion import LabelSmoothedCrossEntropyLoss
class DiffSoftMaxLengthPredictor(LengthPredictor):
def __init__(self, features, max_src_length, diff_range, dropout=0.0, label_smoothing=0.):
super(DiffSoftMaxLengthPredictor, self).__init__()
self.max_src_length = max_src_length
self.range = diff_range
self.features = features
self.dropout = dropout
self.ctx_proj = None
self.diff = None
if label_smoothing < 1e-5:
self.criterion = nn.CrossEntropyLoss(reduction='none')
        elif label_smoothing < 1.0:
self.criterion = LabelSmoothedCrossEntropyLoss(label_smoothing)
else:
raise ValueError('label smoothing should be less than 1.0.')
def set_length_unit(self, length_unit):
self.length_unit = length_unit
self.ctx_proj = nn.Sequential(nn.Linear(self.features, self.features), nn.ELU(),
nn.Linear(self.features, self.features), nn.ELU())
self.diff = nn.Linear(self.features, 2 * self.range + 1)
self.reset_parameters()
def reset_parameters(self):
nn.init.constant_(self.ctx_proj[0].bias, 0.)
nn.init.constant_(self.ctx_proj[2].bias, 0.)
nn.init.uniform_(self.diff.weight, -0.1, 0.1)
nn.init.constant_(self.diff.bias, 0.)
def forward(self, ctx):
ctx = F.dropout(self.ctx_proj(ctx), p=self.dropout, training=self.training)
return self.diff(ctx)
@overrides
def loss(self, ctx: torch.Tensor, src_mask: torch.Tensor, tgt_mask: torch.Tensor) -> torch.Tensor:
"""
Args:
ctx: Tensor
tensor [batch, features]
src_mask: Tensor
tensor for source mask [batch, src_length]
tgt_mask: Tensor
tensor for target mask [batch, tgt_length]
Returns: Tensor
tensor for loss [batch]
"""
# [batch]
src_lengths = src_mask.sum(dim=1).long()
tgt_lengths = tgt_mask.sum(dim=1).long()
# [batch, 2 * range + 1]
logits = self(ctx)
# [1, 2 * range + 1]
mask = torch.arange(0, logits.size(1), device=logits.device).unsqueeze(0)
# [batch, 2 * range + 1]
mask = (mask + src_lengths.unsqueeze(1) - self.range).fmod(self.length_unit).ne(0)
logits = logits.masked_fill(mask, float('-inf'))
# handle tgt < src - range
x = (tgt_lengths - src_lengths + self.range).clamp(min=0)
tgt = x + src_lengths - self.range
res = tgt.fmod(self.length_unit)
padding = (self.length_unit - res).fmod(self.length_unit)
tgt = tgt + padding
# handle tgt > src + range
x = (tgt - src_lengths + self.range).clamp(max=2 * self.range)
tgt = x + src_lengths - self.range
tgt = tgt - tgt.fmod(self.length_unit)
x = tgt - src_lengths + self.range
loss_length = self.criterion(logits, x)
return loss_length
@overrides
    def predict(self, ctx: torch.Tensor, src_mask: torch.Tensor, topk: int = 1) -> Tuple[torch.LongTensor, torch.Tensor]:
"""
Args:
ctx: Tensor
tensor [batch, features]
src_mask: Tensor
tensor for source mask [batch, src_length]
topk: int (default 1)
return top k length candidates for each src sentence
Returns: LongTensor1, Tensor2
LongTensor1: tensor for lengths [batch, topk]
Tensor2: log probs for each length
"""
# [batch]
src_lengths = src_mask.sum(dim=1).long()
# [batch, 2 * range + 1]
logits = self(ctx)
# [1, 2 * range + 1]
x = torch.arange(0, logits.size(1), device=logits.device).unsqueeze(0)
# [batch, 2 * range + 1]
tgt = x + src_lengths.unsqueeze(1) - self.range
mask = tgt.fmod(self.length_unit).ne(0)
logits = logits.masked_fill(mask, float('-inf'))
# [batch, 2 * range + 1]
log_probs = F.log_softmax(logits, dim=1)
# handle tgt length <= 0
mask = tgt.le(0)
log_probs = log_probs.masked_fill(mask, float('-inf'))
# [batch, topk]
log_probs, x = log_probs.topk(topk, dim=1)
lengths = x + src_lengths.unsqueeze(1) - self.range
return lengths, log_probs
@classmethod
def from_params(cls, params: Dict) -> 'DiffSoftMaxLengthPredictor':
return DiffSoftMaxLengthPredictor(**params)
DiffSoftMaxLengthPredictor.register('diff_softmax')
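if __name__ == '__main__':
    # Minimal smoke test (illustrative, not part of the original file): with a
    # source length of 9, a diff range of 10 and a length unit of 4, the only
    # valid candidates are the positive multiples of 4 in [src_len - 10, src_len + 10].
    torch.manual_seed(0)
    predictor = DiffSoftMaxLengthPredictor(features=16, max_src_length=50, diff_range=10)
    predictor.set_length_unit(4)
    ctx = torch.randn(2, 16)
    src_mask = torch.ones(2, 9)
    lengths, log_probs = predictor.predict(ctx, src_mask, topk=2)
    print(lengths)  # each entry is one of 4, 8, 12, 16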
| 4,818 | 38.178862 | 120 | py |
flowseq | flowseq-master/flownmt/modules/priors/length_predictors/predictor.py | from typing import Dict, Tuple
import torch
import torch.nn as nn
class LengthPredictor(nn.Module):
"""
Length Predictor
"""
_registry = dict()
def __init__(self):
super(LengthPredictor, self).__init__()
self.length_unit = None
def set_length_unit(self, length_unit):
self.length_unit = length_unit
def loss(self, ctx: torch.Tensor, src_mask: torch.Tensor, tgt_mask: torch.Tensor) -> torch.Tensor:
"""
Args:
ctx: Tensor
tensor [batch, features]
src_mask: Tensor
tensor for source mask [batch, src_length]
tgt_mask: Tensor
tensor for target mask [batch, tgt_length]
Returns: Tensor
tensor for loss [batch]
"""
raise NotImplementedError
    def predict(self, ctx: torch.Tensor, src_mask: torch.Tensor, topk: int = 1) -> Tuple[torch.LongTensor, torch.Tensor]:
"""
Args:
ctx: Tensor
tensor [batch, features]
src_mask: Tensor
tensor for source mask [batch, src_length]
topk: int (default 1)
return top k length candidates for each src sentence
Returns: LongTensor1, Tensor2
LongTensor1: tensor for lengths [batch, topk]
Tensor2: log probs for each length
"""
raise NotImplementedError
@classmethod
def register(cls, name: str):
LengthPredictor._registry[name] = cls
@classmethod
def by_name(cls, name: str):
return LengthPredictor._registry[name]
@classmethod
def from_params(cls, params: Dict):
raise NotImplementedError
| 1,712 | 26.629032 | 120 | py |
flowseq | flowseq-master/flownmt/modules/priors/length_predictors/utils.py | from typing import Tuple
import numpy as np
import torch
import torch.nn.functional as F
def discretized_mix_logistic_loss(x, means, logscales, logit_probs,
bin_size, lower, upper) -> torch.Tensor:
"""
loss for discretized mixture logistic distribution
Args:
        x: [batch]
means: [batch, nmix]
logscales: [batch, nmix]
        logit_probs: [batch, nmix]
bin_size: float
The segment for cdf is [x-binsize, x+binsize]
lower: float
upper: float
Returns:
loss [batch]
"""
eps = 1e-12
# [batch, 1]
x = x.unsqueeze(1)
# [batch, nmix]
centered_x = x - means
if isinstance(logscales, float):
inv_stdv = np.exp(-logscales)
else:
inv_stdv = torch.exp(-logscales)
# [batch, nmix]
min_in = inv_stdv * (centered_x - bin_size)
plus_in = inv_stdv * (centered_x + bin_size)
x_in = inv_stdv * centered_x
# [batch, nmix]
cdf_min = torch.sigmoid(min_in)
cdf_plus = torch.sigmoid(plus_in)
# lower < x < upper
cdf_delta = cdf_plus - cdf_min
log_cdf_mid = torch.log(cdf_delta + eps)
log_cdf_approx = x_in - logscales - 2. * F.softplus(x_in) + np.log(2 * bin_size)
# x < lower
log_cdf_low = plus_in - F.softplus(plus_in)
# x > upper
log_cdf_up = -F.softplus(min_in)
# [batch, nmix]
log_cdf = torch.where(cdf_delta.gt(1e-5), log_cdf_mid, log_cdf_approx)
log_cdf = torch.where(x.ge(lower), log_cdf, log_cdf_low)
log_cdf = torch.where(x.le(upper), log_cdf, log_cdf_up)
# [batch]
loss = torch.logsumexp(log_cdf + logit_probs, dim=1) * -1.
return loss
def discretized_mix_logistic_topk(means, logscales, logit_probs,
range, bin_size, lower, upper, topk=1) -> Tuple[torch.Tensor, torch.LongTensor]:
"""
topk for discretized mixture logistic distribution
Args:
means: [batch, nmix]
logscales: [batch, nmix]
        logit_probs: [batch, nmix]
range: int
range of x
bin_size: float
The segment for cdf is [x-binsize, x+binsize]
lower: float
upper: float
topk: int
Returns: Tensor1, Tensor2
Tensor1: log probs [batch, topk]
Tensor2: indexes for top k [batch, topk]
"""
eps = 1e-12
# [batch, 1, nmix]
means = means.unsqueeze(1)
logscales = logscales.unsqueeze(1)
logit_probs = logit_probs.unsqueeze(1)
# [1, 2 * range + 1, 1]
x = torch.arange(-range, range + 1, 1., device=means.device).unsqueeze(0).unsqueeze(2)
x = x.div(range)
# [batch, 2 * range + 1, nmix]
centered_x = x - means
if isinstance(logscales, float):
inv_stdv = np.exp(-logscales)
else:
inv_stdv = torch.exp(-logscales)
# [batch, 2 * range + 1, nmix]
min_in = inv_stdv * (centered_x - bin_size)
plus_in = inv_stdv * (centered_x + bin_size)
x_in = inv_stdv * centered_x
# [batch, 2 * range + 1, nmix]
cdf_min = torch.sigmoid(min_in)
cdf_plus = torch.sigmoid(plus_in)
# lower < x < upper
cdf_delta = cdf_plus - cdf_min
log_cdf_mid = torch.log(cdf_delta + eps)
log_cdf_approx = x_in - logscales - 2. * F.softplus(x_in) + np.log(2 * bin_size)
# x < lower
log_cdf_low = plus_in - F.softplus(plus_in)
# x > upper
log_cdf_up = -F.softplus(min_in)
# [batch, 2 * range + 1, nmix]
log_cdf = torch.where(cdf_delta.gt(1e-5), log_cdf_mid, log_cdf_approx)
log_cdf = torch.where(x.ge(lower), log_cdf, log_cdf_low)
log_cdf = torch.where(x.le(upper), log_cdf, log_cdf_up)
# [batch, 2 * range + 1]
log_probs = torch.logsumexp(log_cdf + logit_probs, dim=2)
log_probs, idx = log_probs.topk(topk, dim=1)
return log_probs, idx - range
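if __name__ == '__main__':
    # Sanity check (illustrative, not part of the original file): the two
    # boundary bins absorb the left and right tails, so the bin probabilities
    # over the full grid sum to ~1 (logsumexp ~ 0).
    torch.manual_seed(0)
    diff_range = 10
    bin_size = 0.5 / diff_range
    means = torch.zeros(2, 3)
    logscales = torch.full((2, 3), -1.)
    logit_probs = F.log_softmax(torch.randn(2, 3), dim=1)
    log_probs, idx = discretized_mix_logistic_topk(
        means, logscales, logit_probs, diff_range, bin_size,
        bin_size - 1.0, 1.0 - bin_size, topk=2 * diff_range + 1)
    print(torch.logsumexp(log_probs, dim=1))  # expected: ~[0., 0.]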
| 3,823 | 29.592 | 114 | py |
flowseq | flowseq-master/flownmt/modules/priors/length_predictors/diff_discretized_mix_logistic.py | from overrides import overrides
from typing import Dict, Tuple
import torch
import torch.nn as nn
import torch.nn.functional as F
from flownmt.modules.priors.length_predictors.predictor import LengthPredictor
from flownmt.modules.priors.length_predictors.utils import discretized_mix_logistic_loss, discretized_mix_logistic_topk
class DiffDiscreteMixLogisticLengthPredictor(LengthPredictor):
def __init__(self, features, max_src_length, diff_range, nmix=1, dropout=0.0):
super(DiffDiscreteMixLogisticLengthPredictor, self).__init__()
self.max_src_length = max_src_length
self.range = diff_range
self.nmix = nmix
self.features = features
self.dropout = dropout
self.ctx_proj = None
self.diff = None
def set_length_unit(self, length_unit):
self.length_unit = length_unit
self.ctx_proj = nn.Sequential(nn.Linear(self.features, self.features), nn.ELU())
self.diff = nn.Linear(self.features, 3 * self.nmix)
self.reset_parameters()
def reset_parameters(self):
nn.init.constant_(self.ctx_proj[0].bias, 0.)
nn.init.uniform_(self.diff.weight, -0.1, 0.1)
nn.init.constant_(self.diff.bias, 0.)
def forward(self, ctx):
ctx = F.dropout(self.ctx_proj(ctx), p=self.dropout, training=self.training)
# [batch, 3 * nmix]
coeffs = self.diff(ctx)
# [batch, nmix]
logit_probs = F.log_softmax(coeffs[:, :self.nmix], dim=1)
mu = coeffs[:, self.nmix:self.nmix * 2]
log_scale = coeffs[:, self.nmix * 2:]
return mu, log_scale, logit_probs
@overrides
def loss(self, ctx: torch.Tensor, src_mask: torch.Tensor, tgt_mask: torch.Tensor) -> torch.Tensor:
"""
Args:
ctx: Tensor
tensor [batch, features]
src_mask: Tensor
tensor for source mask [batch, src_length]
tgt_mask: Tensor
tensor for target mask [batch, tgt_length]
Returns: Tensor
tensor for loss [batch]
"""
src_lengths = src_mask.sum(dim=1).float()
tgt_lengths = tgt_mask.sum(dim=1).float()
        mu, log_scale, logit_probs = self(ctx)
x = (tgt_lengths - src_lengths).div(self.range).clamp(min=-1, max=1)
bin_size = 0.5 / self.range
lower = bin_size - 1.0
upper = 1.0 - bin_size
loss = discretized_mix_logistic_loss(x, mu, log_scale, logit_probs, bin_size, lower, upper)
return loss
@overrides
    def predict(self, ctx: torch.Tensor, src_mask: torch.Tensor, topk: int = 1) -> Tuple[torch.LongTensor, torch.Tensor]:
"""
Args:
ctx: Tensor
tensor [batch, features]
src_mask: Tensor
tensor for source mask [batch, src_length]
topk: int (default 1)
return top k length candidates for each src sentence
        Returns: LongTensor1, Tensor2
            LongTensor1: tensor for lengths [batch, topk]
            Tensor2: log probs for each length
"""
bin_size = 0.5 / self.range
lower = bin_size - 1.0
upper = 1.0 - bin_size
# [batch]
src_lengths = src_mask.sum(dim=1).long()
        mu, log_scale, logit_probs = self(ctx)
# [batch, topk]
log_probs, diffs = discretized_mix_logistic_topk(mu, log_scale, logit_probs,
self.range, bin_size, lower, upper, topk=topk)
lengths = (diffs + src_lengths.unsqueeze(1)).clamp(min=self.length_unit)
res = lengths.fmod(self.length_unit)
padding = (self.length_unit - res).fmod(self.length_unit)
lengths = lengths + padding
        return lengths, log_probs
@classmethod
def from_params(cls, params: Dict) -> 'DiffDiscreteMixLogisticLengthPredictor':
return DiffDiscreteMixLogisticLengthPredictor(**params)
DiffDiscreteMixLogisticLengthPredictor.register('diff_logistic')
| 4,046 | 39.069307 | 120 | py |
flowseq | flowseq-master/flownmt/modules/encoders/encoder.py | from overrides import overrides
from typing import Dict, Tuple
import torch
import torch.nn as nn
class Encoder(nn.Module):
"""
Src Encoder to encode source sentence
"""
_registry = dict()
def __init__(self, vocab_size, embed_dim, padding_idx):
super(Encoder, self).__init__()
self.embed = nn.Embedding(vocab_size, embed_dim, padding_idx=padding_idx)
self.reset_parameters()
def reset_parameters(self):
nn.init.uniform_(self.embed.weight, -0.1, 0.1)
if self.embed.padding_idx is not None:
with torch.no_grad():
self.embed.weight[self.embed.padding_idx].fill_(0)
@overrides
def forward(self, src_sents, masks=None) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Encoding src sentences into src encoding representations.
Args:
src_sents: Tensor [batch, length]
masks: Tensor or None [batch, length]
Returns: Tensor1, Tensor2
Tensor1: tensor for src encoding [batch, length, hidden_size]
Tensor2: tensor for global state [batch, hidden_size]
"""
raise NotImplementedError
def init(self, src_sents, masks=None, init_scale=1.0) -> torch.Tensor:
raise NotImplementedError
@classmethod
def register(cls, name: str):
Encoder._registry[name] = cls
@classmethod
def by_name(cls, name: str):
return Encoder._registry[name]
@classmethod
def from_params(cls, params: Dict):
raise NotImplementedError
| 1,549 | 28.245283 | 82 | py |
flowseq | flowseq-master/flownmt/modules/encoders/transformer.py | from overrides import overrides
from typing import Dict, Tuple
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from flownmt.modules.encoders.encoder import Encoder
from flownmt.nnet.transformer import TransformerEncoderLayer
from flownmt.nnet.positional_encoding import PositionalEncoding
class TransformerCore(nn.Module):
def __init__(self, embed, num_layers, latent_dim, hidden_size, heads, dropout=0.0, max_length=100):
super(TransformerCore, self).__init__()
self.embed = embed
self.padding_idx = embed.padding_idx
embed_dim = embed.embedding_dim
self.embed_scale = math.sqrt(embed_dim)
assert embed_dim == latent_dim
layers = [TransformerEncoderLayer(latent_dim, hidden_size, heads, dropout=dropout) for _ in range(num_layers)]
self.layers = nn.ModuleList(layers)
self.pos_enc = PositionalEncoding(latent_dim, self.padding_idx, max_length + 1)
self.reset_parameters()
def reset_parameters(self):
pass
@overrides
def forward(self, src_sents, masks) -> Tuple[torch.Tensor, torch.Tensor]:
        # [batch, length, embed_dim]
x = self.embed_scale * self.embed(src_sents)
x += self.pos_enc(src_sents)
x = F.dropout(x, p=0.2, training=self.training)
        # [batch, length, latent_dim]
key_mask = masks.eq(0)
if not key_mask.any():
key_mask = None
for layer in self.layers:
x = layer(x, key_mask)
x *= masks.unsqueeze(2)
# [batch, latent_dim]
batch = src_sents.size(0)
idx = masks.sum(dim=1).long() - 1
batch_idx = torch.arange(0, batch).long().to(idx.device)
ctx = x[batch_idx, idx]
return x, ctx
class TransformerEncoder(Encoder):
"""
Src Encoder to encode source sentence with Transformer
"""
def __init__(self, vocab_size, embed_dim, padding_idx, num_layers, latent_dim, hidden_size, heads, dropout=0.0, max_length=100):
super(TransformerEncoder, self).__init__(vocab_size, embed_dim, padding_idx)
self.core = TransformerCore(self.embed, num_layers, latent_dim, hidden_size, heads, dropout=dropout, max_length=max_length)
@overrides
def forward(self, src_sents, masks=None) -> Tuple[torch.Tensor, torch.Tensor]:
src_enc, ctx = self.core(src_sents, masks=masks)
return src_enc, ctx
def init(self, src_sents, masks=None, init_scale=1.0) -> torch.Tensor:
with torch.no_grad():
src_enc, _ = self.core(src_sents, masks=masks)
return src_enc
@classmethod
def from_params(cls, params: Dict) -> "TransformerEncoder":
return TransformerEncoder(**params)
TransformerEncoder.register('transformer')
| 2,782 | 34.227848 | 132 | py |
flowseq | flowseq-master/flownmt/modules/encoders/rnn.py | from overrides import overrides
from typing import Dict, Tuple
import torch
import torch.nn as nn
import torch.nn.functional as F
from flownmt.modules.encoders.encoder import Encoder
from torch.nn.utils.rnn import pad_packed_sequence, pack_padded_sequence
class RecurrentCore(nn.Module):
def __init__(self, embed, rnn_mode, num_layers, latent_dim, hidden_size, dropout=0.0):
super(RecurrentCore, self).__init__()
self.embed = embed
if rnn_mode == 'RNN':
RNN = nn.RNN
elif rnn_mode == 'LSTM':
RNN = nn.LSTM
elif rnn_mode == 'GRU':
RNN = nn.GRU
else:
raise ValueError('Unknown RNN mode: %s' % rnn_mode)
assert hidden_size % 2 == 0
self.rnn = RNN(embed.embedding_dim, hidden_size // 2,
num_layers=num_layers, batch_first=True, bidirectional=True)
self.enc_proj = nn.Sequential(nn.Linear(hidden_size, latent_dim), nn.ELU())
self.reset_parameters()
def reset_parameters(self):
nn.init.constant_(self.enc_proj[0].bias, 0.)
@overrides
def forward(self, src_sents, masks) -> Tuple[torch.Tensor, torch.Tensor]:
word_embed = F.dropout(self.embed(src_sents), p=0.2, training=self.training)
lengths = masks.sum(dim=1).long()
packed_embed = pack_padded_sequence(word_embed, lengths, batch_first=True, enforce_sorted=False)
packed_enc, _ = self.rnn(packed_embed)
# [batch, length, hidden_size]
src_enc, _ = pad_packed_sequence(packed_enc, batch_first=True, total_length=masks.size(1))
# [batch, length, latent_dim]
src_enc = self.enc_proj(src_enc).mul(masks.unsqueeze(2))
# [batch, latent_dim]
batch = src_sents.size(0)
idx = lengths - 1
batch_idx = torch.arange(0, batch).long().to(idx.device)
ctx = src_enc[batch_idx, idx]
return src_enc, ctx
class RecurrentEncoder(Encoder):
"""
Src Encoder to encode source sentence with Recurrent Neural Networks
"""
def __init__(self, vocab_size, embed_dim, padding_idx, rnn_mode, num_layers, latent_dim, hidden_size, dropout=0.0):
super(RecurrentEncoder, self).__init__(vocab_size, embed_dim, padding_idx)
self.core = RecurrentCore(self.embed, rnn_mode, num_layers, latent_dim, hidden_size, dropout=dropout)
@overrides
def forward(self, src_sents, masks=None) -> Tuple[torch.Tensor, torch.Tensor]:
src_enc, ctx = self.core(src_sents, masks=masks)
return src_enc, ctx
def init(self, src_sents, masks=None, init_scale=1.0) -> torch.Tensor:
with torch.no_grad():
src_enc, _ = self.core(src_sents, masks=masks)
return src_enc
@classmethod
def from_params(cls, params: Dict) -> "RecurrentEncoder":
return RecurrentEncoder(**params)
RecurrentEncoder.register('rnn')
| 2,897 | 36.153846 | 119 | py |
flowseq | flowseq-master/flownmt/modules/posteriors/shift_rnn.py | from overrides import overrides
from typing import Tuple, Dict
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.utils.rnn import pad_packed_sequence, pack_padded_sequence
from flownmt.nnet.weightnorm import LinearWeightNorm
from flownmt.modules.posteriors.posterior import Posterior
from flownmt.nnet.attention import GlobalAttention
class ShiftRecurrentCore(nn.Module):
def __init__(self, embed, rnn_mode, num_layers, latent_dim, hidden_size, bidirectional=True, use_attn=False, dropout=0.0, dropword=0.0):
super(ShiftRecurrentCore, self).__init__()
if rnn_mode == 'RNN':
RNN = nn.RNN
elif rnn_mode == 'LSTM':
RNN = nn.LSTM
elif rnn_mode == 'GRU':
RNN = nn.GRU
else:
raise ValueError('Unknown RNN mode: %s' % rnn_mode)
assert hidden_size % 2 == 0
self.tgt_embed = embed
assert num_layers == 1
self.bidirectional = bidirectional
if bidirectional:
self.rnn = RNN(embed.embedding_dim, hidden_size // 2, num_layers=1, batch_first=True, bidirectional=True)
else:
self.rnn = RNN(embed.embedding_dim, hidden_size, num_layers=1, batch_first=True, bidirectional=False)
self.use_attn = use_attn
if use_attn:
self.attn = GlobalAttention(latent_dim, hidden_size, hidden_size)
self.ctx_proj = nn.Sequential(nn.Linear(hidden_size * 2, hidden_size), nn.ELU())
else:
self.ctx_proj = nn.Sequential(nn.Linear(hidden_size, hidden_size), nn.ELU())
self.dropout = dropout
self.dropout2d = nn.Dropout2d(dropword) if dropword > 0. else None # drop entire tokens
self.mu = LinearWeightNorm(hidden_size, latent_dim, bias=True)
self.logvar = LinearWeightNorm(hidden_size, latent_dim, bias=True)
@overrides
def forward(self, tgt_sents, tgt_masks, src_enc, src_masks):
tgt_embed = self.tgt_embed(tgt_sents)
if self.dropout2d is not None:
tgt_embed = self.dropout2d(tgt_embed)
lengths = tgt_masks.sum(dim=1).long()
packed_embed = pack_padded_sequence(tgt_embed, lengths, batch_first=True, enforce_sorted=False)
packed_enc, _ = self.rnn(packed_embed)
tgt_enc, _ = pad_packed_sequence(packed_enc, batch_first=True, total_length=tgt_masks.size(1))
if self.bidirectional:
# split into fwd and bwd
fwd_tgt_enc, bwd_tgt_enc = tgt_enc.chunk(2, dim=2) # (batch_size, seq_len, hidden_size // 2)
pad_vector = fwd_tgt_enc.new_zeros((fwd_tgt_enc.size(0), 1, fwd_tgt_enc.size(2)))
pad_fwd_tgt_enc = torch.cat([pad_vector, fwd_tgt_enc], dim=1)
pad_bwd_tgt_enc = torch.cat([bwd_tgt_enc, pad_vector], dim=1)
tgt_enc = torch.cat([pad_fwd_tgt_enc[:, :-1], pad_bwd_tgt_enc[:, 1:]], dim=2)
else:
pad_vector = tgt_enc.new_zeros((tgt_enc.size(0), 1, tgt_enc.size(2)))
tgt_enc = torch.cat([pad_vector, tgt_enc], dim=1)[:, :-1]
if self.use_attn:
ctx = self.attn(tgt_enc, src_enc, key_mask=src_masks.eq(0))
ctx = torch.cat([tgt_enc, ctx], dim=2)
else:
ctx = tgt_enc
ctx = F.dropout(self.ctx_proj(ctx), p=self.dropout, training=self.training)
mu = self.mu(ctx) * tgt_masks.unsqueeze(2)
logvar = self.logvar(ctx) * tgt_masks.unsqueeze(2)
return mu, logvar
def init(self, tgt_sents, tgt_masks, src_enc, src_masks, init_scale=1.0, init_mu=True, init_var=True):
with torch.no_grad():
tgt_embed = self.tgt_embed(tgt_sents)
if self.dropout2d is not None:
tgt_embed = self.dropout2d(tgt_embed)
lengths = tgt_masks.sum(dim=1).long()
packed_embed = pack_padded_sequence(tgt_embed, lengths, batch_first=True, enforce_sorted=False)
packed_enc, _ = self.rnn(packed_embed)
tgt_enc, _ = pad_packed_sequence(packed_enc, batch_first=True, total_length=tgt_masks.size(1))
if self.bidirectional:
fwd_tgt_enc, bwd_tgt_enc = tgt_enc.chunk(2, dim=2) # (batch_size, seq_len, hidden_size // 2)
pad_vector = fwd_tgt_enc.new_zeros((fwd_tgt_enc.size(0), 1, fwd_tgt_enc.size(2)))
pad_fwd_tgt_enc = torch.cat([pad_vector, fwd_tgt_enc], dim=1)
pad_bwd_tgt_enc = torch.cat([bwd_tgt_enc, pad_vector], dim=1)
tgt_enc = torch.cat([pad_fwd_tgt_enc[:, :-1], pad_bwd_tgt_enc[:, 1:]], dim=2)
else:
pad_vector = tgt_enc.new_zeros((tgt_enc.size(0), 1, tgt_enc.size(2)))
tgt_enc = torch.cat([pad_vector, tgt_enc], dim=1)[:, :-1]
if self.use_attn:
ctx = self.attn.init(tgt_enc, src_enc, key_mask=src_masks.eq(0), init_scale=init_scale)
ctx = torch.cat([tgt_enc, ctx], dim=2)
else:
ctx = tgt_enc
ctx = F.dropout(self.ctx_proj(ctx), p=self.dropout, training=self.training)
mu = self.mu.init(ctx, init_scale=0.05 * init_scale) if init_mu else self.mu(ctx)
logvar = self.logvar.init(ctx, init_scale=0.05 * init_scale) if init_var else self.logvar(ctx)
mu = mu * tgt_masks.unsqueeze(2)
logvar = logvar * tgt_masks.unsqueeze(2)
return mu, logvar
class ShiftRecurrentPosterior(Posterior):
"""
Posterior with Recurrent Neural Networks
"""
def __init__(self, vocab_size, embed_dim, padding_idx, rnn_mode, num_layers, latent_dim, hidden_size,
bidirectional=True, use_attn=False, dropout=0.0, dropword=0.0, _shared_embed=None):
super(ShiftRecurrentPosterior, self).__init__(vocab_size, embed_dim, padding_idx, _shared_embed=_shared_embed)
self.core = ShiftRecurrentCore(self.tgt_embed, rnn_mode, num_layers, latent_dim, hidden_size,
bidirectional=bidirectional, use_attn=use_attn, dropout=dropout, dropword=dropword)
def target_embed_weight(self):
if isinstance(self.core, nn.DataParallel):
            return self.core.module.tgt_embed.weight
else:
return self.core.tgt_embed.weight
@overrides
def forward(self, tgt_sents, tgt_masks, src_enc, src_masks):
return self.core(tgt_sents, tgt_masks, src_enc, src_masks)
@overrides
def sample(self, tgt_sents: torch.Tensor, tgt_masks: torch.Tensor,
src_enc: torch.Tensor, src_masks: torch.Tensor,
nsamples: int =1, random=True) -> Tuple[torch.Tensor, torch.Tensor]:
mu, logvar = self.core(tgt_sents, tgt_masks, src_enc, src_masks)
z, eps = Posterior.reparameterize(mu, logvar, tgt_masks, nsamples=nsamples, random=random)
log_probs = Posterior.log_probability(z, eps, mu, logvar, tgt_masks)
return z, log_probs
@overrides
def init(self, tgt_sents, tgt_masks, src_enc, src_masks, init_scale=1.0, init_mu=True, init_var=True) -> Tuple[torch.Tensor, torch.Tensor]:
mu, logvar = self.core.init(tgt_sents, tgt_masks, src_enc, src_masks,
init_scale=init_scale, init_mu=init_mu, init_var=init_var)
z, eps = Posterior.reparameterize(mu, logvar, tgt_masks, random=True)
log_probs = Posterior.log_probability(z, eps, mu, logvar, tgt_masks)
z = z.squeeze(1)
log_probs = log_probs.squeeze(1)
return z, log_probs
@classmethod
def from_params(cls, params: Dict) -> "ShiftRecurrentPosterior":
return ShiftRecurrentPosterior(**params)
ShiftRecurrentPosterior.register('shift_rnn')
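if __name__ == '__main__':
    # Tiny pure-torch demo (illustrative, not part of the original file) of the
    # bidirectional shift above: position t ends up seeing forward states of
    # tokens < t and backward states of tokens > t, so token t never attends to
    # its own RNN state.
    fwd = torch.arange(1., 4.).view(1, 3, 1)        # forward states of tokens 1..3
    bwd = torch.arange(1., 4.).view(1, 3, 1) * 10.  # backward states
    pad = fwd.new_zeros(1, 1, 1)
    shifted = torch.cat([torch.cat([pad, fwd], dim=1)[:, :-1],
                         torch.cat([bwd, pad], dim=1)[:, 1:]], dim=2)
    print(shifted.squeeze(0))  # rows: [0, 20], [1, 30], [2, 0]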
| 7,669 | 49.460526 | 143 | py |
flowseq | flowseq-master/flownmt/modules/posteriors/transformer.py | from overrides import overrides
from typing import Tuple, Dict
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from flownmt.nnet.weightnorm import LinearWeightNorm
from flownmt.nnet.transformer import TransformerDecoderLayer
from flownmt.nnet.positional_encoding import PositionalEncoding
from flownmt.modules.posteriors.posterior import Posterior
class TransformerCore(nn.Module):
def __init__(self, embed, num_layers, latent_dim, hidden_size, heads, dropout=0.0, dropword=0.0, max_length=100):
super(TransformerCore, self).__init__()
self.tgt_embed = embed
self.padding_idx = embed.padding_idx
embed_dim = embed.embedding_dim
self.embed_scale = math.sqrt(embed_dim)
assert embed_dim == latent_dim
layers = [TransformerDecoderLayer(latent_dim, hidden_size, heads, dropout=dropout) for _ in range(num_layers)]
self.layers = nn.ModuleList(layers)
self.pos_enc = PositionalEncoding(latent_dim, self.padding_idx, max_length + 1)
self.dropword = dropword # drop entire tokens
self.mu = LinearWeightNorm(latent_dim, latent_dim, bias=True)
self.logvar = LinearWeightNorm(latent_dim, latent_dim, bias=True)
self.reset_parameters()
def reset_parameters(self):
pass
@overrides
def forward(self, tgt_sents, tgt_masks, src_enc, src_masks):
x = self.embed_scale * self.tgt_embed(tgt_sents)
x = F.dropout2d(x, p=self.dropword, training=self.training)
x += self.pos_enc(tgt_sents)
x = F.dropout(x, p=0.2, training=self.training)
mask = tgt_masks.eq(0)
key_mask = src_masks.eq(0)
for layer in self.layers:
x = layer(x, mask, src_enc, key_mask)
mu = self.mu(x) * tgt_masks.unsqueeze(2)
logvar = self.logvar(x) * tgt_masks.unsqueeze(2)
return mu, logvar
def init(self, tgt_sents, tgt_masks, src_enc, src_masks, init_scale=1.0, init_mu=True, init_var=True):
with torch.no_grad():
x = self.embed_scale * self.tgt_embed(tgt_sents)
x = F.dropout2d(x, p=self.dropword, training=self.training)
x += self.pos_enc(tgt_sents)
x = F.dropout(x, p=0.2, training=self.training)
mask = tgt_masks.eq(0)
key_mask = src_masks.eq(0)
for layer in self.layers:
x = layer.init(x, mask, src_enc, key_mask, init_scale=init_scale)
x = x * tgt_masks.unsqueeze(2)
mu = self.mu.init(x, init_scale=0.05 * init_scale) if init_mu else self.mu(x)
logvar = self.logvar.init(x, init_scale=0.05 * init_scale) if init_var else self.logvar(x)
mu = mu * tgt_masks.unsqueeze(2)
logvar = logvar * tgt_masks.unsqueeze(2)
return mu, logvar
class TransformerPosterior(Posterior):
"""
Posterior with Transformer
"""
def __init__(self, vocab_size, embed_dim, padding_idx, num_layers, latent_dim, hidden_size, heads,
dropout=0.0, dropword=0.0, max_length=100, _shared_embed=None):
super(TransformerPosterior, self).__init__(vocab_size, embed_dim, padding_idx, _shared_embed=_shared_embed)
self.core = TransformerCore(self.tgt_embed, num_layers, latent_dim, hidden_size, heads,
dropout=dropout, dropword=dropword, max_length=max_length)
def target_embed_weight(self):
if isinstance(self.core, nn.DataParallel):
            return self.core.module.tgt_embed.weight
else:
return self.core.tgt_embed.weight
@overrides
def forward(self, tgt_sents, tgt_masks, src_enc, src_masks):
return self.core(tgt_sents, tgt_masks, src_enc, src_masks)
@overrides
def sample(self, tgt_sents: torch.Tensor, tgt_masks: torch.Tensor,
src_enc: torch.Tensor, src_masks: torch.Tensor,
nsamples: int =1, random=True) -> Tuple[torch.Tensor, torch.Tensor]:
mu, logvar = self.core(tgt_sents, tgt_masks, src_enc, src_masks)
z, eps = Posterior.reparameterize(mu, logvar, tgt_masks, nsamples=nsamples, random=random)
log_probs = Posterior.log_probability(z, eps, mu, logvar, tgt_masks)
return z, log_probs
@overrides
def init(self, tgt_sents, tgt_masks, src_enc, src_masks, init_scale=1.0, init_mu=True, init_var=True) -> Tuple[torch.Tensor, torch.Tensor]:
mu, logvar = self.core.init(tgt_sents, tgt_masks, src_enc, src_masks,
init_scale=init_scale, init_mu=init_mu, init_var=init_var)
z, eps = Posterior.reparameterize(mu, logvar, tgt_masks, random=True)
log_probs = Posterior.log_probability(z, eps, mu, logvar, tgt_masks)
z = z.squeeze(1)
log_probs = log_probs.squeeze(1)
return z, log_probs
@classmethod
def from_params(cls, params: Dict) -> "TransformerPosterior":
return TransformerPosterior(**params)
TransformerPosterior.register('transformer')
| 5,026 | 42.713043 | 143 | py |
flowseq | flowseq-master/flownmt/modules/posteriors/rnn.py | from overrides import overrides
from typing import Tuple, Dict
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.utils.rnn import pad_packed_sequence, pack_padded_sequence
from flownmt.nnet.weightnorm import LinearWeightNorm
from flownmt.modules.posteriors.posterior import Posterior
from flownmt.nnet.attention import GlobalAttention
class RecurrentCore(nn.Module):
def __init__(self, embed, rnn_mode, num_layers, latent_dim, hidden_size, use_attn=False, dropout=0.0, dropword=0.0):
super(RecurrentCore, self).__init__()
if rnn_mode == 'RNN':
RNN = nn.RNN
elif rnn_mode == 'LSTM':
RNN = nn.LSTM
elif rnn_mode == 'GRU':
RNN = nn.GRU
else:
raise ValueError('Unknown RNN mode: %s' % rnn_mode)
assert hidden_size % 2 == 0
self.tgt_embed = embed
self.rnn = RNN(embed.embedding_dim, hidden_size // 2,
num_layers=num_layers, batch_first=True, bidirectional=True)
self.use_attn = use_attn
if use_attn:
self.attn = GlobalAttention(latent_dim, hidden_size, hidden_size, hidden_features=hidden_size)
self.ctx_proj = nn.Sequential(nn.Linear(hidden_size * 2, hidden_size), nn.ELU())
else:
self.ctx_proj = nn.Sequential(nn.Linear(hidden_size, hidden_size), nn.ELU())
self.dropout = dropout
self.dropout2d = nn.Dropout2d(dropword) if dropword > 0. else None # drop entire tokens
self.mu = LinearWeightNorm(hidden_size, latent_dim, bias=True)
self.logvar = LinearWeightNorm(hidden_size, latent_dim, bias=True)
@overrides
def forward(self, tgt_sents, tgt_masks, src_enc, src_masks):
tgt_embed = self.tgt_embed(tgt_sents)
if self.dropout2d is not None:
tgt_embed = self.dropout2d(tgt_embed)
lengths = tgt_masks.sum(dim=1).long()
packed_embed = pack_padded_sequence(tgt_embed, lengths, batch_first=True, enforce_sorted=False)
packed_enc, _ = self.rnn(packed_embed)
tgt_enc, _ = pad_packed_sequence(packed_enc, batch_first=True, total_length=tgt_masks.size(1))
if self.use_attn:
ctx = self.attn(tgt_enc, src_enc, key_mask=src_masks.eq(0))
ctx = torch.cat([tgt_enc, ctx], dim=2)
else:
ctx = tgt_enc
ctx = F.dropout(self.ctx_proj(ctx), p=self.dropout, training=self.training)
mu = self.mu(ctx) * tgt_masks.unsqueeze(2)
logvar = self.logvar(ctx) * tgt_masks.unsqueeze(2)
return mu, logvar
def init(self, tgt_sents, tgt_masks, src_enc, src_masks, init_scale=1.0, init_mu=True, init_var=True):
with torch.no_grad():
tgt_embed = self.tgt_embed(tgt_sents)
if self.dropout2d is not None:
tgt_embed = self.dropout2d(tgt_embed)
lengths = tgt_masks.sum(dim=1).long()
packed_embed = pack_padded_sequence(tgt_embed, lengths, batch_first=True, enforce_sorted=False)
packed_enc, _ = self.rnn(packed_embed)
tgt_enc, _ = pad_packed_sequence(packed_enc, batch_first=True, total_length=tgt_masks.size(1))
if self.use_attn:
ctx = self.attn.init(tgt_enc, src_enc, key_mask=src_masks.eq(0), init_scale=init_scale)
ctx = torch.cat([tgt_enc, ctx], dim=2)
else:
ctx = tgt_enc
ctx = F.dropout(self.ctx_proj(ctx), p=self.dropout, training=self.training)
mu = self.mu.init(ctx, init_scale=0.05 * init_scale) if init_mu else self.mu(ctx)
logvar = self.logvar.init(ctx, init_scale=0.05 * init_scale) if init_var else self.logvar(ctx)
mu = mu * tgt_masks.unsqueeze(2)
logvar = logvar * tgt_masks.unsqueeze(2)
return mu, logvar
class RecurrentPosterior(Posterior):
"""
Posterior with Recurrent Neural Networks
"""
def __init__(self, vocab_size, embed_dim, padding_idx, rnn_mode, num_layers, latent_dim, hidden_size,
use_attn=False, dropout=0.0, dropword=0.0, _shared_embed=None):
super(RecurrentPosterior, self).__init__(vocab_size, embed_dim, padding_idx, _shared_embed=_shared_embed)
self.core = RecurrentCore(self.tgt_embed, rnn_mode, num_layers, latent_dim, hidden_size,
use_attn=use_attn, dropout=dropout, dropword=dropword)
def target_embed_weight(self):
if isinstance(self.core, nn.DataParallel):
            return self.core.module.tgt_embed.weight
else:
return self.core.tgt_embed.weight
@overrides
def forward(self, tgt_sents, tgt_masks, src_enc, src_masks):
return self.core(tgt_sents, tgt_masks, src_enc, src_masks)
@overrides
def sample(self, tgt_sents: torch.Tensor, tgt_masks: torch.Tensor,
src_enc: torch.Tensor, src_masks: torch.Tensor,
nsamples: int =1, random=True) -> Tuple[torch.Tensor, torch.Tensor]:
mu, logvar = self.core(tgt_sents, tgt_masks, src_enc, src_masks)
z, eps = Posterior.reparameterize(mu, logvar, tgt_masks, nsamples=nsamples, random=random)
log_probs = Posterior.log_probability(z, eps, mu, logvar, tgt_masks)
return z, log_probs
@overrides
def init(self, tgt_sents, tgt_masks, src_enc, src_masks, init_scale=1.0, init_mu=True, init_var=True) -> Tuple[torch.Tensor, torch.Tensor]:
mu, logvar = self.core.init(tgt_sents, tgt_masks, src_enc, src_masks,
init_scale=init_scale, init_mu=init_mu, init_var=init_var)
z, eps = Posterior.reparameterize(mu, logvar, tgt_masks, random=True)
log_probs = Posterior.log_probability(z, eps, mu, logvar, tgt_masks)
z = z.squeeze(1)
log_probs = log_probs.squeeze(1)
return z, log_probs
@classmethod
def from_params(cls, params: Dict) -> "RecurrentPosterior":
return RecurrentPosterior(**params)
RecurrentPosterior.register('rnn')
| 6,032 | 47.264 | 143 | py |
flowseq | flowseq-master/flownmt/modules/posteriors/posterior.py | import math
from typing import Dict, Tuple
import torch
import torch.nn as nn
class Posterior(nn.Module):
"""
posterior class
"""
_registry = dict()
def __init__(self, vocab_size, embed_dim, padding_idx, _shared_embed=None):
super(Posterior, self).__init__()
if _shared_embed is None:
self.tgt_embed = nn.Embedding(vocab_size, embed_dim, padding_idx=padding_idx)
self.reset_parameters()
else:
self.tgt_embed = _shared_embed
def reset_parameters(self):
nn.init.uniform_(self.tgt_embed.weight, -0.1, 0.1)
if self.tgt_embed.padding_idx is not None:
with torch.no_grad():
self.tgt_embed.weight[self.tgt_embed.padding_idx].fill_(0)
def target_embed_weight(self):
raise NotImplementedError
@staticmethod
def reparameterize(mu, logvar, mask, nsamples=1, random=True):
# [batch, length, dim]
size = mu.size()
std = logvar.mul(0.5).exp()
# [batch, nsamples, length, dim]
if random:
eps = torch.randn(size[0], nsamples, *size[1:], device=mu.device)
eps *= mask.view(size[0], 1, size[1], 1)
else:
eps = mu.new_zeros(size[0], nsamples, *size[1:])
return eps.mul(std.unsqueeze(1)).add(mu.unsqueeze(1)), eps
@staticmethod
def log_probability(z, eps, mu, logvar, mask):
size = eps.size()
nz = size[3]
# [batch, nsamples, length, nz]
log_probs = logvar.unsqueeze(1) + eps.pow(2)
# [batch, 1]
cc = mask.sum(dim=1, keepdim=True) * (math.log(math.pi * 2.) * nz)
# [batch, nsamples, length * nz] --> [batch, nsamples]
log_probs = log_probs.view(size[0], size[1], -1).sum(dim=2) + cc
return log_probs * -0.5
def forward(self, tgt_sents, tgt_masks, src_enc, src_masks):
raise NotImplementedError
def init(self, tgt_sents, tgt_masks, src_enc, src_masks, init_scale=1.0, init_mu=True, init_var=True) -> Tuple[torch.Tensor, torch.Tensor]:
raise NotImplementedError
def sample(self, tgt_sents: torch.Tensor, tgt_masks: torch.Tensor,
src_enc: torch.Tensor, src_masks: torch.Tensor,
nsamples: int =1, random=True) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Args:
tgt_sents: Tensor [batch, tgt_length]
tensor for target sentences
tgt_masks: Tensor [batch, tgt_length]
tensor for target masks
src_enc: Tensor [batch, src_length, hidden_size]
tensor for source encoding
src_masks: Tensor [batch, src_length]
tensor for source masks
nsamples: int
number of samples
random: bool
if True, perform random sampling. Otherwise, return mean.
Returns: Tensor1, Tensor2
Tensor1: samples from the posterior [batch, nsamples, tgt_length, nz]
Tensor2: log probabilities [batch, nsamples]
"""
raise NotImplementedError
@classmethod
def register(cls, name: str):
Posterior._registry[name] = cls
@classmethod
def by_name(cls, name: str):
return Posterior._registry[name]
@classmethod
def from_params(cls, params: Dict):
raise NotImplementedError
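if __name__ == '__main__':
    # Sanity check (illustrative, not part of the original file): for a full
    # mask, log_probability matches the diagonal-Gaussian density from
    # torch.distributions evaluated at the reparameterized samples.
    import torch.distributions as D
    torch.manual_seed(0)
    mu, logvar = torch.randn(2, 3, 4), torch.randn(2, 3, 4)
    mask = torch.ones(2, 3)
    z, eps = Posterior.reparameterize(mu, logvar, mask, nsamples=5)
    ours = Posterior.log_probability(z, eps, mu, logvar, mask)
    ref = D.Normal(mu.unsqueeze(1), logvar.mul(0.5).exp().unsqueeze(1)).log_prob(z)
    ref = ref.view(2, 5, -1).sum(dim=2)
    print(torch.allclose(ours, ref, atol=1e-4))  # expected: True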
| 3,375 | 33.10101 | 143 | py |
flowseq | flowseq-master/flownmt/flows/nmt.py | from overrides import overrides
from typing import Dict, Tuple
import torch
import torch.nn as nn
from flownmt.flows.flow import Flow
from flownmt.flows.actnorm import ActNormFlow
from flownmt.flows.linear import InvertibleMultiHeadFlow
from flownmt.flows.couplings.coupling import NICE
from flownmt.utils import squeeze, unsqueeze, split, unsplit
class NMTFlowPOSAttnUnit(Flow):
"""
Unit for POS Attention
"""
def __init__(self, features, src_features, hidden_features=None, inverse=False,
transform='affine', heads=1, max_length=100, dropout=0.0):
super(NMTFlowPOSAttnUnit, self).__init__(inverse)
self.actnorm = ActNormFlow(features, inverse=inverse)
self.coupling_up = NICE(src_features, features, hidden_features=hidden_features, inverse=inverse,
split_dim=2, split_type='continuous', order='up',
transform=transform, type='self_attn', heads=heads,
dropout=dropout, pos_enc='attn', max_length=max_length)
self.coupling_down = NICE(src_features, features, hidden_features=hidden_features, inverse=inverse,
split_dim=2, split_type='continuous', order='down',
transform=transform, type='self_attn', heads=heads,
dropout=dropout, pos_enc='add', max_length=max_length)
@overrides
def forward(self, input: torch.Tensor, tgt_mask: torch.Tensor,
src: torch.Tensor, src_mask: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
out, logdet_accum = self.actnorm.forward(input, tgt_mask)
out, logdet = self.coupling_up.forward(out, tgt_mask, src, src_mask)
logdet_accum = logdet_accum + logdet
out, logdet = self.coupling_down.forward(out, tgt_mask, src, src_mask)
logdet_accum = logdet_accum + logdet
return out, logdet_accum
@overrides
def backward(self, input: torch.Tensor, tgt_mask: torch.Tensor,
src: torch.Tensor, src_mask: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
        # reverse the forward order: coupling_down, coupling_up, then actnorm
out, logdet_accum = self.coupling_down.backward(input, tgt_mask, src, src_mask)
out, logdet = self.coupling_up.backward(out, tgt_mask, src, src_mask)
logdet_accum = logdet_accum + logdet
out, logdet = self.actnorm.backward(out, tgt_mask)
logdet_accum = logdet_accum + logdet
return out, logdet_accum
@overrides
def init(self, data: torch.Tensor, tgt_mask: torch.Tensor,
src: torch.Tensor, src_mask: torch.Tensor, init_scale=1.0) -> Tuple[torch.Tensor, torch.Tensor]:
out, logdet_accum = self.actnorm.init(data, tgt_mask, init_scale=init_scale)
out, logdet = self.coupling_up.init(out, tgt_mask, src, src_mask, init_scale=init_scale)
logdet_accum = logdet_accum + logdet
out, logdet = self.coupling_down.init(out, tgt_mask, src, src_mask, init_scale=init_scale)
logdet_accum = logdet_accum + logdet
return out, logdet_accum
class NMTFlowUnit(Flow):
"""
One Unit of NMTFlowStep
"""
def __init__(self, features, src_features, hidden_features=None, inverse=False, transform='affine',
coupling_type='conv', kernel_size=3, rnn_mode='LSTM', heads=1, max_length=100,
dropout=0.0, split_timestep=True):
super(NMTFlowUnit, self).__init__(inverse)
# dim=2, type=continuous
self.coupling1_up = NICE(src_features, features, hidden_features=hidden_features, inverse=inverse,
split_dim=2, split_type='continuous', order='up',
transform=transform, type=coupling_type, kernel=kernel_size, rnn_mode=rnn_mode,
heads=heads, dropout=dropout, pos_enc='add', max_length=max_length)
self.coupling1_down = NICE(src_features, features, hidden_features=hidden_features, inverse=inverse,
split_dim=2, split_type='continuous', order='down',
transform=transform, type=coupling_type, kernel=kernel_size, rnn_mode=rnn_mode,
heads=heads, dropout=dropout, pos_enc='add', max_length=max_length)
self.actnorm1 = ActNormFlow(features, inverse=inverse)
# dim=2, type=skip
self.coupling2_up = NICE(src_features, features, hidden_features=hidden_features, inverse=inverse,
split_dim=2, split_type='skip', order='up',
transform=transform, type=coupling_type, kernel=kernel_size, rnn_mode=rnn_mode,
heads=heads, dropout=dropout, pos_enc='add', max_length=max_length)
self.coupling2_down = NICE(src_features, features, hidden_features=hidden_features, inverse=inverse,
split_dim=2, split_type='skip', order='down',
transform=transform, type=coupling_type, kernel=kernel_size, rnn_mode=rnn_mode,
heads=heads, dropout=dropout, pos_enc='add', max_length=max_length)
self.split_timestep = split_timestep
if split_timestep:
self.actnorm2 = ActNormFlow(features, inverse=inverse)
# dim=1, type=skip
self.coupling3_up = NICE(src_features, features, hidden_features=hidden_features, inverse=inverse,
split_dim=1, split_type='skip', order='up',
transform=transform, type=coupling_type, kernel=kernel_size, rnn_mode=rnn_mode,
heads=heads, dropout=dropout, pos_enc='add', max_length=max_length)
self.coupling3_down = NICE(src_features, features, hidden_features=hidden_features, inverse=inverse,
split_dim=1, split_type='skip', order='down',
transform=transform, type=coupling_type, kernel=kernel_size, rnn_mode=rnn_mode,
heads=heads, dropout=dropout, pos_enc='add', max_length=max_length)
else:
self.actnorm2 = None
self.coupling3_up = None
self.coupling3_down = None
@overrides
def forward(self, input: torch.Tensor, tgt_mask: torch.Tensor,
src: torch.Tensor, src_mask: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
# block1 dim=2, type=continuous
out, logdet_accum = self.coupling1_up.forward(input, tgt_mask, src, src_mask)
out, logdet = self.coupling1_down.forward(out, tgt_mask, src, src_mask)
logdet_accum = logdet_accum + logdet
# ================================================================================
out, logdet = self.actnorm1.forward(out, tgt_mask)
logdet_accum = logdet_accum + logdet
# ================================================================================
# block2 dim=2, type=skip
out, logdet = self.coupling2_up.forward(out, tgt_mask, src, src_mask)
logdet_accum = logdet_accum + logdet
out, logdet = self.coupling2_down.forward(out, tgt_mask, src, src_mask)
logdet_accum = logdet_accum + logdet
if self.split_timestep:
# ================================================================================
out, logdet = self.actnorm2.forward(out, tgt_mask)
logdet_accum = logdet_accum + logdet
# ================================================================================
# block3 dim=1, type=skip
out, logdet = self.coupling3_up.forward(out, tgt_mask, src, src_mask)
logdet_accum = logdet_accum + logdet
out, logdet = self.coupling3_down.forward(out, tgt_mask, src, src_mask)
logdet_accum = logdet_accum + logdet
return out, logdet_accum
@overrides
def backward(self, input: torch.Tensor, tgt_mask: torch.Tensor,
src: torch.Tensor, src_mask: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
if self.split_timestep:
# block3 dim=1, type=skip
out, logdet_accum = self.coupling3_down.backward(input, tgt_mask, src, src_mask)
out, logdet = self.coupling3_up.backward(out, tgt_mask, src, src_mask)
logdet_accum = logdet_accum + logdet
# ================================================================================
out, logdet = self.actnorm2.backward(out, tgt_mask)
logdet_accum = logdet_accum + logdet
# ================================================================================
else:
out, logdet_accum = input, 0
# block2 dim=2, type=skip
out, logdet = self.coupling2_down.backward(out, tgt_mask, src, src_mask)
logdet_accum = logdet_accum + logdet
out, logdet = self.coupling2_up.backward(out, tgt_mask, src, src_mask)
logdet_accum = logdet_accum + logdet
# ================================================================================
out, logdet = self.actnorm1.backward(out, tgt_mask)
logdet_accum = logdet_accum + logdet
# ================================================================================
# block1 dim=2, type=continuous
out, logdet = self.coupling1_down.backward(out, tgt_mask, src, src_mask)
logdet_accum = logdet_accum + logdet
out, logdet = self.coupling1_up.backward(out, tgt_mask, src, src_mask)
logdet_accum = logdet_accum + logdet
return out, logdet_accum
@overrides
def init(self, data: torch.Tensor, tgt_mask: torch.Tensor,
src: torch.Tensor, src_mask: torch.Tensor, init_scale=1.0) -> Tuple[torch.Tensor, torch.Tensor]:
# block1 dim=2, type=continuous
out, logdet_accum = self.coupling1_up.init(data, tgt_mask, src, src_mask, init_scale=init_scale)
out, logdet = self.coupling1_down.init(out, tgt_mask, src, src_mask, init_scale=init_scale)
logdet_accum = logdet_accum + logdet
# ================================================================================
out, logdet = self.actnorm1.init(out, tgt_mask, init_scale=init_scale)
logdet_accum = logdet_accum + logdet
# ================================================================================
# block2 dim=2, type=skip
out, logdet = self.coupling2_up.init(out, tgt_mask, src, src_mask, init_scale=init_scale)
logdet_accum = logdet_accum + logdet
out, logdet = self.coupling2_down.init(out, tgt_mask, src, src_mask, init_scale=init_scale)
logdet_accum = logdet_accum + logdet
if self.split_timestep:
# ================================================================================
out, logdet = self.actnorm2.init(out, tgt_mask, init_scale=init_scale)
logdet_accum = logdet_accum + logdet
# ================================================================================
# block3 dim=1, type=skip
out, logdet = self.coupling3_up.init(out, tgt_mask, src, src_mask, init_scale=init_scale)
logdet_accum = logdet_accum + logdet
out, logdet = self.coupling3_down.init(out, tgt_mask, src, src_mask, init_scale=init_scale)
logdet_accum = logdet_accum + logdet
return out, logdet_accum
class NMTFlowStep(Flow):
"""
One step of NMTFlow
"""
def __init__(self, features, src_features, hidden_features=None, inverse=False, transform='affine',
coupling_type='conv', kernel_size=3, rnn_mode='LSTM', heads=1, max_length=100,
dropout=0.0, split_timestep=True):
super(NMTFlowStep, self).__init__(inverse)
self.actnorm1 = ActNormFlow(features, inverse=inverse)
self.linear1 = InvertibleMultiHeadFlow(features, type='A', inverse=inverse)
self.unit1 = NMTFlowUnit(features, src_features, hidden_features=hidden_features, inverse=inverse,
transform=transform, coupling_type=coupling_type, kernel_size=kernel_size, rnn_mode=rnn_mode,
heads=heads, dropout=dropout, max_length=max_length, split_timestep=split_timestep)
self.actnorm2 = ActNormFlow(features, inverse=inverse)
self.linear2 = InvertibleMultiHeadFlow(features, type='B', inverse=inverse)
self.unit2 = NMTFlowUnit(features, src_features, hidden_features=hidden_features, inverse=inverse,
transform=transform, coupling_type=coupling_type, kernel_size=kernel_size, rnn_mode=rnn_mode,
heads=heads, dropout=dropout, max_length=max_length, split_timestep=split_timestep)
def sync(self):
self.linear1.sync()
self.linear2.sync()
@overrides
def forward(self, input: torch.Tensor, tgt_mask: torch.Tensor,
src: torch.Tensor, src_mask: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
out, logdet_accum = self.actnorm1.forward(input, tgt_mask)
out, logdet = self.linear1.forward(out, tgt_mask)
logdet_accum = logdet_accum + logdet
out, logdet = self.unit1.forward(out, tgt_mask, src, src_mask)
logdet_accum = logdet_accum + logdet
out, logdet = self.actnorm2.forward(out, tgt_mask)
logdet_accum = logdet_accum + logdet
out, logdet = self.linear2.forward(out, tgt_mask)
logdet_accum = logdet_accum + logdet
out, logdet = self.unit2.forward(out, tgt_mask, src, src_mask)
logdet_accum = logdet_accum + logdet
return out, logdet_accum
@overrides
def backward(self, input: torch.Tensor, tgt_mask: torch.Tensor,
src: torch.Tensor, src_mask: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
out, logdet_accum = self.unit2.backward(input, tgt_mask, src, src_mask)
out, logdet = self.linear2.backward(out, tgt_mask)
logdet_accum = logdet_accum + logdet
out, logdet = self.actnorm2.backward(out, tgt_mask)
logdet_accum = logdet_accum + logdet
out, logdet = self.unit1.backward(out, tgt_mask, src, src_mask)
logdet_accum = logdet_accum + logdet
out, logdet = self.linear1.backward(out, tgt_mask)
logdet_accum = logdet_accum + logdet
out, logdet = self.actnorm1.backward(out, tgt_mask)
logdet_accum = logdet_accum + logdet
return out, logdet_accum
@overrides
def init(self, data: torch.Tensor, tgt_mask: torch.Tensor,
src: torch.Tensor, src_mask: torch.Tensor, init_scale=1.0) -> Tuple[torch.Tensor, torch.Tensor]:
out, logdet_accum = self.actnorm1.init(data, tgt_mask, init_scale=init_scale)
out, logdet = self.linear1.init(out, tgt_mask, init_scale=init_scale)
logdet_accum = logdet_accum + logdet
out, logdet = self.unit1.init(out, tgt_mask, src, src_mask, init_scale=init_scale)
logdet_accum = logdet_accum + logdet
out, logdet = self.actnorm2.init(out, tgt_mask, init_scale=init_scale)
logdet_accum = logdet_accum + logdet
out, logdet = self.linear2.init(out, tgt_mask, init_scale=init_scale)
logdet_accum = logdet_accum + logdet
out, logdet = self.unit2.init(out, tgt_mask, src, src_mask, init_scale=init_scale)
logdet_accum = logdet_accum + logdet
return out, logdet_accum
class NMTFlowBlock(Flow):
"""
NMT Flow Block
"""
def __init__(self, num_steps, features, src_features, hidden_features=None, inverse=False, prior=False, factor=2,
transform='affine', coupling_type='conv', kernel_size=3, rnn_mode='LSTM', heads=1, max_length=100,
dropout=0.0, pos_attn=False):
super(NMTFlowBlock, self).__init__(inverse)
if pos_attn:
            self.pos_attn = NMTFlowPOSAttnUnit(features, src_features, hidden_features=hidden_features,
inverse=inverse, transform=transform, heads=heads,
max_length=max_length, dropout=dropout)
else:
self.pos_attn = None
steps = [NMTFlowStep(features, src_features, hidden_features=hidden_features, inverse=inverse,
transform=transform, coupling_type=coupling_type, kernel_size=kernel_size,
rnn_mode=rnn_mode, heads=heads, max_length=max_length,
dropout=dropout, split_timestep=prior) for _ in range(num_steps)]
self.steps = nn.ModuleList(steps)
if prior:
            assert features % factor == 0, 'features {} should be divisible by factor {}'.format(features, factor)
self.prior = NICE(src_features, features, hidden_features=hidden_features, inverse=inverse,
split_dim=2, split_type='continuous', order='up', factor=factor,
transform=transform, type=coupling_type, kernel=kernel_size,
heads=heads, rnn_mode=rnn_mode, pos_enc='add', max_length=max_length, dropout=dropout)
self.z_features = features - features // factor
assert self.z_features == self.prior.z1_channels
else:
self.prior = None
self.z_features = features
def sync(self):
for step in self.steps:
step.sync()
@overrides
def forward(self, input: torch.Tensor, tgt_mask: torch.Tensor,
src: torch.Tensor, src_mask: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
# [batch]
if self.pos_attn is None:
logdet_accum = input.new_zeros(input.size(0))
out = input
else:
out, logdet_accum = self.pos_attn.forward(input, tgt_mask, src, src_mask)
for step in self.steps:
out, logdet = step.forward(out, tgt_mask, src, src_mask)
logdet_accum = logdet_accum + logdet
if self.prior is not None:
out, logdet = self.prior.forward(out, tgt_mask, src, src_mask)
logdet_accum = logdet_accum + logdet
return out, logdet_accum
@overrides
def backward(self, input: torch.Tensor, tgt_mask: torch.Tensor,
src: torch.Tensor, src_mask: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
if self.prior is None:
logdet_accum = input.new_zeros(input.size(0))
out = input
else:
out, logdet_accum = self.prior.backward(input, tgt_mask, src, src_mask)
for step in reversed(self.steps):
out, logdet = step.backward(out, tgt_mask, src, src_mask)
logdet_accum = logdet_accum + logdet
if self.pos_attn is not None:
out, logdet = self.pos_attn.backward(out, tgt_mask, src, src_mask)
logdet_accum = logdet_accum + logdet
return out, logdet_accum
@overrides
def init(self, data: torch.Tensor, tgt_mask: torch.Tensor,
src: torch.Tensor, src_mask: torch.Tensor, init_scale=1.0) -> Tuple[torch.Tensor, torch.Tensor]:
# [batch]
if self.pos_attn is None:
logdet_accum = data.new_zeros(data.size(0))
out = data
else:
out, logdet_accum = self.pos_attn.init(data, tgt_mask, src, src_mask, init_scale=init_scale)
for step in self.steps:
out, logdet = step.init(out, tgt_mask, src, src_mask, init_scale=init_scale)
logdet_accum = logdet_accum + logdet
if self.prior is not None:
out, logdet = self.prior.init(out, tgt_mask, src, src_mask, init_scale=init_scale)
logdet_accum = logdet_accum + logdet
return out, logdet_accum
class NMTFlow(Flow):
"""
NMT Flow
"""
def __init__(self, levels, num_steps, features, src_features, factors, hidden_features=None, inverse=False,
transform='affine', coupling_type='conv', kernel_size=3, rnn_mode='LSTM', heads=1, pos_enc='add', max_length=100, dropout=0.0):
super(NMTFlow, self).__init__(inverse)
assert levels == len(num_steps)
assert levels == len(factors) + 1
blocks = []
self.levels = levels
self.features = features
pos_attn = coupling_type == 'self_attn' and pos_enc == 'attn'
for level in range(levels):
if level == levels - 1:
block = NMTFlowBlock(num_steps[level], features, src_features, hidden_features=hidden_features,
inverse=inverse, prior=False, coupling_type=coupling_type, transform=transform,
kernel_size=kernel_size, rnn_mode=rnn_mode, heads=heads, max_length=max_length,
dropout=dropout, pos_attn=pos_attn)
else:
factor = factors[level]
block = NMTFlowBlock(num_steps[level], features, src_features, hidden_features=hidden_features,
inverse=inverse, prior=True, factor=factor, coupling_type=coupling_type,
                                     transform=transform, kernel_size=kernel_size, rnn_mode=rnn_mode, heads=heads,
max_length=max_length, dropout=dropout, pos_attn=pos_attn)
features = block.z_features * 2
blocks.append(block)
self.blocks = nn.ModuleList(blocks)
def sync(self):
for block in self.blocks:
block.sync()
@overrides
def forward(self, input: torch.Tensor, tgt_mask: torch.Tensor,
src: torch.Tensor, src_mask: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
logdet_accum = input.new_zeros(input.size(0))
out = input
outputs = []
for i, block in enumerate(self.blocks):
out, logdet = block.forward(out, tgt_mask, src, src_mask)
logdet_accum = logdet_accum + logdet
if i < self.levels - 1:
out1, out2 = split(out, block.z_features)
outputs.append(out2)
out, tgt_mask = squeeze(out1, tgt_mask)
for _ in range(self.levels - 1):
out2 = outputs.pop()
out = unsqueeze(out)
out = unsplit([out, out2])
assert len(outputs) == 0
return out, logdet_accum
@overrides
def backward(self, input: torch.Tensor, tgt_mask: torch.Tensor,
src: torch.Tensor, src_mask: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
outputs = []
masks = []
out = input
for i in range(self.levels - 1):
out1, out2 = split(out, self.blocks[i].z_features)
outputs.append(out2)
masks.append(tgt_mask)
out, tgt_mask = squeeze(out1, tgt_mask)
logdet_accum = input.new_zeros(input.size(0))
for i, block in enumerate(reversed(self.blocks)):
if i > 0:
out2 = outputs.pop()
tgt_mask = masks.pop()
out1 = unsqueeze(out)
out = unsplit([out1, out2])
out, logdet = block.backward(out, tgt_mask, src, src_mask)
logdet_accum = logdet_accum + logdet
assert len(outputs) == 0
assert len(masks) == 0
return out, logdet_accum
@overrides
def init(self, data: torch.Tensor, tgt_mask: torch.Tensor,
src: torch.Tensor, src_mask: torch.Tensor, init_scale=1.0) -> Tuple[torch.Tensor, torch.Tensor]:
logdet_accum = data.new_zeros(data.size(0))
out = data
outputs = []
for i, block in enumerate(self.blocks):
out, logdet = block.init(out, tgt_mask, src, src_mask, init_scale=init_scale)
logdet_accum = logdet_accum + logdet
if i < self.levels - 1:
out1, out2 = split(out, block.z_features)
outputs.append(out2)
out, tgt_mask = squeeze(out1, tgt_mask)
for _ in range(self.levels - 1):
out2 = outputs.pop()
out = unsqueeze(out)
out = unsplit([out, out2])
assert len(outputs) == 0
return out, logdet_accum
@classmethod
def from_params(cls, params: Dict) -> "NMTFlow":
return NMTFlow(**params)
NMTFlow.register('nmt')
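# --- Illustrative usage sketch (hypothetical shapes and hyper-parameters,
# --- not part of the original file): round-trip check of the flow.
if __name__ == '__main__':
    flow = NMTFlow(levels=2, num_steps=[2, 2], features=8, src_features=16,
                   factors=[2], hidden_features=64, coupling_type='conv',
                   kernel_size=3, max_length=20)
    with torch.no_grad():
        z = torch.randn(3, 12, 8)   # tgt length must be divisible by 2**(levels-1)
        tgt_mask = torch.ones(3, 12)
        src = torch.randn(3, 10, 16)
        src_mask = torch.ones(3, 10)
        out, logdet = flow.forward(z, tgt_mask, src, src_mask)
        recon, logdet_inv = flow.backward(out, tgt_mask, src, src_mask)
        # backward inverts forward, and the two log-determinants cancel
        print((z - recon).abs().max().item(), (logdet + logdet_inv).abs().max().item())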
| 24,648 | 44.815985 | 144 | py |
flowseq | flowseq-master/flownmt/flows/flow.py | from typing import Dict, Tuple
import torch
import torch.nn as nn
class Flow(nn.Module):
"""
Normalizing Flow base class
"""
_registry = dict()
def __init__(self, inverse):
super(Flow, self).__init__()
self.inverse = inverse
def forward(self, *inputs, **kwargs) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Args:
            *inputs: input [batch, *input_size]
Returns: out: Tensor [batch, *input_size], logdet: Tensor [batch]
out, the output of the flow
logdet, the log determinant of :math:`\partial output / \partial input`
"""
raise NotImplementedError
def backward(self, *inputs, **kwargs) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Args:
            *inputs: input [batch, *input_size]
Returns: out: Tensor [batch, *input_size], logdet: Tensor [batch]
out, the output of the flow
logdet, the log determinant of :math:`\partial output / \partial input`
"""
raise NotImplementedError
def init(self, *input, **kwargs) -> Tuple[torch.Tensor, torch.Tensor]:
raise NotImplementedError
def fwdpass(self, x: torch.Tensor, *h, init=False, init_scale=1.0, **kwargs) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Args:
x: Tensor
The random variable before flow
h: list of object
other conditional inputs
init: bool
perform initialization or not (default: False)
init_scale: float
initial scale (default: 1.0)
Returns: y: Tensor, logdet: Tensor
y, the random variable after flow
logdet, the log determinant of :math:`\partial y / \partial x`
Then the density :math:`\log(p(y)) = \log(p(x)) - logdet`
"""
if self.inverse:
if init:
                raise RuntimeError('inverse flow should be initialized with backward pass')
else:
return self.backward(x, *h, **kwargs)
else:
if init:
return self.init(x, *h, init_scale=init_scale, **kwargs)
else:
return self.forward(x, *h, **kwargs)
def bwdpass(self, y: torch.Tensor, *h, init=False, init_scale=1.0, **kwargs) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Args:
y: Tensor
The random variable after flow
h: list of object
other conditional inputs
init: bool
perform initialization or not (default: False)
init_scale: float
initial scale (default: 1.0)
Returns: x: Tensor, logdet: Tensor
x, the random variable before flow
logdet, the log determinant of :math:`\partial x / \partial y`
Then the density :math:`\log(p(y)) = \log(p(x)) + logdet`
"""
if self.inverse:
if init:
return self.init(y, *h, init_scale=init_scale, **kwargs)
else:
return self.forward(y, *h, **kwargs)
else:
if init:
                raise RuntimeError('forward flow should be initialized with forward pass')
else:
return self.backward(y, *h, **kwargs)
@classmethod
def register(cls, name: str):
Flow._registry[name] = cls
@classmethod
def by_name(cls, name: str):
return Flow._registry[name]
@classmethod
def from_params(cls, params: Dict):
raise NotImplementedError
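# --- Illustrative note (not part of the original file) ---
# A concrete subclass implements forward/backward as exact inverses, e.g. a
# toy scaling flow (masks omitted for brevity):
#
#   class ScaleFlow(Flow):
#       def forward(self, x):
#           return x * 2.0, math.log(2.0) * x[0].numel() * x.new_ones(x.size(0))
#       def backward(self, y):
#           return y * 0.5, -math.log(2.0) * y[0].numel() * y.new_ones(y.size(0))
#
# so that bwdpass(fwdpass(x)) recovers x and the two log-determinants cancel.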
| 3,608 | 30.657895 | 118 | py |
flowseq | flowseq-master/flownmt/flows/actnorm.py | from overrides import overrides
from typing import Dict, Tuple
import numpy as np
import torch
import torch.nn as nn
from torch.nn import Parameter
from flownmt.flows.flow import Flow
class ActNormFlow(Flow):
def __init__(self, in_features, inverse=False):
super(ActNormFlow, self).__init__(inverse)
self.in_features = in_features
self.log_scale = Parameter(torch.Tensor(in_features))
self.bias = Parameter(torch.Tensor(in_features))
self.reset_parameters()
def reset_parameters(self):
nn.init.normal_(self.log_scale, mean=0, std=0.05)
nn.init.constant_(self.bias, 0.)
@overrides
def forward(self, input: torch.Tensor, mask: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Args:
input: Tensor
input tensor [batch, N1, N2, ..., Nl, in_features]
mask: Tensor
mask tensor [batch, N1, N2, ...,Nl]
Returns: out: Tensor , logdet: Tensor
out: [batch, N1, N2, ..., in_features], the output of the flow
logdet: [batch], the log determinant of :math:`\partial output / \partial input`
"""
dim = input.dim()
out = input * self.log_scale.exp() + self.bias
out = out * mask.unsqueeze(dim - 1)
logdet = self.log_scale.sum(dim=0, keepdim=True)
if dim > 2:
num = mask.view(out.size(0), -1).sum(dim=1)
logdet = logdet * num
return out, logdet
@overrides
def backward(self, input: torch.Tensor, mask: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Args:
input: Tensor
input tensor [batch, N1, N2, ..., Nl, in_features]
mask: Tensor
mask tensor [batch, N1, N2, ...,Nl]
Returns: out: Tensor , logdet: Tensor
out: [batch, N1, N2, ..., in_features], the output of the flow
logdet: [batch], the log determinant of :math:`\partial output / \partial input`
"""
dim = input.dim()
out = (input - self.bias) * mask.unsqueeze(dim - 1)
out = out.div(self.log_scale.exp() + 1e-8)
logdet = self.log_scale.sum(dim=0, keepdim=True) * -1.0
if dim > 2:
num = mask.view(out.size(0), -1).sum(dim=1)
logdet = logdet * num
return out, logdet
@overrides
def init(self, data: torch.Tensor, mask: torch.Tensor, init_scale=1.0) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Args:
            data: Tensor
input tensor [batch, N1, N2, ..., in_features]
mask: Tensor
mask tensor [batch, N1, N2, ...,Nl]
init_scale: float
initial scale
Returns: out: Tensor , logdet: Tensor
out: [batch, N1, N2, ..., in_features], the output of the flow
logdet: [batch], the log determinant of :math:`\partial output / \partial input`
"""
with torch.no_grad():
out, _ = self.forward(data, mask)
mean = out.view(-1, self.in_features).mean(dim=0)
std = out.view(-1, self.in_features).std(dim=0)
inv_stdv = init_scale / (std + 1e-6)
self.log_scale.add_(inv_stdv.log())
self.bias.add_(-mean).mul_(inv_stdv)
return self.forward(data, mask)
@overrides
def extra_repr(self):
return 'inverse={}, in_features={}'.format(self.inverse, self.in_features)
@classmethod
def from_params(cls, params: Dict) -> "ActNormFlow":
return ActNormFlow(**params)
ActNormFlow.register('actnorm')
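# --- Illustrative sketch (hypothetical shapes, not part of the original file) ---
# Data-dependent init standardizes the first batch: masked outputs end up with
# roughly zero mean and unit std per feature (scaled by init_scale).
if __name__ == '__main__':
    layer = ActNormFlow(in_features=6)
    data = torch.randn(4, 10, 6) * 3.0 + 1.5
    mask = torch.ones(4, 10)
    out, _ = layer.init(data, mask)
    print(out.view(-1, 6).mean(dim=0), out.view(-1, 6).std(dim=0))  # expect ~0 and ~1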
| 3,655 | 32.851852 | 112 | py |
flowseq | flowseq-master/flownmt/flows/linear.py | from overrides import overrides
from typing import Dict, Tuple
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import Parameter
from flownmt.flows.flow import Flow
class InvertibleLinearFlow(Flow):
def __init__(self, in_features, inverse=False):
super(InvertibleLinearFlow, self).__init__(inverse)
self.in_features = in_features
self.weight = Parameter(torch.Tensor(in_features, in_features))
self.register_buffer('weight_inv', self.weight.data.clone())
self.reset_parameters()
def reset_parameters(self):
nn.init.orthogonal_(self.weight)
self.sync()
def sync(self):
self.weight_inv.copy_(self.weight.data.inverse())
@overrides
def forward(self, input: torch.Tensor, mask: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Args:
input: Tensor
input tensor [batch, N1, N2, ..., Nl, in_features]
mask: Tensor
mask tensor [batch, N1, N2, ...,Nl]
Returns: out: Tensor , logdet: Tensor
out: [batch, N1, N2, ..., in_features], the output of the flow
logdet: [batch], the log determinant of :math:`\partial output / \partial input`
"""
dim = input.dim()
# [batch, N1, N2, ..., in_features]
out = F.linear(input, self.weight)
_, logdet = torch.slogdet(self.weight)
if dim > 2:
num = mask.view(out.size(0), -1).sum(dim=1)
logdet = logdet * num
return out, logdet
@overrides
def backward(self, input: torch.Tensor, mask: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Args:
input: Tensor
input tensor [batch, N1, N2, ..., Nl, in_features]
mask: Tensor
mask tensor [batch, N1, N2, ...,Nl]
Returns: out: Tensor , logdet: Tensor
out: [batch, N1, N2, ..., in_features], the output of the flow
logdet: [batch], the log determinant of :math:`\partial output / \partial input`
"""
dim = input.dim()
# [batch, N1, N2, ..., in_features]
out = F.linear(input, self.weight_inv)
_, logdet = torch.slogdet(self.weight_inv)
if dim > 2:
num = mask.view(out.size(0), -1).sum(dim=1)
logdet = logdet * num
return out, logdet
@overrides
def init(self, data: torch.Tensor, mask: torch.Tensor, init_scale=1.0) -> Tuple[torch.Tensor, torch.Tensor]:
with torch.no_grad():
            return self.forward(data, mask)
@overrides
def extra_repr(self):
return 'inverse={}, in_features={}'.format(self.inverse, self.in_features)
@classmethod
def from_params(cls, params: Dict) -> "InvertibleLinearFlow":
return InvertibleLinearFlow(**params)
class InvertibleMultiHeadFlow(Flow):
@staticmethod
def _get_heads(in_features):
units = [32, 16, 8]
for unit in units:
if in_features % unit == 0:
return in_features // unit
assert in_features < 8, 'features={}'.format(in_features)
return 1
def __init__(self, in_features, heads=None, type='A', inverse=False):
super(InvertibleMultiHeadFlow, self).__init__(inverse)
self.in_features = in_features
if heads is None:
heads = InvertibleMultiHeadFlow._get_heads(in_features)
self.heads = heads
self.type = type
        assert in_features % heads == 0, 'features ({}) should be divisible by heads ({})'.format(in_features, heads)
assert type in ['A', 'B'], 'type should belong to [A, B]'
self.weight = Parameter(torch.Tensor(in_features // heads, in_features // heads))
self.register_buffer('weight_inv', self.weight.data.clone())
self.reset_parameters()
def reset_parameters(self):
nn.init.orthogonal_(self.weight)
self.sync()
def sync(self):
self.weight_inv.copy_(self.weight.data.inverse())
@overrides
def forward(self, input: torch.Tensor, mask: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Args:
input: Tensor
input tensor [batch, N1, N2, ..., Nl, in_features]
mask: Tensor
mask tensor [batch, N1, N2, ...,Nl]
Returns: out: Tensor , logdet: Tensor
out: [batch, N1, N2, ..., in_features], the output of the flow
logdet: [batch], the log determinant of :math:`\partial output / \partial input`
"""
size = input.size()
dim = input.dim()
# [batch, N1, N2, ..., heads, in_features/ heads]
if self.type == 'A':
out = input.view(*size[:-1], self.heads, self.in_features // self.heads)
else:
out = input.view(*size[:-1], self.in_features // self.heads, self.heads).transpose(-2, -1)
out = F.linear(out, self.weight)
if self.type == 'B':
out = out.transpose(-2, -1).contiguous()
out = out.view(*size)
_, logdet = torch.slogdet(self.weight)
if dim > 2:
num = mask.view(size[0], -1).sum(dim=1) * self.heads
logdet = logdet * num
return out, logdet
@overrides
def backward(self, input: torch.Tensor, mask: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Args:
input: Tensor
input tensor [batch, N1, N2, ..., Nl, in_features]
mask: Tensor
mask tensor [batch, N1, N2, ...,Nl]
Returns: out: Tensor , logdet: Tensor
out: [batch, N1, N2, ..., in_features], the output of the flow
logdet: [batch], the log determinant of :math:`\partial output / \partial input`
"""
size = input.size()
dim = input.dim()
# [batch, N1, N2, ..., heads, in_features/ heads]
if self.type == 'A':
out = input.view(*size[:-1], self.heads, self.in_features // self.heads)
else:
out = input.view(*size[:-1], self.in_features // self.heads, self.heads).transpose(-2, -1)
out = F.linear(out, self.weight_inv)
if self.type == 'B':
out = out.transpose(-2, -1).contiguous()
out = out.view(*size)
_, logdet = torch.slogdet(self.weight_inv)
if dim > 2:
num = mask.view(size[0], -1).sum(dim=1) * self.heads
logdet = logdet * num
return out, logdet
@overrides
def init(self, data: torch.Tensor, mask: torch.Tensor, init_scale=1.0) -> Tuple[torch.Tensor, torch.Tensor]:
with torch.no_grad():
return self.forward(data, mask)
@overrides
def extra_repr(self):
return 'inverse={}, in_features={}, heads={}, type={}'.format(self.inverse, self.in_features, self.heads, self.type)
@classmethod
def from_params(cls, params: Dict) -> "InvertibleMultiHeadFlow":
return InvertibleMultiHeadFlow(**params)
InvertibleLinearFlow.register('invertible_linear')
InvertibleMultiHeadFlow.register('invertible_multihead')
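# --- Illustrative sketch (hypothetical shapes, not part of the original file) ---
# backward() applies the cached inverse weight, so it exactly undoes forward();
# call sync() to refresh `weight_inv` whenever `weight` has been updated.
if __name__ == '__main__':
    flow = InvertibleMultiHeadFlow(in_features=8, type='A')
    x = torch.randn(2, 5, 8)
    mask = torch.ones(2, 5)
    y, logdet = flow.forward(x, mask)
    x2, logdet_inv = flow.backward(y, mask)
    print((x - x2).abs().max().item(), (logdet + logdet_inv).abs().max().item())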
| 7,141 | 34.356436 | 124 | py |
flowseq | flowseq-master/flownmt/flows/parallel/data_parallel.py | from overrides import overrides
from typing import Tuple
import torch
from torch.nn.parallel.replicate import replicate
from flownmt.flows.parallel.parallel_apply import parallel_apply
from torch.nn.parallel.scatter_gather import scatter_kwargs, gather
from torch.nn.parallel.data_parallel import _check_balance
from flownmt.flows.flow import Flow
class DataParallelFlow(Flow):
"""
Implements data parallelism at the flow level.
"""
def __init__(self, flow: Flow, device_ids=None, output_device=None, dim=0):
super(DataParallelFlow, self).__init__(flow.inverse)
if not torch.cuda.is_available():
self.flow = flow
self.device_ids = []
return
if device_ids is None:
device_ids = list(range(torch.cuda.device_count()))
if output_device is None:
output_device = device_ids[0]
self.dim = dim
self.flow = flow
self.device_ids = device_ids
self.output_device = output_device
_check_balance(self.device_ids)
if len(self.device_ids) == 1:
self.flow.cuda(device_ids[0])
@overrides
def forward(self, *inputs, **kwargs) -> Tuple[torch.Tensor, torch.Tensor]:
if not self.device_ids:
return self.flow.forward(*inputs, **kwargs)
inputs, kwargs = self.scatter(inputs, kwargs, self.device_ids)
if len(self.device_ids) == 1:
return self.flow.forward(*inputs[0], **kwargs[0])
replicas = self.replicate(self.flow, self.device_ids[:len(inputs)])
outputs = self.parallel_apply(replicas, inputs, kwargs)
return self.gather(outputs, self.output_device)
@overrides
def backward(self, *inputs, **kwargs) -> Tuple[torch.Tensor, torch.Tensor]:
if not self.device_ids:
return self.flow.backward(*inputs, **kwargs)
inputs, kwargs = self.scatter(inputs, kwargs, self.device_ids)
if len(self.device_ids) == 1:
return self.flow.backward(*inputs[0], **kwargs[0])
replicas = self.replicate(self.flow, self.device_ids[:len(inputs)])
outputs = self.parallel_apply(replicas, inputs, kwargs, backward=True)
return self.gather(outputs, self.output_device)
@overrides
def init(self, *input, **kwargs) -> Tuple[torch.Tensor, torch.Tensor]:
return self.flow.init(*input, **kwargs)
def replicate(self, flow, device_ids):
return replicate(flow, device_ids)
def scatter(self, inputs, kwargs, device_ids):
return scatter_kwargs(inputs, kwargs, device_ids, dim=self.dim)
def parallel_apply(self, replicas, inputs, kwargs, backward=False):
return parallel_apply(replicas, inputs, kwargs, self.device_ids[:len(replicas)], backward=backward)
def gather(self, outputs, output_device):
return gather(outputs, output_device, dim=self.dim)
| 2,891 | 37.56 | 107 | py |
flowseq | flowseq-master/flownmt/flows/parallel/parallel_apply.py | import threading
import torch
def get_a_var(obj):
if isinstance(obj, torch.Tensor):
return obj
if isinstance(obj, list) or isinstance(obj, tuple):
for result in map(get_a_var, obj):
if isinstance(result, torch.Tensor):
return result
if isinstance(obj, dict):
for result in map(get_a_var, obj.items()):
if isinstance(result, torch.Tensor):
return result
return None
def parallel_apply(flows, inputs, kwargs_tup=None, devices=None, backward=False):
r"""Applies each `module` in :attr:`modules` in parallel on arguments
contained in :attr:`inputs` (positional) and :attr:`kwargs_tup` (keyword)
on each of :attr:`devices`.
:attr:`modules`, :attr:`inputs`, :attr:`kwargs_tup` (if given), and
:attr:`devices` (if given) should all have same length. Moreover, each
element of :attr:`inputs` can either be a single object as the only argument
to a module, or a collection of positional arguments.
"""
assert len(flows) == len(inputs)
if kwargs_tup is not None:
assert len(flows) == len(kwargs_tup)
else:
kwargs_tup = ({},) * len(flows)
if devices is not None:
assert len(flows) == len(devices)
else:
devices = [None] * len(flows)
lock = threading.Lock()
results = {}
grad_enabled = torch.is_grad_enabled()
def _worker(i, flow, input, kwargs, device=None, back=False):
torch.set_grad_enabled(grad_enabled)
if device is None:
device = get_a_var(input).get_device()
try:
with torch.cuda.device(device):
# this also avoids accidental slicing of `input` if it is a Tensor
if not isinstance(input, (list, tuple)):
input = (input,)
output = flow.backward(*input, **kwargs) if back else flow.forward(*input, **kwargs)
with lock:
results[i] = output
except Exception as e:
with lock:
results[i] = e
if len(flows) > 1:
threads = [threading.Thread(target=_worker,
args=(i, flow, input, kwargs, device, backward))
for i, (flow, input, kwargs, device) in
enumerate(zip(flows, inputs, kwargs_tup, devices))]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
else:
_worker(0, flows[0], inputs[0], kwargs_tup[0], devices[0], backward)
outputs = []
for i in range(len(inputs)):
output = results[i]
if isinstance(output, Exception):
raise output
outputs.append(output)
return outputs
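# --- Illustrative note (not part of the original file) ---
# parallel_apply is normally reached through DataParallelFlow; a direct call
# would look roughly like:
#
#   outputs = parallel_apply([flow_replica0, flow_replica1],
#                            [(z0, mask0, src0, src_mask0),
#                             (z1, mask1, src1, src_mask1)],
#                            devices=[0, 1], backward=False)
#   (out0, logdet0), (out1, logdet1) = outputs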
| 2,756 | 33.4625 | 100 | py |
flowseq | flowseq-master/flownmt/flows/couplings/transform.py | import math
from overrides import overrides
from typing import Tuple
import torch
class Transform():
@staticmethod
def fwd(z: torch.Tensor, mask: torch.Tensor, params) -> Tuple[torch.Tensor, torch.Tensor]:
raise NotImplementedError
@staticmethod
def bwd(z: torch.Tensor, mask: torch.Tensor, params) -> Tuple[torch.Tensor, torch.Tensor]:
raise NotImplementedError
class Additive(Transform):
@staticmethod
@overrides
def fwd(z: torch.Tensor, mask: torch.Tensor, params) -> Tuple[torch.Tensor, torch.Tensor]:
mu = params
z = (z + mu).mul(mask.unsqueeze(2))
logdet = z.new_zeros(z.size(0))
return z, logdet
@staticmethod
@overrides
def bwd(z: torch.Tensor, mask: torch.Tensor, params) -> Tuple[torch.Tensor, torch.Tensor]:
mu = params
z = (z - mu).mul(mask.unsqueeze(2))
logdet = z.new_zeros(z.size(0))
return z, logdet
class Affine(Transform):
@staticmethod
@overrides
def fwd(z: torch.Tensor, mask: torch.Tensor, params) -> Tuple[torch.Tensor, torch.Tensor]:
mu, log_scale = params.chunk(2, dim=2)
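        # NB: add_/sigmoid_ below modify this chunk of `params` in place.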
scale = log_scale.add_(2.0).sigmoid_()
z = (scale * z + mu).mul(mask.unsqueeze(2))
logdet = scale.log().mul(mask.unsqueeze(2)).view(z.size(0), -1).sum(dim=1)
return z, logdet
@staticmethod
@overrides
def bwd(z: torch.Tensor, mask: torch.Tensor, params) -> Tuple[torch.Tensor, torch.Tensor]:
mu, log_scale = params.chunk(2, dim=2)
scale = log_scale.add_(2.0).sigmoid_()
z = (z - mu).div(scale + 1e-12).mul(mask.unsqueeze(2))
logdet = scale.log().mul(mask.unsqueeze(2)).view(z.size(0), -1).sum(dim=1) * -1.0
return z, logdet
def arccosh(x):
return torch.log(x + torch.sqrt(x.pow(2) - 1))
def arcsinh(x):
return torch.log(x + torch.sqrt(x.pow(2) + 1))
class NLSQ(Transform):
# A = 8 * math.sqrt(3) / 9 - 0.05 # 0.05 is a small number to prevent exactly 0 slope
logA = math.log(8 * math.sqrt(3) / 9 - 0.05) # 0.05 is a small number to prevent exactly 0 slope
@staticmethod
def get_pseudo_params(params):
a, logb, cprime, logd, g = params.chunk(5, dim=2)
# for stability
logb = logb.mul_(0.4)
cprime = cprime.mul_(0.3)
logd = logd.mul_(0.4)
# b = logb.add_(2.0).sigmoid_()
# d = logd.add_(2.0).sigmoid_()
# c = (NLSQ.A * b / d).mul(cprime.tanh_())
c = (NLSQ.logA + logb - logd).exp_().mul(cprime.tanh_())
b = logb.exp_()
d = logd.exp_()
return a, b, c, d, g
@staticmethod
@overrides
def fwd(z: torch.Tensor, mask: torch.Tensor, params) -> Tuple[torch.Tensor, torch.Tensor]:
a, b, c, d, g = NLSQ.get_pseudo_params(params)
arg = (d * z).add_(g)
denom = arg.pow(2).add_(1)
c = c / denom
z = (b * z + a + c).mul(mask.unsqueeze(2))
logdet = torch.log(b - 2 * c * d * arg / denom)
logdet = logdet.mul(mask.unsqueeze(2)).view(z.size(0), -1).sum(dim=1)
return z, logdet
@staticmethod
@overrides
def bwd(z: torch.Tensor, mask: torch.Tensor, params) -> Tuple[torch.Tensor, torch.Tensor]:
a, b, c, d, g = NLSQ.get_pseudo_params(params)
# double needed for stability. No effect on overall speed
a = a.double()
b = b.double()
c = c.double()
d = d.double()
g = g.double()
z = z.double()
aa = -b * d.pow(2)
bb = (z - a) * d.pow(2) - 2 * b * d * g
cc = (z - a) * 2 * d * g - b * (1 + g.pow(2))
dd = (z - a) * (1 + g.pow(2)) - c
p = (3 * aa * cc - bb.pow(2)) / (3 * aa.pow(2))
q = (2 * bb.pow(3) - 9 * aa * bb * cc + 27 * aa.pow(2) * dd) / (27 * aa.pow(3))
t = -2 * torch.abs(q) / q * torch.sqrt(torch.abs(p) / 3)
inter_term1 = -3 * torch.abs(q) / (2 * p) * torch.sqrt(3 / torch.abs(p))
inter_term2 = 1 / 3 * arccosh(torch.abs(inter_term1 - 1) + 1)
t = t * torch.cosh(inter_term2)
tpos = -2 * torch.sqrt(torch.abs(p) / 3)
inter_term1 = 3 * q / (2 * p) * torch.sqrt(3 / torch.abs(p))
inter_term2 = 1 / 3 * arcsinh(inter_term1)
tpos = tpos * torch.sinh(inter_term2)
t[p > 0] = tpos[p > 0]
z = t - bb / (3 * aa)
arg = d * z + g
denom = arg.pow(2) + 1
logdet = torch.log(b - 2 * c * d * arg / denom.pow(2))
z = z.float().mul(mask.unsqueeze(2))
logdet = logdet.float().mul(mask.unsqueeze(2)).view(z.size(0), -1).sum(dim=1) * -1.0
return z, logdet
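# --- Illustrative sketch (hypothetical shapes, not part of the original file) ---
# Round-trip check for the affine transform. Note that fwd/bwd modify the
# `params` chunks in place (add_/sigmoid_), so each call gets its own clone.
if __name__ == '__main__':
    z = torch.randn(2, 7, 4)
    mask = torch.ones(2, 7)
    params = torch.randn(2, 7, 8)  # chunked on dim 2 into mu and log_scale
    y, logdet = Affine.fwd(z, mask, params.clone())
    z2, logdet_inv = Affine.bwd(y, mask, params.clone())
    print((z - z2).abs().max().item(), (logdet + logdet_inv).abs().max().item())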
| 4,619 | 32.478261 | 101 | py |
flowseq | flowseq-master/flownmt/flows/couplings/coupling.py | from overrides import overrides
from typing import Tuple, Dict
import torch
from flownmt.flows.couplings.blocks import NICEConvBlock, NICERecurrentBlock, NICESelfAttnBlock
from flownmt.flows.flow import Flow
from flownmt.flows.couplings.transform import Transform, Additive, Affine, NLSQ
class NICE(Flow):
"""
NICE Flow
"""
def __init__(self, src_features, features, hidden_features=None, inverse=False, split_dim=2, split_type='continuous', order='up', factor=2,
transform='affine', type='conv', kernel=3, rnn_mode='LSTM', heads=1, dropout=0.0, pos_enc='add', max_length=100):
super(NICE, self).__init__(inverse)
self.features = features
assert split_dim in [1, 2]
assert split_type in ['continuous', 'skip']
if split_dim == 1:
assert split_type == 'skip'
if factor != 2:
assert split_type == 'continuous'
assert order in ['up', 'down']
self.split_dim = split_dim
self.split_type = split_type
self.up = order == 'up'
if split_dim == 2:
out_features = features // factor
in_features = features - out_features
self.z1_channels = in_features if self.up else out_features
else:
in_features = features
out_features = features
self.z1_channels = None
assert transform in ['additive', 'affine', 'nlsq']
if transform == 'additive':
self.transform = Additive
elif transform == 'affine':
self.transform = Affine
out_features = out_features * 2
elif transform == 'nlsq':
self.transform = NLSQ
out_features = out_features * 5
else:
raise ValueError('unknown transform: {}'.format(transform))
if hidden_features is None:
hidden_features = min(2 * in_features, 1024)
assert type in ['conv', 'self_attn', 'rnn']
if type == 'conv':
self.net = NICEConvBlock(src_features, in_features, out_features, hidden_features, kernel_size=kernel, dropout=dropout)
elif type == 'rnn':
self.net = NICERecurrentBlock(rnn_mode, src_features, in_features, out_features, hidden_features, dropout=dropout)
else:
self.net = NICESelfAttnBlock(src_features, in_features, out_features, hidden_features,
heads=heads, dropout=dropout, pos_enc=pos_enc, max_length=max_length)
def split(self, z, mask):
split_dim = self.split_dim
split_type = self.split_type
dim = z.size(split_dim)
if split_type == 'continuous':
return z.split([self.z1_channels, dim - self.z1_channels], dim=split_dim), mask
elif split_type == 'skip':
idx1 = torch.tensor(list(range(0, dim, 2))).to(z.device)
idx2 = torch.tensor(list(range(1, dim, 2))).to(z.device)
z1 = z.index_select(split_dim, idx1)
z2 = z.index_select(split_dim, idx2)
if split_dim == 1:
mask = mask.index_select(split_dim, idx1)
return (z1, z2), mask
else:
raise ValueError('unknown split type: {}'.format(split_type))
def unsplit(self, z1, z2):
split_dim = self.split_dim
split_type = self.split_type
if split_type == 'continuous':
return torch.cat([z1, z2], dim=split_dim)
elif split_type == 'skip':
z = torch.cat([z1, z2], dim=split_dim)
dim = z1.size(split_dim)
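            # Interleave back: output position 2k takes z1[k] (index k in the
            # concatenation) and position 2k+1 takes z2[k] (index k + dim).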
idx = torch.tensor([i // 2 if i % 2 == 0 else i // 2 + dim for i in range(dim * 2)]).to(z.device)
return z.index_select(split_dim, idx)
else:
raise ValueError('unknown split type: {}'.format(split_type))
def calc_params(self, z: torch.Tensor, mask: torch.Tensor, src: torch.Tensor, src_mask: torch.Tensor):
params = self.net(z, mask, src, src_mask)
return params
def init_net(self, z: torch.Tensor, mask: torch.Tensor, src: torch.Tensor, src_mask: torch.Tensor, init_scale=1.0):
params = self.net.init(z, mask, src, src_mask, init_scale=init_scale)
return params
@overrides
def forward(self, input: torch.Tensor, mask: torch.Tensor, src: torch.Tensor, src_mask: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Args:
input: Tensor
input tensor [batch, length, in_features]
mask: Tensor
mask tensor [batch, length]
src: Tensor
source input tensor [batch, src_length, src_features]
src_mask: Tensor
source mask tensor [batch, src_length]
Returns: out: Tensor , logdet: Tensor
out: [batch, length, in_features], the output of the flow
logdet: [batch], the log determinant of :math:`\partial output / \partial input`
"""
# [batch, length, in_channels]
(z1, z2), mask = self.split(input, mask)
# [batch, length, features]
z, zp = (z1, z2) if self.up else (z2, z1)
params = self.calc_params(z, mask, src, src_mask)
zp, logdet = self.transform.fwd(zp, mask, params)
z1, z2 = (z, zp) if self.up else (zp, z)
return self.unsplit(z1, z2), logdet
@overrides
def backward(self, input: torch.Tensor, mask: torch.Tensor, src: torch.Tensor, src_mask: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Args:
input: Tensor
input tensor [batch, length, in_features]
mask: Tensor
mask tensor [batch, length]
src: Tensor
source input tensor [batch, src_length, src_features]
src_mask: Tensor
source mask tensor [batch, src_length]
Returns: out: Tensor , logdet: Tensor
out: [batch, length, in_features], the output of the flow
logdet: [batch], the log determinant of :math:`\partial output / \partial input`
"""
# [batch, length, in_channels]
(z1, z2), mask = self.split(input, mask)
# [batch, length, features]
z, zp = (z1, z2) if self.up else (z2, z1)
params = self.calc_params(z, mask, src, src_mask)
zp, logdet = self.transform.bwd(zp, mask, params)
z1, z2 = (z, zp) if self.up else (zp, z)
return self.unsplit(z1, z2), logdet
@overrides
def init(self, data: torch.Tensor, mask: torch.Tensor, src: torch.Tensor, src_mask: torch.Tensor, init_scale=1.0) -> Tuple[torch.Tensor, torch.Tensor]:
# [batch, length, in_channels]
(z1, z2), mask = self.split(data, mask)
# [batch, length, features]
z, zp = (z1, z2) if self.up else (z2, z1)
params = self.init_net(z, mask, src, src_mask, init_scale=init_scale)
zp, logdet = self.transform.fwd(zp, mask, params)
z1, z2 = (z, zp) if self.up else (zp, z)
return self.unsplit(z1, z2), logdet
@overrides
def extra_repr(self):
        return 'inverse={}, features={}, split_dim={}, split_type={}, order={}'.format(self.inverse, self.features, self.split_dim, self.split_type, 'up' if self.up else 'down')
@classmethod
def from_params(cls, params: Dict) -> "NICE":
return NICE(**params)
NICE.register('nice')
| 7,316 | 40.573864 | 155 | py |
flowseq | flowseq-master/flownmt/flows/couplings/blocks.py | import torch
import torch.nn as nn
from torch.nn.utils.rnn import pad_packed_sequence, pack_padded_sequence
from flownmt.nnet.weightnorm import Conv1dWeightNorm, LinearWeightNorm
from flownmt.nnet.attention import GlobalAttention, MultiHeadAttention
from flownmt.nnet.positional_encoding import PositionalEncoding
from flownmt.nnet.transformer import TransformerDecoderLayer
class NICEConvBlock(nn.Module):
def __init__(self, src_features, in_features, out_features, hidden_features, kernel_size, dropout=0.0):
super(NICEConvBlock, self).__init__()
self.conv1 = Conv1dWeightNorm(in_features, hidden_features, kernel_size=kernel_size, padding=kernel_size // 2, bias=True)
self.conv2 = Conv1dWeightNorm(hidden_features, hidden_features, kernel_size=kernel_size, padding=kernel_size // 2, bias=True)
self.activation = nn.ELU(inplace=True)
self.attn = GlobalAttention(src_features, hidden_features, hidden_features, dropout=dropout)
self.linear = LinearWeightNorm(hidden_features * 2, out_features, bias=True)
def forward(self, x, mask, src, src_mask):
"""
Args:
x: Tensor
input tensor [batch, length, in_features]
mask: Tensor
x mask tensor [batch, length]
src: Tensor
source input tensor [batch, src_length, src_features]
src_mask: Tensor
source mask tensor [batch, src_length]
Returns: Tensor
out tensor [batch, length, out_features]
"""
out = self.activation(self.conv1(x.transpose(1, 2)))
out = self.activation(self.conv2(out)).transpose(1, 2) * mask.unsqueeze(2)
out = self.attn(out, src, key_mask=src_mask.eq(0))
out = self.linear(torch.cat([x, out], dim=2))
return out
def init(self, x, mask, src, src_mask, init_scale=1.0):
out = self.activation(self.conv1.init(x.transpose(1, 2), init_scale=init_scale))
out = self.activation(self.conv2.init(out, init_scale=init_scale)).transpose(1, 2) * mask.unsqueeze(2)
out = self.attn.init(out, src, key_mask=src_mask.eq(0), init_scale=init_scale)
out = self.linear.init(torch.cat([x, out], dim=2), init_scale=0.0)
return out
class NICERecurrentBlock(nn.Module):
def __init__(self, rnn_mode, src_features, in_features, out_features, hidden_features, dropout=0.0):
super(NICERecurrentBlock, self).__init__()
if rnn_mode == 'RNN':
RNN = nn.RNN
elif rnn_mode == 'LSTM':
RNN = nn.LSTM
elif rnn_mode == 'GRU':
RNN = nn.GRU
else:
raise ValueError('Unknown RNN mode: %s' % rnn_mode)
self.rnn = RNN(in_features, hidden_features // 2, batch_first=True, bidirectional=True)
self.attn = GlobalAttention(src_features, hidden_features, hidden_features, dropout=dropout)
self.linear = LinearWeightNorm(in_features + hidden_features, out_features, bias=True)
def forward(self, x, mask, src, src_mask):
lengths = mask.sum(dim=1).long()
packed_out = pack_padded_sequence(x, lengths, batch_first=True, enforce_sorted=False)
packed_out, _ = self.rnn(packed_out)
out, _ = pad_packed_sequence(packed_out, batch_first=True, total_length=mask.size(1))
# [batch, length, out_features]
out = self.attn(out, src, key_mask=src_mask.eq(0))
out = self.linear(torch.cat([x, out], dim=2))
return out
def init(self, x, mask, src, src_mask, init_scale=1.0):
lengths = mask.sum(dim=1).long()
packed_out = pack_padded_sequence(x, lengths, batch_first=True, enforce_sorted=False)
packed_out, _ = self.rnn(packed_out)
out, _ = pad_packed_sequence(packed_out, batch_first=True, total_length=mask.size(1))
# [batch, length, out_features]
out = self.attn.init(out, src, key_mask=src_mask.eq(0), init_scale=init_scale)
out = self.linear.init(torch.cat([x, out], dim=2), init_scale=0.0)
return out
class NICESelfAttnBlock(nn.Module):
def __init__(self, src_features, in_features, out_features, hidden_features, heads, dropout=0.0,
pos_enc='add', max_length=100):
super(NICESelfAttnBlock, self).__init__()
assert pos_enc in ['add', 'attn']
self.src_proj = nn.Linear(src_features, in_features, bias=False) if src_features != in_features else None
self.pos_enc = PositionalEncoding(in_features, padding_idx=None, init_size=max_length + 1)
self.pos_attn = MultiHeadAttention(in_features, heads, dropout=dropout) if pos_enc == 'attn' else None
self.transformer = TransformerDecoderLayer(in_features, hidden_features, heads, dropout=dropout)
self.linear = LinearWeightNorm(in_features, out_features, bias=True)
def forward(self, x, mask, src, src_mask):
if self.src_proj is not None:
src = self.src_proj(src)
key_mask = mask.eq(0)
pos_enc = self.pos_enc(x) * mask.unsqueeze(2)
if self.pos_attn is None:
x = x + pos_enc
else:
x = self.pos_attn(pos_enc, x, x, key_mask)
x = self.transformer(x, key_mask, src, src_mask.eq(0))
return self.linear(x)
def init(self, x, mask, src, src_mask, init_scale=1.0):
if self.src_proj is not None:
src = self.src_proj(src)
key_mask = mask.eq(0)
pos_enc = self.pos_enc(x) * mask.unsqueeze(2)
if self.pos_attn is None:
x = x + pos_enc
else:
x = self.pos_attn(pos_enc, x, x, key_mask)
x = self.transformer.init(x, key_mask, src, src_mask.eq(0), init_scale=init_scale)
x = x * mask.unsqueeze(2)
return self.linear.init(x, init_scale=0.0)
| 5,809 | 44.748031 | 133 | py |
flowseq | flowseq-master/flownmt/optim/lr_scheduler.py | from torch.optim.optimizer import Optimizer
class _LRScheduler(object):
def __init__(self, optimizer, last_epoch=-1):
if not isinstance(optimizer, Optimizer):
raise TypeError('{} is not an Optimizer'.format(
type(optimizer).__name__))
self.optimizer = optimizer
if last_epoch == -1:
for group in optimizer.param_groups:
group.setdefault('initial_lr', group['lr'])
last_epoch = 0
else:
for i, group in enumerate(optimizer.param_groups):
if 'initial_lr' not in group:
raise KeyError("param 'initial_lr' is not specified "
"in param_groups[{}] when resuming an optimizer".format(i))
self.base_lrs = list(map(lambda group: group['initial_lr'], optimizer.param_groups))
def state_dict(self):
"""Returns the state of the scheduler as a :class:`dict`.
It contains an entry for every variable in self.__dict__ which
is not the optimizer.
"""
return {key: value for key, value in self.__dict__.items() if key != 'optimizer'}
def load_state_dict(self, state_dict):
"""Loads the schedulers state.
Arguments:
state_dict (dict): scheduler state. Should be an object returned
from a call to :meth:`state_dict`.
"""
self.__dict__.update(state_dict)
def get_lr(self):
raise NotImplementedError
def step(self, epoch=None):
if epoch is None:
epoch = self.last_epoch + 1
self.last_epoch = epoch
for param_group, lr in zip(self.optimizer.param_groups, self.get_lr()):
param_group['lr'] = lr
class InverseSquareRootScheduler(_LRScheduler):
"""
Decay the LR based on the inverse square root of the update number.
We also support a warmup phase where we linearly increase the learning rate
from zero until the configured learning rate (``--lr``).
    Thereafter we decay the LR proportionally to the inverse square root of the
    update number, with a decay factor set to align with the configured learning rate.
During warmup::
lrs = torch.linspace(0, args.lr, args.warmup_updates)
lr = lrs[update_num]
After warmup::
decay_factor = args.lr * sqrt(args.warmup_updates)
lr = decay_factor / sqrt(update_num)
"""
def __init__(self, optimizer, warmup_steps, init_lr, last_epoch=-1):
assert warmup_steps > 0, 'warmup steps should be larger than 0.'
super(InverseSquareRootScheduler, self).__init__(optimizer, last_epoch)
self.warmup_steps = float(warmup_steps)
self.init_lr = init_lr
self.lr_steps = [(base_lr - init_lr) / warmup_steps for base_lr in self.base_lrs]
self.decay_factor = self.warmup_steps ** 0.5
if last_epoch == -1:
last_epoch = 0
self.step(last_epoch)
def get_lr(self):
if self.last_epoch < self.warmup_steps:
return [self.init_lr + lr_step * self.last_epoch for lr_step in self.lr_steps]
else:
lr_factor = self.decay_factor * self.last_epoch**-0.5
return [base_lr * lr_factor for base_lr in self.base_lrs]
class ExponentialScheduler(_LRScheduler):
"""Set the learning rate of each parameter group to the initial lr decayed
by gamma every epoch. When last_epoch=-1, sets initial lr as lr.
We also support a warmup phase where we linearly increase the learning rate
from zero until the configured learning rate (``--lr``).
Args:
optimizer (Optimizer): Wrapped optimizer.
gamma (float): Multiplicative factor of learning rate decay.
        warmup_steps (int): Warmup steps.
last_epoch (int): The index of last epoch. Default: -1.
"""
def __init__(self, optimizer, gamma, warmup_steps, init_lr, last_epoch=-1):
super(ExponentialScheduler, self).__init__(optimizer, last_epoch)
self.gamma = gamma
# handle warmup <= 0
self.warmup_steps = max(1, warmup_steps)
self.init_lr = init_lr
self.lr_steps = [(base_lr - init_lr) / self.warmup_steps for base_lr in self.base_lrs]
if last_epoch == -1:
last_epoch = 0
self.step(last_epoch)
def get_lr(self):
if self.last_epoch < self.warmup_steps:
return [self.init_lr + lr_step * self.last_epoch for lr_step in self.lr_steps]
else:
lr_factor = self.gamma ** (self.last_epoch - self.warmup_steps)
return [base_lr * lr_factor for base_lr in self.base_lrs]
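# --- Illustrative sketch (hypothetical optimizer, not part of the original file) ---
# Warmup ramps the LR linearly from init_lr to the base LR, then it decays as
# decay_factor / sqrt(step).
if __name__ == '__main__':
    import torch
    opt = torch.optim.SGD([torch.nn.Parameter(torch.zeros(1))], lr=1e-3)
    sched = InverseSquareRootScheduler(opt, warmup_steps=100, init_lr=0.0)
    for step in range(1, 401):
        sched.step(step)  # in training code this follows optimizer.step()
    print(opt.param_groups[0]['lr'])  # 1e-3 * 10 / sqrt(400) = 5e-4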
| 4,603 | 40.477477 | 94 | py |
flowseq | flowseq-master/flownmt/optim/adamw.py | import math
import torch
from torch.optim.optimizer import Optimizer
class AdamW(Optimizer):
r"""Implements AdamW algorithm.
This implementation is modified from torch.optim.Adam based on:
`Fixed Weight Decay Regularization in Adam`
(see https://arxiv.org/abs/1711.05101)
Adam has been proposed in `Adam: A Method for Stochastic Optimization`_.
Arguments:
params (iterable): iterable of parameters to optimize or dicts defining
parameter groups
lr (float, optional): learning rate (default: 1e-3)
betas (Tuple[float, float], optional): coefficients used for computing
running averages of gradient and its square (default: (0.9, 0.999))
eps (float, optional): term added to the denominator to improve
numerical stability (default: 1e-8)
weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
amsgrad (boolean, optional): whether to use the AMSGrad variant of this
algorithm from the paper `On the Convergence of Adam and Beyond`_
(default: False)
.. _Adam\: A Method for Stochastic Optimization:
https://arxiv.org/abs/1412.6980
.. _On the Convergence of Adam and Beyond:
https://openreview.net/forum?id=ryQu7f-RZ
"""
def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8,
weight_decay=0, amsgrad=False):
if not 0.0 <= lr:
raise ValueError("Invalid learning rate: {}".format(lr))
if not 0.0 <= eps:
raise ValueError("Invalid epsilon value: {}".format(eps))
if not 0.0 <= betas[0] < 1.0:
raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0]))
if not 0.0 <= betas[1] < 1.0:
raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1]))
defaults = dict(lr=lr, betas=betas, eps=eps,
weight_decay=weight_decay, amsgrad=amsgrad)
super(AdamW, self).__init__(params, defaults)
def __setstate__(self, state):
super(AdamW, self).__setstate__(state)
for group in self.param_groups:
group.setdefault('amsgrad', False)
def step(self, closure=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
grad = p.grad.data
if grad.is_sparse:
raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead')
amsgrad = group['amsgrad']
state = self.state[p]
# State initialization
if len(state) == 0:
state['step'] = 0
# Exponential moving average of gradient values
state['exp_avg'] = torch.zeros_like(p.data)
# Exponential moving average of squared gradient values
state['exp_avg_sq'] = torch.zeros_like(p.data)
if amsgrad:
# Maintains max of all exp. moving avg. of sq. grad. values
state['max_exp_avg_sq'] = torch.zeros_like(p.data)
exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
if amsgrad:
max_exp_avg_sq = state['max_exp_avg_sq']
beta1, beta2 = group['betas']
state['step'] += 1
# Decay the first and second moment running average coefficient
exp_avg.mul_(beta1).add_(1 - beta1, grad)
exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad)
if amsgrad:
# Maintains the maximum of all 2nd moment running avg. till now
torch.max(max_exp_avg_sq, exp_avg_sq, out=max_exp_avg_sq)
# Use the max. for normalizing running avg. of gradient
denom = max_exp_avg_sq.sqrt().add_(group['eps'])
else:
denom = exp_avg_sq.sqrt().add_(group['eps'])
bias_correction1 = 1 - beta1 ** state['step']
bias_correction2 = 1 - beta2 ** state['step']
step_size = group['lr'] * math.sqrt(bias_correction2) / bias_correction1
if group['weight_decay'] != 0:
p.data.add_(-group['weight_decay'] * group['lr'], p.data)
p.data.addcdiv_(-step_size, exp_avg, denom)
return loss
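# --- Usage sketch (added for illustration; not part of the original file) ---
# A minimal example of driving this optimizer, assuming the ~1.x PyTorch API
# the repo targets. The model and data below are placeholders. Note that the
# decoupled weight decay above scales p.data directly instead of being folded
# into the gradient, which is the point of AdamW.
if __name__ == "__main__":
    model = torch.nn.Linear(10, 2)
    optimizer = AdamW(model.parameters(), lr=1e-3, weight_decay=1e-2)
    loss = model(torch.randn(4, 10)).pow(2).mean()
    loss.backward()
    optimizer.step()
    optimizer.zero_grad()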
| 4,811 | 41.584071 | 116 | py |
flowseq | flowseq-master/flownmt/nnet/weightnorm.py | from overrides import overrides
import torch
import torch.nn as nn
class LinearWeightNorm(nn.Module):
"""
Linear with weight normalization
"""
def __init__(self, in_features, out_features, bias=True):
super(LinearWeightNorm, self).__init__()
self.linear = nn.Linear(in_features, out_features, bias=bias)
self.reset_parameters()
def reset_parameters(self):
nn.init.normal_(self.linear.weight, mean=0.0, std=0.05)
if self.linear.bias is not None:
nn.init.constant_(self.linear.bias, 0)
self.linear = nn.utils.weight_norm(self.linear)
    def extra_repr(self):
        # the shape attributes live on the wrapped nn.Linear, not on this module
        return 'in_features={}, out_features={}, bias={}'.format(
            self.linear.in_features, self.linear.out_features, self.linear.bias is not None
        )
def init(self, x, init_scale=1.0):
with torch.no_grad():
# [batch, out_features]
out = self(x).view(-1, self.linear.out_features)
# [out_features]
mean = out.mean(dim=0)
std = out.std(dim=0)
inv_stdv = init_scale / (std + 1e-6)
self.linear.weight_g.mul_(inv_stdv.unsqueeze(1))
if self.linear.bias is not None:
self.linear.bias.add_(-mean).mul_(inv_stdv)
return self(x)
def forward(self, input):
return self.linear(input)
class Conv1dWeightNorm(nn.Module):
"""
Conv1d with weight normalization
"""
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
padding=0, dilation=1, groups=1, bias=True):
super(Conv1dWeightNorm, self).__init__()
self.conv = nn.Conv1d(in_channels, out_channels, kernel_size, stride=stride,
padding=padding, dilation=dilation, groups=groups, bias=bias)
self.reset_parameters()
def reset_parameters(self):
nn.init.normal_(self.conv.weight, mean=0.0, std=0.05)
if self.conv.bias is not None:
nn.init.constant_(self.conv.bias, 0)
self.conv = nn.utils.weight_norm(self.conv)
def init(self, x, init_scale=1.0):
with torch.no_grad():
# [batch, n_channels, L]
out = self(x)
n_channels = out.size(1)
out = out.transpose(0, 1).contiguous().view(n_channels, -1)
# [n_channels]
mean = out.mean(dim=1)
std = out.std(dim=1)
inv_stdv = init_scale / (std + 1e-6)
self.conv.weight_g.mul_(inv_stdv.view(n_channels, 1, 1))
if self.conv.bias is not None:
self.conv.bias.add_(-mean).mul_(inv_stdv)
return self(x)
def forward(self, input):
return self.conv(input)
@overrides
def extra_repr(self):
return self.conv.extra_repr()
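# --- Usage sketch (added for illustration; not part of the original file) ---
# Both modules follow the data-dependent initialization pattern used across
# flowseq: run init() once on a warm-up batch so outputs start roughly
# zero-mean / unit-variance (scaled by init_scale), then call forward() as usual.
if __name__ == "__main__":
    layer = Conv1dWeightNorm(in_channels=8, out_channels=16, kernel_size=3, padding=1)
    x = torch.randn(4, 8, 20)            # [batch, channels, length]
    y0 = layer.init(x, init_scale=1.0)   # one-time calibration pass
    y1 = layer(x)                        # regular forward afterwards
    print(y0.shape, y1.shape)            # torch.Size([4, 16, 20]) twice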
| 2,806 | 32.819277 | 91 | py |
flowseq | flowseq-master/flownmt/nnet/transformer.py | import torch.nn as nn
from flownmt.nnet.attention import MultiHeadAttention, PositionwiseFeedForward
class TransformerEncoderLayer(nn.Module):
def __init__(self, model_dim, hidden_dim, heads, dropout=0.0, mask_diag=False):
super(TransformerEncoderLayer, self).__init__()
self.slf_attn = MultiHeadAttention(model_dim, heads, dropout=dropout, mask_diag=mask_diag)
self.pos_ffn = PositionwiseFeedForward(model_dim, hidden_dim, dropout=dropout)
def forward(self, x, mask):
out = self.slf_attn(x, x, x, key_mask=mask)
out = self.pos_ffn(out)
return out
def init(self, x, mask, init_scale=1.0):
out = self.slf_attn.init(x, x, x, key_mask=mask, init_scale=init_scale)
out = self.pos_ffn.init(out, init_scale=init_scale)
return out
class TransformerDecoderLayer(nn.Module):
def __init__(self, model_dim, hidden_dim, heads, dropout=0.0, mask_diag=False):
super(TransformerDecoderLayer, self).__init__()
self.slf_attn = MultiHeadAttention(model_dim, heads, dropout=dropout, mask_diag=mask_diag)
self.enc_attn = MultiHeadAttention(model_dim, heads, dropout=dropout)
self.pos_ffn = PositionwiseFeedForward(model_dim, hidden_dim, dropout=dropout)
def forward(self, x, mask, src, src_mask):
out = self.slf_attn(x, x, x, key_mask=mask)
out = self.enc_attn(out, src, src, key_mask=src_mask)
out = self.pos_ffn(out)
return out
def init(self, x, mask, src, src_mask, init_scale=1.0):
out = self.slf_attn.init(x, x, x, key_mask=mask, init_scale=init_scale)
out = self.enc_attn.init(out, src, src, key_mask=src_mask, init_scale=init_scale)
out = self.pos_ffn.init(out, init_scale=init_scale)
return out
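# --- Usage sketch (added for illustration; not part of the original file) ---
# Masks follow MultiHeadAttention's convention: shape [batch, length] with
# nonzero entries marking PADDING positions to be ignored.
if __name__ == "__main__":
    import torch
    layer = TransformerEncoderLayer(model_dim=64, hidden_dim=256, heads=4)
    x = torch.randn(2, 7, 64)                    # [batch, len, model_dim]
    pad_mask = torch.zeros(2, 7, dtype=torch.bool)
    pad_mask[:, 5:] = True                       # last two positions are padding
    out = layer(x, pad_mask)
    print(out.shape)                             # torch.Size([2, 7, 64])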
| 1,784 | 42.536585 | 98 | py |
flowseq | flowseq-master/flownmt/nnet/positional_encoding.py | import math
import torch
import torch.nn as nn
from flownmt.utils import make_positions
class PositionalEncoding(nn.Module):
"""This module produces sinusoidal positional embeddings of any length.
Padding symbols are ignored.
"""
def __init__(self, encoding_dim, padding_idx, init_size=1024):
super().__init__()
self.encoding_dim = encoding_dim
self.padding_idx = padding_idx
self.weights = PositionalEncoding.get_embedding(
init_size,
encoding_dim,
padding_idx,
)
self.register_buffer('_float_tensor', torch.FloatTensor(1))
@staticmethod
def get_embedding(num_encodings, encoding_dim, padding_idx=None):
"""Build sinusoidal embeddings.
This matches the implementation in tensor2tensor, but differs slightly
from the description in Section 3.5 of "Attention Is All You Need".
"""
half_dim = encoding_dim // 2
emb = math.log(10000) / (half_dim - 1)
emb = torch.exp(torch.arange(half_dim, dtype=torch.float) * -emb)
emb = torch.arange(num_encodings, dtype=torch.float).unsqueeze(1) * emb.unsqueeze(0)
emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1).view(num_encodings, -1)
if encoding_dim % 2 == 1:
# zero pad
emb = torch.cat([emb, torch.zeros(num_encodings, 1)], dim=1)
emb[0, :] = 0
return emb
def forward(self, x):
"""Input is expected to be of size [bsz x seqlen]."""
bsz, seq_len = x.size()[:2]
max_pos = seq_len + 1
if self.weights is None or max_pos > self.weights.size(0):
# recompute/expand embeddings if needed
self.weights = PositionalEncoding.get_embedding(
max_pos,
                self.encoding_dim,
self.padding_idx,
)
self.weights = self.weights.type_as(self._float_tensor)
if self.padding_idx is None:
return self.weights[1:seq_len + 1].detach()
else:
positions = make_positions(x, self.padding_idx)
return self.weights.index_select(0, positions.view(-1)).view(bsz, seq_len, -1).detach()
def max_positions(self):
"""Maximum number of supported positions."""
return int(1e5) # an arbitrary large number
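# --- Usage sketch (added for illustration; not part of the original file) ---
# The encodings depend only on position (and the padding layout recovered via
# make_positions), not on the token ids themselves.
if __name__ == "__main__":
    pe = PositionalEncoding(encoding_dim=16, padding_idx=0)
    tokens = torch.tensor([[5, 7, 2, 0, 0]])     # token ids; 0 is padding
    enc = pe(tokens)
    print(enc.shape)                             # torch.Size([1, 5, 16])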
| 2,348 | 36.285714 | 99 | py |
flowseq | flowseq-master/flownmt/nnet/layer_norm.py | import torch
import torch.nn as nn
def LayerNorm(normalized_shape, eps=1e-5, elementwise_affine=True, export=False):
if not export and torch.cuda.is_available():
try:
from apex.normalization import FusedLayerNorm
return FusedLayerNorm(normalized_shape, eps, elementwise_affine)
except ImportError:
pass
return nn.LayerNorm(normalized_shape, eps, elementwise_affine)
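# Usage note (added for illustration): LayerNorm is a factory function, not a
# class. It returns apex's FusedLayerNorm when available on GPU and falls back
# to torch.nn.LayerNorm otherwise, so callers can simply write
# `norm = LayerNorm(512)` and stay agnostic to the backend.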
| 428 | 32 | 81 | py |
flowseq | flowseq-master/flownmt/nnet/criterion.py | import torch.nn.functional as F
import torch.nn as nn
class LabelSmoothedCrossEntropyLoss(nn.Module):
"""
Cross Entropy loss with label smoothing.
For training, the loss is smoothed with parameter eps, while for evaluation, the smoothing is disabled.
"""
def __init__(self, label_smoothing):
super(LabelSmoothedCrossEntropyLoss, self).__init__()
self.eps = label_smoothing
def forward(self, input, target):
# [batch, c, d1, ..., dk]
loss = F.log_softmax(input, dim=1) * -1.
# [batch, d1, ..., dk]
nll_loss = loss.gather(dim=1, index=target.unsqueeze(1)).squeeze(1)
if self.training:
# [batch, c, d1, ..., dk]
inf_mask = loss.eq(float('inf'))
# [batch, d1, ..., dk]
smooth_loss = loss.masked_fill(inf_mask, 0.).sum(dim=1)
eps_i = self.eps / (1.0 - inf_mask.float()).sum(dim=1)
return nll_loss * (1. - self.eps) + smooth_loss * eps_i
else:
return nll_loss
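# --- Usage sketch (added for illustration; not part of the original file) ---
# In train() mode the loss mixes the target NLL with a uniform distribution
# over the non-masked classes; in eval() mode it reduces to plain NLL.
if __name__ == "__main__":
    import torch
    criterion = LabelSmoothedCrossEntropyLoss(label_smoothing=0.1)
    logits = torch.randn(2, 5)           # [batch, classes]
    target = torch.tensor([1, 3])
    criterion.train()
    print(criterion(logits, target))     # smoothed loss, one value per instance
    criterion.eval()
    print(criterion(logits, target))     # plain NLL, one value per instance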
| 1,029 | 35.785714 | 107 | py |
flowseq | flowseq-master/flownmt/nnet/attention.py | from overrides import overrides
import torch
from torch.nn import Parameter
import torch.nn as nn
import torch.nn.functional as F
from flownmt.nnet.layer_norm import LayerNorm
class GlobalAttention(nn.Module):
"""
Global Attention between encoder and decoder
"""
def __init__(self, key_features, query_features, value_features, hidden_features=None, dropout=0.0):
"""
Args:
key_features: int
dimension of keys
query_features: int
dimension of queries
value_features: int
dimension of values (outputs)
hidden_features: int
dimension of hidden states (default value_features)
dropout: float
dropout rate
"""
super(GlobalAttention, self).__init__()
if hidden_features is None:
hidden_features = value_features
self.key_proj = nn.Linear(key_features, 2 * hidden_features, bias=True)
self.query_proj = nn.Linear(query_features, hidden_features, bias=True)
self.dropout = dropout
self.fc = nn.Linear(hidden_features, value_features)
self.hidden_features = hidden_features
self.reset_parameters()
def reset_parameters(self):
# key proj
nn.init.xavier_uniform_(self.key_proj.weight)
nn.init.constant_(self.key_proj.bias, 0)
# query proj
nn.init.xavier_uniform_(self.query_proj.weight)
nn.init.constant_(self.query_proj.bias, 0)
# fc
nn.init.xavier_uniform_(self.fc.weight)
nn.init.constant_(self.fc.bias, 0)
@overrides
def forward(self, query, key, key_mask=None):
"""
Args:
query: Tensor
query tensor [batch, query_length, query_features]
key: Tensor
key tensor [batch, key_length, key_features]
key_mask: ByteTensor or None
binary ByteTensor [batch, src_len] padding elements are indicated by 1s.
Returns: Tensor
value tensor [batch, query_length, value_features]
"""
bs, timesteps, _ = key.size()
dim = self.hidden_features
# [batch, query_length, dim]
query = self.query_proj(query)
# [batch, key_length, 2 * dim]
c = self.key_proj(key)
# [batch, key_length, 2, dim]
c = c.view(bs, timesteps, 2, dim)
# [batch, key_length, dim]
key = c[:, :, 0]
value = c[:, :, 1]
# attention weights [batch, query_length, key_length]
attn_weights = torch.bmm(query, key.transpose(1, 2))
if key_mask is not None:
attn_weights = attn_weights.masked_fill(key_mask.unsqueeze(1), float('-inf'))
attn_weights = F.softmax(attn_weights.float(), dim=-1,
dtype=torch.float32 if attn_weights.dtype == torch.float16 else attn_weights.dtype)
# values [batch, query_length, dim]
out = torch.bmm(attn_weights, value)
out = F.dropout(self.fc(out), p=self.dropout, training=self.training)
return out
def init(self, query, key, key_mask=None, init_scale=1.0):
with torch.no_grad():
return self(query, key, key_mask=key_mask)
class MultiHeadAttention(nn.Module):
"""
Multi-head Attention
"""
def __init__(self, model_dim, heads, dropout=0.0, mask_diag=False):
"""
Args:
model_dim: int
the input dimension for keys, queries and values
heads: int
number of heads
dropout: float
dropout rate
"""
super(MultiHeadAttention, self).__init__()
self.model_dim = model_dim
self.head_dim = model_dim // heads
self.heads = heads
self.dropout = dropout
self.mask_diag = mask_diag
assert self.head_dim * heads == self.model_dim, "model_dim must be divisible by number of heads"
self.scaling = self.head_dim ** -0.5
self.in_proj_weight = Parameter(torch.empty(3 * model_dim, model_dim))
self.in_proj_bias = Parameter(torch.empty(3 * model_dim))
self.layer_norm = LayerNorm(model_dim)
self.reset_parameters()
def reset_parameters(self):
# in proj
nn.init.xavier_uniform_(self.in_proj_weight[:self.model_dim, :])
nn.init.xavier_uniform_(self.in_proj_weight[self.model_dim:(self.model_dim * 2), :])
nn.init.xavier_uniform_(self.in_proj_weight[(self.model_dim * 2):, :])
nn.init.constant_(self.in_proj_bias, 0.)
def forward(self, query, key, value, key_mask=None):
"""
Args:
            query: Tensor
[batch, tgt_len, model_dim]
key: Tensor
[batch, src_len, model_dim]
value: Tensor
[batch, src_len, model_dim]
key_mask: ByteTensor or None
binary ByteTensor [batch, src_len] padding elements are indicated by 1s.
Returns:
"""
qkv_same = query.data_ptr() == key.data_ptr() == value.data_ptr()
kv_same = key.data_ptr() == value.data_ptr()
bs, src_len, model_dim = key.size()
tgt_len = query.size(1)
heads = self.heads
residual = query
# k, v: [bs, src_len, model_dim]
# q: [bs, tgt_len, model_dim]
if qkv_same:
# self-attention
q, k, v = self._in_proj_qkv(query)
elif kv_same:
# encoder-decoder attention
q = self._in_proj_q(query)
k, v = self._in_proj_kv(key)
else:
q = self._in_proj_q(query)
k = self._in_proj_k(key)
v = self._in_proj_v(value)
q *= self.scaling
model_dim = q.size(2)
dim = model_dim // heads
        # [batch, len, model_dim] -> [len, batch * heads, dim] -> [batch * heads, len, dim]
q = q.transpose(0, 1).contiguous().view(tgt_len, bs * heads, dim).transpose(0, 1)
k = k.transpose(0, 1).contiguous().view(src_len, bs * heads, dim).transpose(0, 1)
v = v.transpose(0, 1).contiguous().view(src_len, bs * heads, dim).transpose(0, 1)
# attention weights [batch * heads, tgt_len, src_len]
attn_weights = torch.bmm(q, k.transpose(1, 2))
if key_mask is not None:
attn_weights = attn_weights.view(bs, heads, tgt_len, src_len)
attn_weights = attn_weights.masked_fill(key_mask.unsqueeze(1).unsqueeze(2), float('-inf'))
attn_weights = attn_weights.view(bs * heads, tgt_len, src_len)
if self.mask_diag:
assert tgt_len == src_len
# [1, tgt_len, tgt_len]
diag_mask = torch.eye(tgt_len, device=query.device, dtype=torch.uint8).unsqueeze(0)
attn_weights = attn_weights.masked_fill(diag_mask, float('-inf'))
attn_weights = F.softmax(attn_weights.float(), dim=-1,
dtype=torch.float32 if attn_weights.dtype == torch.float16 else attn_weights.dtype)
# outputs [batch * heads, tgt_len, dim]
out = torch.bmm(attn_weights, v)
# merge heads
# [batch, heads, tgt_len, dim] -> [batch, tgt_len, heads, dim]
# -> [batch, tgt_len, model_dim]
out = out.view(bs, heads, tgt_len, dim).transpose(1, 2).contiguous().view(bs, tgt_len, model_dim)
out = F.dropout(out, p=self.dropout, training=self.training)
out = self.layer_norm(out + residual)
return out
def init(self, query, key, value, key_mask=None, init_scale=1.0):
with torch.no_grad():
return self(query, key, value, key_mask=key_mask)
def _in_proj_qkv(self, query):
return self._in_proj(query).chunk(3, dim=-1)
def _in_proj_kv(self, key):
return self._in_proj(key, start=self.model_dim).chunk(2, dim=-1)
def _in_proj_q(self, query):
return self._in_proj(query, end=self.model_dim)
def _in_proj_k(self, key):
return self._in_proj(key, start=self.model_dim, end=2 * self.model_dim)
def _in_proj_v(self, value):
return self._in_proj(value, start=2 * self.model_dim)
def _in_proj(self, input, start=0, end=None):
weight = self.in_proj_weight
bias = self.in_proj_bias
weight = weight[start:end, :]
if bias is not None:
bias = bias[start:end]
return F.linear(input, weight, bias)
class PositionwiseFeedForward(nn.Module):
def __init__(self, features, hidden_features, dropout=0.0):
super(PositionwiseFeedForward, self).__init__()
self.linear1 = nn.Linear(features, hidden_features)
self.dropout = dropout
self.linear2 = nn.Linear(hidden_features, features)
self.layer_norm = LayerNorm(features)
def forward(self, x):
residual = x
x = F.relu(self.linear1(x), inplace=True)
x = F.dropout(x, p=self.dropout, training=self.training)
x = F.dropout(self.linear2(x), p=self.dropout, training=self.training)
x = self.layer_norm(residual + x)
return x
def init(self, x, init_scale=1.0):
with torch.no_grad():
return self(x)
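# --- Usage sketch (added for illustration; not part of the original file) ---
# Encoder-decoder style attention: queries come from the target side while
# keys and values share the source memory; key_mask marks padding with True.
if __name__ == "__main__":
    attn = MultiHeadAttention(model_dim=64, heads=4)
    query = torch.randn(2, 5, 64)                 # [batch, tgt_len, model_dim]
    memory = torch.randn(2, 9, 64)                # [batch, src_len, model_dim]
    key_mask = torch.zeros(2, 9, dtype=torch.bool)
    key_mask[:, 7:] = True                        # last two source positions padded
    out = attn(query, memory, memory, key_mask=key_mask)
    print(out.shape)                              # torch.Size([2, 5, 64])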
| 9,245 | 35.401575 | 116 | py |
flowseq | flowseq-master/flownmt/data/dataloader.py | import codecs
import math
import random
from collections import defaultdict
import numpy as np
import torch
import os
def get_sorted_wordlist(path):
freqs = defaultdict(lambda: 0)
with codecs.open(path, "r", encoding="utf-8") as fin:
for line in fin:
words = line.strip().split()
for word in words:
freqs[word] += 1
sorted_words = sorted(freqs, key=freqs.get, reverse=True)
wordlist = [word for word in sorted_words]
return wordlist
UNK = "<unk>"
EOS = "<eos>"
PAD = "<pad>"
SRC_PAD = PAD
TGT_PAD = PAD
class NMTDataSet():
def __init__(self, data_path, src_lang, tgt_lang, src_vocab_path, tgt_vocab_path, src_max_vocab, tgt_max_vocab,
subword, create_vocab):
self.train_src_path = os.path.join(data_path, 'train.{}'.format(src_lang))
self.train_tgt_path = os.path.join(data_path, 'train.{}'.format(tgt_lang))
self.dev_src_path = os.path.join(data_path, 'dev.{}'.format(src_lang))
self.dev_tgt_path = os.path.join(data_path, 'dev.{}'.format(tgt_lang))
self.test_src_path = os.path.join(data_path, 'test.{}'.format(src_lang))
self.test_tgt_path = os.path.join(data_path, 'test.{}'.format(tgt_lang))
self.subword = subword
if "bpe" in subword:
self.dev_tgt_path_ori = os.path.join(data_path, 'dev.{}.ori'.format(tgt_lang))
self.test_tgt_path_ori = os.path.join(data_path, 'test.{}.ori'.format(tgt_lang))
else:
self.dev_tgt_path_ori = self.dev_tgt_path
self.test_tgt_path_ori = self.test_tgt_path
if not create_vocab:
assert src_vocab_path is not None and tgt_vocab_path is not None and os.path.exists(src_vocab_path) and os.path.exists(tgt_vocab_path)
self.src_word2id, self.src_id2word = self.load_vocab(src_vocab_path)
self.tgt_word2id, self.tgt_id2word = self.load_vocab(tgt_vocab_path)
else:
if subword == "joint-bpe":
joint_path = os.path.join(data_path, "joint.tmp")
os.system("cat %s %s > %s" % (self.train_src_path, self.train_tgt_path, joint_path))
assert src_max_vocab == tgt_max_vocab, "src max vocab size != tgt max vocab size"
word2id, id2word = self.get_vocab(joint_path, src_max_vocab, has_pad=True)
os.remove(joint_path)
self.src_word2id = self.tgt_word2id = word2id
self.src_id2word = self.tgt_id2word = id2word
else:
if subword == "sep-bpe":
assert src_max_vocab == tgt_max_vocab, "src max vocab size != tgt max vocab size"
self.src_word2id, self.src_id2word = self.get_vocab(self.train_src_path, src_max_vocab, has_pad=True)
self.tgt_word2id, self.tgt_id2word = self.get_vocab(self.train_tgt_path, tgt_max_vocab, has_pad=True)
if src_vocab_path is not None and tgt_vocab_path is not None:
self.save_vocab(self.src_id2word, src_vocab_path)
self.save_vocab(self.tgt_id2word, tgt_vocab_path)
self.src_vocab_size = len(self.src_word2id)
self.tgt_vocab_size = len(self.tgt_word2id)
self.src_pad_idx = self.src_word2id[SRC_PAD]
self.tgt_pad_idx = self.tgt_word2id[TGT_PAD]
print(f"Source vocab size={len(self.src_word2id)}, target vocab size={len(self.tgt_word2id)}")
def load_vocab(self, path):
word2id = {}
i = 0
with codecs.open(path, "r", "utf-8") as fin:
for line in fin:
word2id[line.strip()] = i
i += 1
id2word = {v: k for k, v in word2id.items()}
return word2id, id2word
def save_vocab(self, id2word, path):
print(f"Saving vocab to {path}")
with codecs.open(path, "w", encoding="utf-8") as fout:
for i in range(len(id2word)):
fout.write(id2word[i] + "\n")
def get_vocab(self, path, max_vocab=-1, has_pad=True):
if max_vocab > 0:
max_vocab = max_vocab - 3 if has_pad else max_vocab - 2
wordlist = get_sorted_wordlist(path)
if max_vocab > 0:
wordlist = wordlist[:max_vocab]
word2id = {}
if has_pad:
word2id[PAD] = 0
word2id[UNK] = len(word2id)
word2id[EOS] = len(word2id)
for word in wordlist:
word2id[word] = len(word2id)
id2word = {i: word for word, i in word2id.items()}
return word2id, id2word
def dump_to_file(self, ms, lengths, path, post_edit=True):
# ms: list of (batch_size, sent_len)
with codecs.open(path, "w", encoding="utf-8") as fout:
for m, length in zip(ms, lengths):
m = m.cpu().numpy()
length = length.cpu().numpy()
for line, l in zip(m, length):
sent = []
for w in line[:l]:
word = self.tgt_id2word[w]
if word == EOS:
break
sent.append(word)
if post_edit and (self.subword == "sep-bpe" or self.subword == "joint-bpe"):
line = ' '.join(sent)
line = line.replace('@@ ', '').strip()
                        if line.endswith("@@"):
                            line = line[:-2]
elif post_edit and (self.subword == "joint-spm"):
line = ''.join(sent)
line = line.replace('▁', ' ').strip()
else:
line = " ".join(sent)
fout.write(line + "\n")
def max_tok_len(example, count):
"""
In token batching scheme, the number of sequences is limited
such that the total number of src/tgt tokens (including padding)
in a batch <= batch_size
"""
# Maintains the longest src and tgt length in the current batch
global max_src_in_batch, max_tgt_in_batch # this is a hack
# Reset current longest length at a new batch (count=1)
if count == 1:
max_src_in_batch = 0
max_tgt_in_batch = 0
# Src: [w1 ... wN <eos>]
max_src_in_batch = max(max_src_in_batch, len(example[0]) + 1)
# Tgt: [w1 ... wM <eos>]
max_tgt_in_batch = max(max_tgt_in_batch, len(example[1]) + 1)
src_elements = count * max_src_in_batch
tgt_elements = count * max_tgt_in_batch
return max(src_elements, tgt_elements)
def batch_iter(data, batch_size, batch_size_fn=None, shuffle=False):
"""Yield elements from data in chunks of batch_size, where each chunk size
is a multiple of batch_size_multiple.
This is an extended version of torchtext.data.batch.
"""
if batch_size_fn is None:
def batch_size_fn(new, count):
return count
minibatch, size_so_far = [], 0
for ex in data:
minibatch.append(ex)
size_so_far = batch_size_fn(ex, len(minibatch))
if size_so_far >= batch_size:
overflowed = 0
if size_so_far > batch_size:
overflowed += 1
if overflowed == 0:
yield minibatch
minibatch, size_so_far = [], 0
else:
yield minibatch[:-overflowed]
minibatch = minibatch[-overflowed:]
size_so_far = 0
for i, ex in enumerate(minibatch):
size_so_far = batch_size_fn(ex, i + 1)
if minibatch:
yield minibatch
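# Illustration (added; not part of the original file): with max_tok_len as
# batch_size_fn, "batch_size" is a token budget rather than a sentence count.
# Each yielded chunk keeps count * max(len + 1) within the budget on both the
# source and target sides.
def _demo_token_batching():
    data = [([1] * 3, [1] * 4), ([1] * 5, [1] * 2), ([1] * 2, [1] * 2)]
    for chunk in batch_iter(data, batch_size=12, batch_size_fn=max_tok_len):
        print([len(src) for src, _ in chunk])    # -> [3, 5] then [2]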
def bucket_batch_iter(data, batch_size, batch_size_fn=None, shuffle=False):
"""Yield elements from data in chunks of batch_size, where each chunk size
is a multiple of batch_size_multiple.
This is an extended version of torchtext.data.batch.
"""
if batch_size_fn is None:
def batch_size_fn(new, count):
return count
buckets = [20, 40, 60, 80]
bucket_data = [[] for _ in buckets]
outliers = []
for ex in data:
tgt_len = len(ex[1])
if tgt_len > buckets[-1]:
outliers.append(ex)
continue
for bid, bl in enumerate(buckets):
if tgt_len <= bl:
bucket_data[bid].append(ex)
break
if len(outliers) > 0:
bucket_data.append(outliers)
batches, minibatch, size_so_far = [], [], 0
for bucket in bucket_data:
if shuffle:
random.shuffle(bucket)
for ex in bucket:
minibatch.append(ex)
size_so_far = batch_size_fn(ex, len(minibatch))
if size_so_far >= batch_size:
overflowed = 0
if size_so_far > batch_size:
overflowed += 1
if overflowed == 0:
batches.append(minibatch)
minibatch, size_so_far = [], 0
else:
batches.append(minibatch[:-overflowed])
minibatch = minibatch[-overflowed:]
size_so_far = 0
for i, ex in enumerate(minibatch):
size_so_far = batch_size_fn(ex, i + 1)
if minibatch:
batches.append(minibatch)
if shuffle:
random.shuffle(batches)
for minibatch in batches:
yield minibatch
class DataIterator():
def __init__(self, dataset, batch_size, batch_by_tokens, max_src_length, max_tgt_length, buffer_multiple_size,
device, model_path, len_diff=-1, len_ratio=-1, multi_scale=1, corpus="train",
bucket_data=True, rank=-1, num_replicas=0):
self.train = False # need shuffle and sort
self.device = device
if corpus == "train":
self.src_path = dataset.train_src_path
self.tgt_path = dataset.train_tgt_path
self.tgt_path_ori = None
self.train = True
elif corpus == "dev":
self.src_path = dataset.dev_src_path
self.tgt_path = dataset.dev_tgt_path
self.tgt_path_ori = dataset.dev_tgt_path_ori
elif corpus == "test":
self.src_path = dataset.test_src_path
self.tgt_path = dataset.test_tgt_path
self.tgt_path_ori = dataset.test_tgt_path_ori
else:
raise ValueError
self.corpus = corpus
self.batch_size = batch_size
self.batch_size_fn = max_tok_len if batch_by_tokens else None
self.max_src_length = max_src_length
self.max_tgt_length = max_tgt_length
self.len_diff = len_diff
self.len_ratio = len_ratio
self.multi_scale = multi_scale
self.src_word2id = dataset.src_word2id
self.tgt_word2id = dataset.tgt_word2id
if rank < 0:
assert num_replicas == 0
else:
assert corpus == 'train'
assert rank < num_replicas
assert self.tgt_path_ori is None
self.rank = rank
self.num_replicas = num_replicas
self.data_size, self.data = self.get_dataset()
self.batches = None
if self.train:
self.buffer_size = buffer_multiple_size * self.batch_size
assert buffer_multiple_size > 0
else:
self.buffer_size = -1
self.src_pad_idx = self.src_word2id[SRC_PAD]
self.tgt_pad_idx = self.tgt_word2id[TGT_PAD]
self.bucket = bucket_data
self.sents_num = 0
self.tgt_sort_origin_path = os.path.join(model_path, os.path.basename(self.tgt_path) + ".sort")
def filter_sents(self, s_tokens, t_tokens):
if self.max_tgt_length > 0 and self.max_src_length > 0:
if len(s_tokens) + 1 > self.max_src_length or len(t_tokens) + 1 > self.max_tgt_length:
return True
if self.len_diff > 0:
if abs(len(s_tokens) - len(t_tokens)) > self.len_diff:
return True
if self.len_ratio > 0:
ratio = len(t_tokens) / len(s_tokens)
if ratio > self.len_ratio or ratio < (1. / self.len_ratio):
return True
return False
def pad_tgt(self, tgt):
scale = self.multi_scale
tgt_len = len(tgt)
res = tgt_len % scale if tgt_len % scale > 0 else scale
tgt_len = (scale - res) + tgt_len
tgt = tgt + [self.tgt_word2id[EOS]] * (tgt_len - len(tgt))
return tgt
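    # pad_tgt illustration: with multi_scale=4 a 10-token target gains two EOS
    # ids to reach length 12 (the next multiple of 4); with multi_scale=1 the
    # length is already a multiple, so nothing is appended.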
def get_dataset(self):
count = 0
data = []
outliers = 0
src_path, tgt_path = self.src_path, self.tgt_path
tgt_ori_path = self.tgt_path_ori
ftgt_ori = None if tgt_ori_path is None else codecs.open(tgt_ori_path, "r", encoding="utf-8")
with codecs.open(src_path, "r", encoding="utf-8") as fsrc, codecs.open(tgt_path, "r", encoding="utf-8") as ftgt:
for id, (s, t) in enumerate(zip(fsrc, ftgt)):
if self.num_replicas > 0 and id % self.num_replicas != self.rank:
continue
s_tokens = s.strip().split()
t_tokens = t.strip().split()
t_ori = ftgt_ori.readline().strip() if ftgt_ori is not None else None
src = [self.src_word2id[word] if word in self.src_word2id else self.src_word2id[UNK] for word in s_tokens] + [self.src_word2id[EOS]]
tgt = [self.tgt_word2id[word] if word in self.tgt_word2id else self.tgt_word2id[UNK] for word in t_tokens] #+ [self.tgt_word2id[EOS]]
tgt = self.pad_tgt(tgt)
if self.train and self.filter_sents(src, tgt):
outliers += 1
continue
else:
if not self.train:
data.append((src, tgt, t_ori))
if self.filter_sents(src, tgt):
outliers += 1
else:
data.append((src, tgt))
count += 1
print(f"Load total {count} sentences pairs, {outliers} are out of maximum sentence length!")
return count, data
def batch(self, batch_size):
"""Yield elements from data in chunks of batch_size."""
batch_size_fn = self.batch_size_fn
if batch_size_fn is None:
def batch_size_fn(new, count):
return count
minibatch, size_so_far = [], 0
for ex in self.data:
minibatch.append(ex)
size_so_far = batch_size_fn(ex, len(minibatch))
if size_so_far == batch_size:
yield minibatch
minibatch, size_so_far = [], 0
elif size_so_far > batch_size:
yield minibatch[:-1]
minibatch, size_so_far = minibatch[-1:], batch_size_fn(ex, 1)
if minibatch:
yield minibatch
def process_batch(self, minibatch):
# padding and make mask of minibatch
# return: batch_size x max_len
# minibatch = sorted(minibatch, key=lambda x: len(x[1]), reverse=True)
src_max_len = max([len(d[0]) for d in minibatch])
tgt_max_len = max([len(d[1]) for d in minibatch])
padded_src, padded_tgt = [], []
src_mask = []
tgt_mask = []
for d in minibatch:
s, t = d[0], d[1]
padded_src.append(s + [self.src_pad_idx] * (src_max_len - len(s)))
padded_tgt.append(t + [self.tgt_pad_idx] * (tgt_max_len - len(t)))
src_mask.append([1.] * len(s) + [0.] * (src_max_len - len(s)))
tgt_mask.append([1.] * len(t) + [0.] * (tgt_max_len - len(t)))
padded_src = torch.from_numpy(np.array(padded_src)).long().to(self.device)
padded_tgt = torch.from_numpy(np.array(padded_tgt)).long().to(self.device)
src_mask = torch.from_numpy(np.array(src_mask)).float().to(self.device)
tgt_mask = torch.from_numpy(np.array(tgt_mask)).float().to(self.device)
return padded_src, padded_tgt, src_mask, tgt_mask
def init_epoch(self):
# NOTE: `rnn.pack_padded_sequence` requires that a minibatch
# be sorted by decreasing order, which requires reversing
# relative to typical sort keys
if self.train:
def _pool():
for p in self.batch(self.buffer_size):
if self.bucket:
p_batch = bucket_batch_iter(p,
self.batch_size,
batch_size_fn=self.batch_size_fn, shuffle=True)
else:
p_batch = batch_iter(random.sample(p, len(p)),
self.batch_size,
batch_size_fn=self.batch_size_fn)
p_batch = list(p_batch)
for b in p_batch:
yield b
self.batches = _pool()
else:
if self.batches is None:
self.batches = []
else:
self.batches.clear()
iter_func = bucket_batch_iter if self.bucket else batch_iter
for b in iter_func(
self.data,
self.batch_size,
batch_size_fn=self.batch_size_fn):
# self.batches.append(sorted(b, key=lambda x: len(x[1]), reverse=True))
self.batches.append(b)
def __iter__(self):
while True:
self.init_epoch()
tgt_ori_sents = []
for idx, minibatch in enumerate(self.batches):
self.sents_num += len(minibatch)
if not self.train:
tgt_ori_sents.append([d[2] for d in minibatch])
src_batch, tgt_batch, src_mask, tgt_mask = self.process_batch(minibatch)
yield src_batch, tgt_batch, src_mask, tgt_mask
if not self.train:
with codecs.open(self.tgt_sort_origin_path, "w", encoding="utf-8") as fout:
for b in tgt_ori_sents:
for sent in b:
fout.write(sent + "\n")
return
def get_batch(self, batch_size):
batch = random.sample(self.data, batch_size)
return self.process_batch(batch)
@property
def epoch(self):
return self.sents_num * 1. / self.data_size
def __len__(self):
if self.batch_size_fn is not None:
raise NotImplementedError
return math.ceil(self.data_size / self.batch_size)
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--train_src_path", type=str, default=None)
parser.add_argument("--train_tgt_path", type=str, default=None)
parser.add_argument("--dev_src_path", type=str, default=None)
parser.add_argument("--dev_tgt_path", type=str, default=None)
parser.add_argument("--test_src_path", type=str, default=None)
parser.add_argument("--test_tgt_path", type=str, default=None)
parser.add_argument("--src_vocab_path", type=str, default="src.vocab")
parser.add_argument("--tgt_vocab_path", type=str, default="tgt.vocab")
parser.add_argument("--batch_size", type=int, default=50)
parser.add_argument("--batch_by_tokens", type=int, default=1, help="0 is False")
parser.add_argument("--max_src_length", type=int, default=80)
parser.add_argument("--max_tgt_length", type=int, default=80)
parser.add_argument("--buffer_multiple_size", type=int, default=3)
parser.add_argument("--src_max_vocab", type=int, default=50000)
parser.add_argument("--tgt_max_vocab", type=int, default=50000)
parser.add_argument("--create_vocab", type=int, default=0)
args = parser.parse_args()
model_path = "debug"
    dataset = NMTDataSet(args.data_path, args.src_lang, args.tgt_lang,
                         args.src_vocab_path, args.tgt_vocab_path,
                         args.src_max_vocab, args.tgt_max_vocab,
                         args.subword, args.create_vocab)
train_iterator = DataIterator(dataset, args.batch_size, args.batch_by_tokens, args.max_src_length, args.max_tgt_length,
args.buffer_multiple_size, device="cpu", model_path=model_path, corpus="train")
dev_iterator = DataIterator(dataset, args.batch_size, args.batch_by_tokens, args.max_src_length, args.max_tgt_length,
args.buffer_multiple_size, device="cpu", model_path=model_path, corpus="dev")
# test_iterator = DataIterator(dataset, args, device="cpu", corpus="test")
def _print(batch, id2word):
for sent in batch:
if id2word is None:
print(" ".join([str(i) for i in sent]) + "\n")
else:
print(" ".join([id2word[w] for w in sent]) + "\n")
step = 0
    for src_batch, tgt_batch, src_mask, tgt_mask in train_iterator:
print("Epoch = %f\n" % train_iterator.epoch)
print("---src batch %d ----" % step)
_print(src_batch.numpy(), dataset.src_id2word)
print("---tgt batch %d ----" % step)
_print(tgt_batch.numpy(), dataset.tgt_id2word)
print("---src mask %d ----" % step)
_print(src_mask.numpy(), None)
step += 1
if step % 10 == 0:
break
print("############### Dev ###############")
step = 0
    for src_batch, tgt_batch, src_mask, tgt_mask in dev_iterator:
print("Epoch = %f\n" % dev_iterator.epoch)
print("---src batch %d ----" % step)
_print(src_batch.numpy(), dataset.src_id2word)
print("---tgt batch %d ----" % step)
_print(tgt_batch.numpy(), dataset.tgt_id2word)
print("---src mask %d ----" % step)
_print(src_mask.numpy(), None)
step += 1
| 21,852 | 39.097248 | 149 | py |
flowseq | flowseq-master/experiments/nmt.py | import os
import sys
current_path = os.path.dirname(os.path.realpath(__file__))
root_path = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(root_path)
import time
import json
import random
import math
import numpy as np
import torch
from torch.nn.utils import clip_grad_norm_
import torch.distributed as dist
from flownmt.data import NMTDataSet, DataIterator
from flownmt import FlowNMT
from flownmt.utils import total_grad_norm
from flownmt.optim import AdamW, InverseSquareRootScheduler, ExponentialScheduler
from experiments.options import parse_args
def logging(info, logfile):
print(info)
print(info, file=logfile)
logfile.flush()
def get_optimizer(learning_rate, parameters, betas, eps, amsgrad, weight_decay, lr_decay, warmup_steps, init_lr):
optimizer = AdamW(parameters, lr=learning_rate, betas=betas, eps=eps, amsgrad=amsgrad, weight_decay=weight_decay)
if lr_decay == 'inv_sqrt':
scheduler = InverseSquareRootScheduler(optimizer, warmup_steps, init_lr)
elif lr_decay == 'expo':
step_decay = 0.999995
scheduler = ExponentialScheduler(optimizer, step_decay, warmup_steps, init_lr)
else:
raise ValueError('unknown lr decay method: %s' % lr_decay)
return optimizer, scheduler
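# Scheduler intuition (illustrative numbers, not from the original source):
# 'inv_sqrt' warms the lr up from init_lr over warmup_steps and then decays it
# proportionally to 1/sqrt(step); 'expo' multiplies the lr by 0.999995 every
# step, i.e. it roughly halves every ln(2) / 5e-6 ~ 139k steps.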
def calc_bleu(fref, fmt, result_path):
script = os.path.join(current_path, 'scripts/multi-bleu.perl')
temp = os.path.join(result_path, 'tmp')
os.system("perl %s %s < %s > %s" % (script, fref, fmt, temp))
bleu = open(temp, 'r').read().strip()
bleu = bleu.split(",")[0].split("=")
if len(bleu) < 2:
return 0.0
bleu = float(bleu[1].strip())
return bleu
def translate(epoch, dataset, dataloader, flownmt, result_path, log):
flownmt.eval()
taus = [0.0,]
bleu = 0
logging('argmax translating...', log)
for tau in taus:
n_tr = 8 if tau > 1e-4 else 1
translations = []
lengths = []
length_err = 0
num_insts = 0
start_time = time.time()
for src, tgt, src_masks, tgt_masks in dataloader:
trans, lens = flownmt.translate_argmax(src, src_masks, n_tr=n_tr, tau=tau)
translations.append(trans)
lengths.append(lens)
length_err += (lens.float() - tgt_masks.sum(dim=1)).abs().sum().item()
num_insts += src.size(0)
time_cost = time.time() - start_time
        outfile = os.path.join(result_path, 'trans{}.t{:.1f}.mt'.format(epoch, tau))
dataset.dump_to_file(translations, lengths, outfile)
b = calc_bleu(dataloader.tgt_sort_origin_path, outfile, result_path)
logging('#SENT: {}, Tau: {:.1f}, Length Err: {:.1f}, BLEU: {:.2f}, time: {:.1f}s'.format(
num_insts, tau, length_err / num_insts, b, time_cost), log)
if bleu < b:
bleu = b
taus = []
if len(taus) > 0:
logging('importance weighted translating...', log)
n_len = 3
iwk = 4
for tau in taus:
n_tr = 8 if tau > 1e-4 else 1
translations = []
lengths = []
length_err = 0
num_insts = 0
start_time = time.time()
for src, tgt, src_masks, tgt_masks in dataloader:
trans, lens = flownmt.translate_iw(src, src_masks, n_len=n_len, n_tr=n_tr, tau=tau, k=iwk)
translations.append(trans)
lengths.append(lens)
length_err += (lens.float() - tgt_masks.sum(dim=1)).abs().sum().item()
num_insts += src.size(0)
time_cost = time.time() - start_time
outfile = os.path.join(result_path, 'trans{}.t{:.1f}.mt'.format(epoch, tau))
dataset.dump_to_file(translations, lengths, outfile)
b = calc_bleu(dataloader.tgt_sort_origin_path, outfile, result_path)
logging('Temperature: {:.1f}, Length Err: {:.1f}, BLEU: {:.2f}, time: {:.1f}s'.format(tau, length_err / num_insts, b, time_cost), log)
if bleu < b:
bleu = b
return bleu
def reconstruct(epoch, dataset, dataloader, flownmt, result_path, log):
flownmt.eval()
recons = []
lengths = []
recon_loss = 0.
length_loss = 0.
length_loss_pred = 0.
length_err = 0.
num_insts = 0
num_words = 0
start_time = time.time()
for src, tgt, src_masks, tgt_masks in dataloader:
recon, recon_err, llen, lens, llen_pred = flownmt.reconstruct(src, tgt, src_masks, tgt_masks)
recon_loss += recon_err.sum().item()
length_loss += llen.sum().item()
length_loss_pred += llen_pred.sum().item()
length_err += (lens.float() - tgt_masks.sum(dim=1)).abs().sum().item()
num_insts += src.size(0)
num_words += tgt_masks.sum().item()
recons.append(recon)
lengths.append(tgt_masks.sum(dim=1).long())
logging('reconstruct time: {:.1f}s'.format(time.time() - start_time), log)
outfile = os.path.join(result_path, 'reconstruct{}.mt'.format(epoch))
dataset.dump_to_file(recons, lengths, outfile)
bleu = calc_bleu(dataloader.tgt_sort_origin_path, outfile, result_path)
recon_loss_per_word = recon_loss / num_words
recon_loss = recon_loss / num_insts
length_loss = length_loss / num_insts
length_loss_pred = length_loss_pred / num_insts
length_err = length_err / num_insts
logging('Reconstruct BLEU: {:.2f}, NLL: {:.2f} ({:.2f}), Length NLL: {:.2f} ({:.2f}), Err: {:.1f}'.format(
bleu, recon_loss, recon_loss_per_word, length_loss, length_loss_pred, length_err), log)
def eval(args, epoch, dataset, dataloader, flownmt):
flownmt.eval()
flownmt.sync()
# reconstruct
reconstruct(epoch, dataset, dataloader, flownmt, args.result_path, args.log)
# translate
bleu = translate(epoch, dataset, dataloader, flownmt, args.result_path, args.log)
recon_loss = 0.
kl_loss = 0.
length_loss = 0.
num_insts = 0
num_words = 0
test_k = 3
for src, tgt, src_masks, tgt_masks in dataloader:
recon, kl, llen = flownmt.loss(src, tgt, src_masks, tgt_masks, nsamples=test_k, eval=True)
recon_loss += recon.sum().item()
kl_loss += kl.sum().item()
length_loss += llen.sum().item()
num_insts += src.size(0)
num_words += tgt_masks.sum().item()
kl_loss = kl_loss / num_insts
recon_loss = recon_loss / num_insts
length_loss = length_loss / num_insts
nll = kl_loss + recon_loss
ppl = np.exp(nll * num_insts / num_words)
logging('Ave NLL: {:.2f} (recon: {:.2f}, kl: {:.2f}), len: {:.2f}, PPL: {:.2f}, BLEU: {:.2f}'.format(
nll, recon_loss, kl_loss, length_loss, ppl, bleu), args.log)
logging('-' * 100, args.log)
return bleu, nll, recon_loss, kl_loss, length_loss, ppl
def setup(args):
args.cuda = torch.cuda.is_available()
random_seed = args.seed + args.rank if args.rank >= 0 else args.seed
random.seed(random_seed)
np.random.seed(random_seed)
torch.manual_seed(random_seed)
device = torch.device('cuda', args.local_rank) if args.cuda else torch.device('cpu')
if args.cuda:
torch.cuda.set_device(device)
torch.cuda.manual_seed(random_seed)
torch.backends.cudnn.benchmark = False
model_path = args.model_path
args.checkpoint_name = os.path.join(model_path, 'checkpoint')
result_path = os.path.join(model_path, 'translations')
args.result_path = result_path
vocab_path = os.path.join(model_path, 'vocab')
data_path = args.data_path
args.world_size = int(os.environ["WORLD_SIZE"]) if args.rank >=0 else 0
print("Rank {}".format(args.rank), args)
if args.rank <= 0:
if not os.path.exists(model_path):
os.makedirs(model_path)
if not os.path.exists(result_path):
os.makedirs(result_path)
if not os.path.exists(vocab_path):
os.makedirs(vocab_path)
args.log = open(os.path.join(model_path, 'log.txt'), 'w')
if args.recover > 0:
params = json.load(open(os.path.join(model_path, 'config.json'), 'r'))
src_lang = params['src']
tgt_lang = params['tgt']
src_vocab_path = os.path.join(vocab_path, '{}.vocab'.format(src_lang))
tgt_vocab_path = os.path.join(vocab_path, '{}.vocab'.format(tgt_lang))
src_vocab_size = params['src_vocab_size']
tgt_vocab_size = params['tgt_vocab_size']
args.max_src_length = params['max_src_length']
args.max_tgt_length = params['max_tgt_length']
dataset = NMTDataSet(data_path, src_lang, tgt_lang,
src_vocab_path, tgt_vocab_path,
src_vocab_size, tgt_vocab_size,
subword=args.subword, create_vocab=False)
assert src_vocab_size == dataset.src_vocab_size
assert tgt_vocab_size == dataset.tgt_vocab_size
else:
params = json.load(open(args.config, 'r'))
src_lang = args.src
tgt_lang = args.tgt
src_vocab_path = os.path.join(vocab_path, '{}.vocab'.format(src_lang))
tgt_vocab_path = os.path.join(vocab_path, '{}.vocab'.format(tgt_lang))
create_vocab = args.create_vocab
src_max_vocab = params.pop('{}_vocab_size'.format(src_lang))
tgt_max_vocab = params.pop('{}_vocab_size'.format(tgt_lang))
args.max_src_length = params.pop('max_{}_length'.format(src_lang))
args.max_tgt_length = params.pop('max_{}_length'.format(tgt_lang))
dataset = NMTDataSet(data_path, src_lang, tgt_lang,
src_vocab_path, tgt_vocab_path,
src_max_vocab, tgt_max_vocab,
subword=args.subword, create_vocab=create_vocab)
params['src'] = src_lang
params['tgt'] = tgt_lang
params['src_vocab_size'] = dataset.src_vocab_size
params['tgt_vocab_size'] = dataset.tgt_vocab_size
params['max_src_length'] = args.max_src_length
params['max_tgt_length'] = args.max_tgt_length
params['src_pad_idx'] = dataset.src_pad_idx
params['tgt_pad_idx'] = dataset.tgt_pad_idx
if args.share_all_embeddings:
assert 'share_embed' not in params or params['share_embed'], 'share embedding args conflicts'
assert 'tie_weights' not in params or params['tie_weights'], 'tie weights args conflicts'
params['share_embed'] = True
params['tie_weights'] = True
else:
params.setdefault('share_embed', False)
params.setdefault('tie_weights', False)
json.dump(params, open(os.path.join(model_path, 'config.json'), 'w'), indent=2)
flownmt = FlowNMT.from_params(params)
flownmt.to(device)
args.length_unit = flownmt.length_unit
args.device = device
args.steps_per_epoch = 1000
return args, dataset, flownmt
def init_dataloader(args, dataset):
batch_by_tokens = args.loss_type == 'token'
train_iter = DataIterator(dataset, args.batch_size, batch_by_tokens, args.max_src_length, args.max_tgt_length,
5000, args.device, args.result_path, multi_scale=args.length_unit,
corpus="train", bucket_data=args.bucket_batch, rank=args.rank,
num_replicas=args.world_size)
if args.rank <= 0:
eval_batch = args.eval_batch_size
val_iter = DataIterator(dataset, eval_batch, batch_by_tokens, args.max_src_length, args.max_tgt_length,
1000, args.device, args.result_path, corpus="dev",
bucket_data=args.bucket_batch, multi_scale=args.length_unit)
test_iter = DataIterator(dataset, eval_batch, batch_by_tokens, args.max_src_length, args.max_tgt_length,
1000, args.device, args.result_path, corpus="test",
bucket_data=args.bucket_batch, multi_scale=args.length_unit)
else:
val_iter, test_iter = None, None
return train_iter, val_iter, test_iter
def init_model(args, train_iter, flownmt):
flownmt.eval()
init_batch_size = args.init_batch_size
if args.rank <= 0:
logging('Rank {}, init model: {} instances'.format(args.rank, init_batch_size), args.log)
else:
print('Rank {}, init model: {} instances'.format(args.rank, init_batch_size))
src_sents, tgt_sents, src_masks, tgt_masks = train_iter.get_batch(init_batch_size)
if args.rank <= 0:
logging("maximum sentence length (src, tgt): {}, {}".format(src_sents.size(1), tgt_sents.size(1)), args.log)
else:
print("maximum sentence length (src, tgt): {}, {}".format(src_sents.size(1), tgt_sents.size(1)))
flownmt.init(src_sents, tgt_sents, src_masks, tgt_masks, init_scale=1.0)
def init_posterior(args, train_iter, flownmt):
flownmt.eval()
init_batch_size = args.init_batch_size
if args.rank <= 0:
logging('Rank {}, init posterior: {} instances'.format(args.rank, init_batch_size), args.log)
else:
print('Rank {}, init posterior: {} instances'.format(args.rank, init_batch_size))
src_sents, tgt_sents, src_masks, tgt_masks = train_iter.get_batch(init_batch_size)
if args.rank <= 0:
logging("maximum sentence length (src, tgt): {}, {}".format(src_sents.size(1), tgt_sents.size(1)), args.log)
else:
print("maximum sentence length (src, tgt): {}, {}".format(src_sents.size(1), tgt_sents.size(1)))
flownmt.init_posterior(src_sents, tgt_sents, src_masks, tgt_masks, init_scale=1.0)
def init_prior(args, train_iter, flownmt):
flownmt.eval()
init_batch_size = args.init_batch_size
if args.rank <= 0:
logging('Rank {}, init prior: {} instances'.format(args.rank, init_batch_size), args.log)
else:
print('Rank {}, init prior: {} instances'.format(args.rank, init_batch_size))
src_sents, tgt_sents, src_masks, tgt_masks = train_iter.get_batch(init_batch_size)
if args.rank <= 0:
logging("maximum sentence length (src, tgt): {}, {}".format(src_sents.size(1), tgt_sents.size(1)), args.log)
else:
print("maximum sentence length (src, tgt): {}, {}".format(src_sents.size(1), tgt_sents.size(1)))
flownmt.init_prior(src_sents, tgt_sents, src_masks, tgt_masks, init_scale=1.0)
def pretrain_model(args, dataset, train_iter, val_iter, flownmt, zero_steps):
device = args.device
steps_per_epoch = args.steps_per_epoch
loss_ty_token = args.loss_type == 'token'
lr_decay = args.lr_decay
betas = (args.beta1, args.beta2)
eps = args.eps
amsgrad = args.amsgrad
weight_decay = args.weight_decay
grad_clip = args.grad_clip
batch_steps = max(1, args.batch_steps // 2)
log = args.log if args.rank <=0 else None
warmup_steps = min(4000, zero_steps)
optimizer, scheduler = get_optimizer(args.lr, flownmt.parameters(), betas, eps, amsgrad, weight_decay, lr_decay,
warmup_steps, init_lr=1e-7)
lr = scheduler.get_lr()[0]
recon_loss = torch.Tensor([0.]).to(device)
length_loss = torch.Tensor([0.]).to(device)
num_insts = torch.Tensor([0.]).to(device)
num_words = torch.Tensor([0.]).to(device)
num_nans = 0
num_back = 0
flownmt.train()
start_time = time.time()
if args.rank <= 0:
logging('Init Epoch: %d, lr=%.6f (%s), betas=(%.1f, %.3f), eps=%.1e, amsgrad=%s, l2=%.1e' % (
1, lr, lr_decay, betas[0], betas[1], eps, amsgrad, weight_decay), log)
for step, (src_sents, tgt_sents, src_masks, tgt_masks) in enumerate(train_iter):
batch_size = src_sents.size(0)
words = tgt_masks.sum().item()
recon_batch = 0.
llen_batch = 0.
optimizer.zero_grad()
src_sents = [src_sents, ] if batch_steps == 1 else src_sents.chunk(batch_steps, dim=0)
tgt_sents = [tgt_sents, ] if batch_steps == 1 else tgt_sents.chunk(batch_steps, dim=0)
src_masks = [src_masks, ] if batch_steps == 1 else src_masks.chunk(batch_steps, dim=0)
tgt_masks = [tgt_masks, ] if batch_steps == 1 else tgt_masks.chunk(batch_steps, dim=0)
# disable allreduce for accumulated gradient.
if args.rank >= 0:
flownmt.disable_allreduce()
for src, tgt, src_mask, tgt_mask in zip(src_sents[:-1], tgt_sents[:-1], src_masks[:-1], tgt_masks[:-1]):
recon, llen = flownmt.reconstruct_error(src, tgt, src_mask, tgt_mask)
recon = recon.sum()
llen = llen.sum()
if loss_ty_token:
loss = (recon + llen).div(words)
else:
loss = (recon + llen).div(batch_size)
loss.backward()
with torch.no_grad():
recon_batch += recon.item()
llen_batch += llen.item()
# enable allreduce for the last step.
if args.rank >= 0:
flownmt.enable_allreduce()
src, tgt, src_mask, tgt_mask = src_sents[-1], tgt_sents[-1], src_masks[-1], tgt_masks[-1]
recon, llen = flownmt.reconstruct_error(src, tgt, src_mask, tgt_mask)
recon = recon.sum()
llen = llen.sum()
if loss_ty_token:
loss = (recon + llen).div(words)
else:
loss = (recon + llen).div(batch_size)
loss.backward()
with torch.no_grad():
recon_batch += recon.item()
llen_batch += llen.item()
if grad_clip > 0:
grad_norm = clip_grad_norm_(flownmt.parameters(), grad_clip)
else:
grad_norm = total_grad_norm(flownmt.parameters())
if math.isnan(grad_norm):
num_nans += 1
else:
optimizer.step()
scheduler.step()
with torch.no_grad():
num_insts += batch_size
num_words += words
recon_loss += recon_batch
length_loss += llen_batch
if step % 10 == 0:
torch.cuda.empty_cache()
if step % args.log_interval == 0 and args.rank <= 0:
sys.stdout.write("\b" * num_back)
sys.stdout.write(" " * num_back)
sys.stdout.write("\b" * num_back)
nums = num_insts.item()
train_recon = recon_loss.item() / nums if nums > 0 else 0
recon_per_word = recon_loss.item() / num_words.item() if nums > 0 else 0
train_llen = length_loss.item() / nums if nums > 0 else 0
curr_step = step % steps_per_epoch
curr_lr = scheduler.get_lr()[0]
log_info = '[{}/{} ({:.0f}%) lr={:.6f} {}] recon: {:.2f} ({:.2f}), len: {:.2f}'.format(
curr_step, steps_per_epoch, 100. * curr_step / steps_per_epoch, curr_lr, num_nans,
train_recon, recon_per_word,
train_llen)
sys.stdout.write(log_info)
sys.stdout.flush()
num_back = len(log_info)
        if (step % steps_per_epoch == 0 and step > 0) or step == zero_steps:
# new epoch
epoch = step // steps_per_epoch
lr = scheduler.get_lr()[0]
if args.rank >= 0:
dist.reduce(recon_loss, dst=0, op=dist.ReduceOp.SUM)
dist.reduce(length_loss, dst=0, op=dist.ReduceOp.SUM)
dist.reduce(num_insts, dst=0, op=dist.ReduceOp.SUM)
dist.reduce(num_words, dst=0, op=dist.ReduceOp.SUM)
if args.rank <= 0:
nums = num_insts.item()
train_recon = recon_loss.item() / nums if nums > 0 else 0
recon_per_word = recon_loss.item() / num_words.item() if nums > 0 else 0
train_llen = length_loss.item() / nums if nums > 0 else 0
sys.stdout.write("\b" * num_back)
sys.stdout.write(" " * num_back)
sys.stdout.write("\b" * num_back)
logging('Average recon: {:.2f}, ({:.2f}), len: {:.2f}, time: {:.1f}s'.format(
train_recon, recon_per_word, train_llen, time.time() - start_time), log)
logging('-' * 100, log)
with torch.no_grad():
reconstruct(epoch, dataset, val_iter, flownmt, args.result_path, log)
logging('-' * 100, log)
if step == zero_steps:
optimizer.zero_grad()
break
if args.rank <= 0:
logging('Init Epoch: %d, lr=%.6f (%s), betas=(%.1f, %.3f), eps=%.1e amsgrad=%s, l2=%.1e' % (
epoch + 1, lr, lr_decay, betas[0], betas[1], eps, amsgrad, weight_decay), log)
recon_loss = torch.Tensor([0.]).to(device)
length_loss = torch.Tensor([0.]).to(device)
num_insts = torch.Tensor([0.]).to(device)
num_words = torch.Tensor([0.]).to(device)
num_nans = 0
num_back = 0
flownmt.train()
start_time = time.time()
def train(args, dataset, train_iter, val_iter, test_iter, flownmt):
epochs = args.epochs
loss_ty_token = args.loss_type == 'token'
steps_per_epoch = args.steps_per_epoch
train_k = args.train_k
grad_clip = args.grad_clip
batch_steps = args.batch_steps
device = args.device
log = args.log if args.rank <=0 else None
kl_warmups = args.kl_warmup_steps
kl_annealing = lambda step: min(1.0, (step + 1) / float(kl_warmups)) if kl_warmups > 0 else 1.0
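    # e.g. with kl_warmup_steps=10000: step 0 -> 1e-4, step 4999 -> 0.5, and
    # step >= 9999 -> 1.0; with kl_warmup_steps == 0 the weight is always 1.0.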
lr_decay = args.lr_decay
init_lr = args.lr
if lr_decay == 'expo':
lr_warmups = 0
elif lr_decay == 'inv_sqrt':
lr_warmups = 10000
else:
raise ValueError('unknown lr decay method: %s' % lr_decay)
betas = (args.beta1, args.beta2)
eps = args.eps
amsgrad = args.amsgrad
weight_decay = args.weight_decay
if args.recover > 0:
checkpoint_name = args.checkpoint_name + '{}.tar'.format(args.recover)
print(f"Rank = {args.rank}, loading from checkpoint {checkpoint_name}")
optimizer, scheduler = get_optimizer(args.lr, flownmt.parameters(), betas, eps, amsgrad=amsgrad,
weight_decay=weight_decay, lr_decay=lr_decay,
warmup_steps=lr_warmups, init_lr=init_lr)
checkpoint = torch.load(checkpoint_name, map_location=args.device)
epoch = checkpoint['epoch']
last_step = checkpoint['step']
flownmt.load_state_dict(checkpoint['model'])
optimizer.load_state_dict(checkpoint['optimizer'])
scheduler.load_state_dict(checkpoint['scheduler'])
best_epoch = checkpoint['best_epoch']
best_bleu, test_bleu = checkpoint['best_bleu']
best_nll, test_nll = checkpoint['best_nll']
best_recon, test_recon = checkpoint['best_recon']
best_kl, test_kl = checkpoint['best_kl']
best_llen, test_llen = checkpoint['best_llen']
best_ppl, test_ppl = checkpoint['best_ppl']
del checkpoint
if args.rank <= 0:
with torch.no_grad():
logging('Evaluating after resuming model...', log)
eval(args, epoch, dataset, val_iter, flownmt)
else:
optimizer, scheduler = get_optimizer(args.lr, flownmt.parameters(), betas, eps, amsgrad=amsgrad,
weight_decay=weight_decay, lr_decay=lr_decay,
warmup_steps=lr_warmups, init_lr=init_lr)
epoch = 0
best_epoch = 0
best_bleu = 0.0
best_nll = 0.0
best_recon = 0.0
best_kl = 0.0
best_llen = 0.0
best_ppl = 0.0
last_step = -1
lr = scheduler.get_lr()[0]
recon_loss = torch.Tensor([0.]).to(device)
kl_loss = torch.Tensor([0.]).to(device)
length_loss = torch.Tensor([0.]).to(device)
num_insts = torch.Tensor([0.]).to(device)
num_words = torch.Tensor([0.]).to(device)
num_nans = 0
num_back = 0
flownmt.train()
start_time = time.time()
if args.rank <= 0:
logging('Epoch: %d (lr=%.6f (%s), betas=(%.1f, %.3f), eps=%.1e, amsgrad=%s, l2=%.1e, train_k=%d)' % (
epoch + 1, lr, lr_decay, betas[0], betas[1], eps, amsgrad, weight_decay, train_k), log)
for step, (src_sents, tgt_sents, src_masks, tgt_masks) in enumerate(train_iter):
if step <= last_step:
continue
optimizer.zero_grad()
batch_size = src_sents.size(0)
words = tgt_masks.sum().item()
recon_batch = 0
kl_batch = 0
llen_batch = 0
kl_weight = kl_annealing(step)
src_sents = [src_sents, ] if batch_steps == 1 else src_sents.chunk(batch_steps, dim=0)
tgt_sents = [tgt_sents, ] if batch_steps == 1 else tgt_sents.chunk(batch_steps, dim=0)
src_masks = [src_masks, ] if batch_steps == 1 else src_masks.chunk(batch_steps, dim=0)
tgt_masks = [tgt_masks, ] if batch_steps == 1 else tgt_masks.chunk(batch_steps, dim=0)
# disable allreduce for accumulated gradient.
if args.rank >= 0:
flownmt.disable_allreduce()
for src, tgt, src_mask, tgt_mask in zip(src_sents[:-1], tgt_sents[:-1], src_masks[:-1], tgt_masks[:-1]):
recon, kl, llen = flownmt.loss(src, tgt, src_masks=src_mask, tgt_masks=tgt_mask,
nsamples=train_k)
recon = recon.sum()
kl = kl.sum()
llen = llen.sum()
if loss_ty_token:
loss = (recon + kl * kl_weight + llen).div(words)
else:
loss = (recon + kl * kl_weight + llen).div(batch_size)
loss.backward()
with torch.no_grad():
recon_batch += recon.item()
kl_batch += kl.item()
llen_batch += llen.item()
# enable allreduce for the last step.
if args.rank >= 0:
flownmt.enable_allreduce()
src, tgt, src_mask, tgt_mask = src_sents[-1], tgt_sents[-1], src_masks[-1], tgt_masks[-1]
recon, kl, llen = flownmt.loss(src, tgt, src_masks=src_mask, tgt_masks=tgt_mask,
nsamples=train_k)
recon = recon.sum()
kl = kl.sum()
llen = llen.sum()
if loss_ty_token:
loss = (recon + kl * kl_weight + llen).div(words)
else:
loss = (recon + kl * kl_weight + llen).div(batch_size)
loss.backward()
with torch.no_grad():
recon_batch += recon.item()
kl_batch += kl.item()
llen_batch += llen.item()
if grad_clip > 0:
grad_norm = clip_grad_norm_(flownmt.parameters(), grad_clip)
else:
grad_norm = total_grad_norm(flownmt.parameters())
if math.isnan(grad_norm):
num_nans += 1
else:
optimizer.step()
scheduler.step()
num_insts += batch_size
num_words += words
kl_loss += kl_batch
recon_loss += recon_batch
length_loss += llen_batch
if step % 10 == 0:
torch.cuda.empty_cache()
if step % args.log_interval == 0 and args.rank <= 0:
sys.stdout.write("\b" * num_back)
sys.stdout.write(" " * num_back)
sys.stdout.write("\b" * num_back)
nums = num_insts.item()
train_recon = recon_loss.item() / nums if nums > 0 else 0
train_kl = kl_loss.item() / nums if nums > 0 else 0
train_llen = length_loss.item() / nums if nums > 0 else 0
train_nll = train_recon + train_kl
train_ppl = np.exp(train_nll * nums / num_words.item()) if nums > 0 else 0
train_ppl = float('inf') if train_ppl > 10000 else train_ppl
curr_lr = scheduler.get_lr()[0]
curr_step = step if step == steps_per_epoch else step % steps_per_epoch
log_info = '[{}/{} ({:.0f}%) lr={:.6f}, klw={:.2f} {}] NLL: {:.2f} (recon: {:.2f}, kl: {:.2f}), len: {:.2f}, PPL: {:.2f}'.format(
curr_step, steps_per_epoch, 100. * curr_step / steps_per_epoch, curr_lr, kl_weight, num_nans,
train_nll, train_recon, train_kl, train_llen, train_ppl)
sys.stdout.write(log_info)
sys.stdout.flush()
num_back = len(log_info)
if step % steps_per_epoch == 0 and step > 0:
# new epoch
epoch = step // steps_per_epoch
lr = scheduler.get_lr()[0]
if args.rank >= 0:
dist.reduce(recon_loss, dst=0, op=dist.ReduceOp.SUM)
dist.reduce(kl_loss, dst=0, op=dist.ReduceOp.SUM)
dist.reduce(length_loss, dst=0, op=dist.ReduceOp.SUM)
dist.reduce(num_insts, dst=0, op=dist.ReduceOp.SUM)
dist.reduce(num_words, dst=0, op=dist.ReduceOp.SUM)
if args.rank <= 0:
nums = num_insts.item()
train_recon = recon_loss.item() / nums if nums > 0 else 0
train_kl = kl_loss.item() / nums if nums > 0 else 0
train_llen = length_loss.item() / nums if nums > 0 else 0
train_nll = train_recon + train_kl
train_ppl = np.exp(train_nll * nums / num_words.item()) if nums > 0 else 0
train_ppl = float('inf') if train_ppl > 10000 else train_ppl
sys.stdout.write("\b" * num_back)
sys.stdout.write(" " * num_back)
sys.stdout.write("\b" * num_back)
logging('Average NLL: {:.2f} (recon: {:.2f}, kl: {:.2f}), len: {:.2f}, PPL: {:.2f}, time: {:.1f}s'.format(
train_nll, train_recon, train_kl, train_llen, train_ppl, time.time() - start_time), log)
logging('-' * 100, log)
with torch.no_grad():
logging('Evaluating validation data...', log)
bleu, nll, recon, kl, llen, ppl = eval(args, epoch, dataset, val_iter, flownmt)
if bleu > best_bleu or best_epoch == 0 or ppl < best_ppl:
flownmt.save(args.model_path)
best_bleu = bleu
best_epoch = epoch
best_nll = nll
best_recon = recon
best_kl = kl
best_llen = llen
best_ppl = ppl
logging('Evaluating test data...', log)
test_bleu, test_nll, test_recon, test_kl, test_llen, test_ppl = eval(args, epoch, dataset, test_iter, flownmt)
logging('Best Dev NLL: {:.2f} (recon: {:.2f}, kl: {:.2f}), len: {:.2f}, PPL: {:.2f}, BLEU: {:.2f}, epoch: {}'.format(
best_nll, best_recon, best_kl, best_llen, best_ppl, best_bleu, best_epoch), log)
logging('Best Test NLL: {:.2f} (recon: {:.2f}, kl: {:.2f}), len: {:.2f}, PPL: {:.2f}, BLEU: {:.2f}, epoch: {}'.format(
test_nll, test_recon, test_kl, test_llen, test_ppl, test_bleu, best_epoch), log)
logging('=' * 100, log)
# save checkpoint
checkpoint_name = args.checkpoint_name + '{}.tar'.format(epoch)
torch.save({'epoch': epoch,
'step': step,
'model': flownmt.state_dict(),
'optimizer': optimizer.state_dict(),
'scheduler': scheduler.state_dict(),
'best_bleu': [best_bleu, test_bleu],
'best_epoch': best_epoch,
'best_nll': [best_nll, test_nll],
'best_recon': [best_recon, test_recon],
'best_kl': [best_kl, test_kl],
'best_llen': [best_llen, test_llen],
'best_ppl': [best_ppl, test_ppl]}, checkpoint_name)
if epoch == epochs:
break
if args.rank <= 0:
logging('Epoch: %d (lr=%.6f (%s), betas=(%.1f, %.3f), eps=%.1e, amsgrad=%s, l2=%.1e, train_k=%d)' % (
epoch + 1, lr, lr_decay, betas[0], betas[1], eps, amsgrad, weight_decay, train_k), log)
recon_loss = torch.Tensor([0.]).to(device)
kl_loss = torch.Tensor([0.]).to(device)
length_loss = torch.Tensor([0.]).to(device)
num_insts = torch.Tensor([0.]).to(device)
num_words = torch.Tensor([0.]).to(device)
num_nans = 0
num_back = 0
flownmt.train()
start_time = time.time()
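# --- Illustrative sketch (added for exposition; `_sketch_train_ppl` is a
# hypothetical helper, not part of the original script). The epoch summary
# above renormalizes the accumulated per-instance NLL (recon + kl) to a
# per-word basis before exponentiating. A minimal version with plain floats:
def _sketch_train_ppl(recon_total, kl_total, num_insts, num_words):
    import math
    if num_insts == 0 or num_words == 0:
        return 0.0
    nll_per_inst = (recon_total + kl_total) / num_insts
    ppl = math.exp(nll_per_inst * num_insts / num_words)
    return float('inf') if ppl > 10000 else ppl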
def main(args):
args, dataset, flownmt = setup(args)
train_iter, val_iter, test_iter = init_dataloader(args, dataset)
pretrain = args.recover < 0 and args.init_steps > 0
checkpoint_name = args.checkpoint_name + '{}.tar'.format(0)
if args.rank <= 0:
# initialize model (rank 0 or -1)
# number of parameters
logging('Rank %d # of Parameters: %d' % (args.rank, sum([param.numel() for param in flownmt.parameters()])), args.log)
if args.recover == 0:
flownmt.load_core(checkpoint_name, args.device, load_prior=True)
with torch.no_grad():
reconstruct(0, dataset, val_iter, flownmt, args.result_path, args.log)
logging('-' * 100, args.log)
if args.rank >= 0:
flownmt.init_distributed(args.rank, args.local_rank)
if pretrain:
init_posterior(args, train_iter, flownmt)
elif args.recover < 0:
init_model(args, train_iter, flownmt)
if args.rank >= 0:
flownmt.sync_params()
if pretrain:
zero_steps = args.init_steps
pretrain_model(args, dataset, train_iter, val_iter, flownmt, zero_steps)
init_prior(args, train_iter, flownmt)
if args.rank >= 0:
flownmt.sync_params()
if args.rank <= 0:
flownmt.save_core(checkpoint_name)
train(args, dataset, train_iter, val_iter, test_iter, flownmt)
if __name__ == "__main__":
args = parse_args()
    assert args.rank == -1 and args.local_rank == 0, 'a single-process run should have rank -1 and local rank 0, got rank ({}) and local rank ({})'.format(args.rank, args.local_rank)
main(args)
| 34,259 | 41.559006 | 151 | py |
flowseq | flowseq-master/experiments/slurm.py | import sys
import os
current_path = os.path.dirname(os.path.realpath(__file__))
root_path = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(root_path)
import torch.multiprocessing as mp
import experiments.options as options
from experiments.nmt import main as single_process_main
def main():
args = options.parse_distributed_args()
args_dict = vars(args)
args_dict.pop('master_addr')
    args_dict.pop('master_port')  # MASTER_PORT is taken from the SLURM environment instead
args_dict.pop('nnodes')
args_dict.pop('nproc_per_node')
args_dict.pop('node_rank')
current_env = os.environ
nnodes = int(current_env['SLURM_NNODES'])
dist_world_size = int(current_env['SLURM_NTASKS'])
args.rank = int(current_env['SLURM_PROCID'])
args.local_rank = int(current_env['SLURM_LOCALID'])
print('start process: rank={}({}), master addr={}, port={}, nnodes={}, world size={}'.format(
args.rank, args.local_rank, current_env["MASTER_ADDR"], current_env["MASTER_PORT"], nnodes, dist_world_size))
current_env["WORLD_SIZE"] = str(dist_world_size)
create_vocab = args_dict.pop('create_vocab')
assert not create_vocab
args.create_vocab = False
batch_size = args.batch_size // dist_world_size
args.batch_size = batch_size
single_process_main(args)
if __name__ == "__main__":
mp.set_start_method('forkserver')
main()
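# --- Illustrative sketch (hypothetical helper, not part of the original file;
# assumes the script is launched under SLURM with one task per process). It
# restates the rank bookkeeping used in main() above:
def _sketch_slurm_ranks(env):
    world_size = int(env['SLURM_NTASKS'])   # total processes across all nodes
    rank = int(env['SLURM_PROCID'])         # global rank of this process
    local_rank = int(env['SLURM_LOCALID'])  # rank within the current node
    return world_size, rank, local_rank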
| 1,374 | 27.645833 | 117 | py |
flowseq | flowseq-master/experiments/translate.py | import os
import sys
current_path = os.path.dirname(os.path.realpath(__file__))
root_path = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(root_path)
import time
import json
import random
import numpy as np
import torch
from flownmt.data import NMTDataSet, DataIterator
from flownmt import FlowNMT
from experiments.options import parse_translate_args
def calc_bleu(fref, fmt, result_path):
script = os.path.join(current_path, 'scripts/multi-bleu.perl')
temp = os.path.join(result_path, 'tmp')
os.system("perl %s %s < %s > %s" % (script, fref, fmt, temp))
    with open(temp, 'r') as f:
        bleu = f.read().strip()
bleu = bleu.split(",")[0].split("=")
if len(bleu) < 2:
return 0.0
bleu = float(bleu[1].strip())
return bleu
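# --- Illustrative sketch (hypothetical helper; the sample line below assumes
# the usual multi-bleu.perl output format). calc_bleu() splits the first line
# on ',' and then '=' to recover the corpus-level score:
def _sketch_parse_bleu(line="BLEU = 27.31, 60.5/33.2/20.4/13.0 (BP=1.000)"):
    head = line.split(",")[0].split("=")
    return float(head[1].strip()) if len(head) == 2 else 0.0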
def translate_argmax(dataset, dataloader, flownmt, result_path, outfile, tau, n_tr):
flownmt.eval()
translations = []
lengths = []
length_err = 0
num_insts = 0
start_time = time.time()
num_back = 0
for step, (src, tgt, src_masks, tgt_masks) in enumerate(dataloader):
trans, lens = flownmt.translate_argmax(src, src_masks, n_tr=n_tr, tau=tau)
translations.append(trans)
lengths.append(lens)
length_err += (lens.float() - tgt_masks.sum(dim=1)).abs().sum().item()
num_insts += src.size(0)
if step % 10 == 0:
sys.stdout.write("\b" * num_back)
sys.stdout.write(" " * num_back)
sys.stdout.write("\b" * num_back)
log_info = 'argmax translating (tau={:.1f}, n_tr={})...{}'.format(tau, n_tr, num_insts)
sys.stdout.write(log_info)
sys.stdout.flush()
num_back = len(log_info)
print('time: {:.1f}s'.format(time.time() - start_time))
outfile = os.path.join(result_path, outfile)
dataset.dump_to_file(translations, lengths, outfile)
bleu = calc_bleu(dataloader.tgt_sort_origin_path, outfile, result_path)
print('#SENT: {}, Length Err: {:.1f}, BLEU: {:.2f}'.format(num_insts, length_err / num_insts, bleu))
def translate_iw(dataset, dataloader, flownmt, result_path, outfile, tau, n_len, n_tr):
flownmt.eval()
iwk = 4
translations = []
lengths = []
length_err = 0
num_insts = 0
start_time = time.time()
num_back = 0
for step, (src, tgt, src_masks, tgt_masks) in enumerate(dataloader):
trans, lens = flownmt.translate_iw(src, src_masks, n_len=n_len, n_tr=n_tr, tau=tau, k=iwk)
translations.append(trans)
lengths.append(lens)
length_err += (lens.float() - tgt_masks.sum(dim=1)).abs().sum().item()
num_insts += src.size(0)
if step % 10 == 0:
sys.stdout.write("\b" * num_back)
sys.stdout.write(" " * num_back)
sys.stdout.write("\b" * num_back)
log_info = 'importance weighted translating (tau={:.1f}, n_len={}, n_tr={})...{}'.format(tau, n_len, n_tr, num_insts)
sys.stdout.write(log_info)
sys.stdout.flush()
num_back = len(log_info)
print('time: {:.1f}s'.format(time.time() - start_time))
outfile = os.path.join(result_path, outfile)
dataset.dump_to_file(translations, lengths, outfile)
bleu = calc_bleu(dataloader.tgt_sort_origin_path, outfile, result_path)
print('#SENT: {}, Length Err: {:.1f}, BLEU: {:.2f}'.format(num_insts, length_err / num_insts, bleu))
def sample(dataset, dataloader, flownmt, result_path, outfile, tau, n_len, n_tr):
flownmt.eval()
lengths = []
translations = []
num_insts = 0
start_time = time.time()
num_back = 0
for step, (src, tgt, src_masks, tgt_masks) in enumerate(dataloader):
trans, lens = flownmt.translate_sample(src, src_masks, n_len=n_len, n_tr=n_tr, tau=tau)
translations.append(trans)
lengths.append(lens)
num_insts += src.size(0)
if step % 10 == 0:
sys.stdout.write("\b" * num_back)
sys.stdout.write(" " * num_back)
sys.stdout.write("\b" * num_back)
log_info = 'sampling (tau={:.1f}, n_len={}, n_tr={})...{}'.format(tau, n_len, n_tr, num_insts)
sys.stdout.write(log_info)
sys.stdout.flush()
num_back = len(log_info)
print('time: {:.1f}s'.format(time.time() - start_time))
outfile = os.path.join(result_path, outfile)
dataset.dump_to_file(translations, lengths, outfile, post_edit=False)
def setup(args):
args.cuda = torch.cuda.is_available()
random_seed = args.seed
random.seed(random_seed)
np.random.seed(random_seed)
torch.manual_seed(random_seed)
device = torch.device('cuda', 0) if args.cuda else torch.device('cpu')
if args.cuda:
torch.cuda.set_device(device)
torch.cuda.manual_seed(random_seed)
torch.backends.cudnn.benchmark = False
model_path = args.model_path
result_path = os.path.join(model_path, 'translations')
args.result_path = result_path
params = json.load(open(os.path.join(model_path, 'config.json'), 'r'))
src_lang = params['src']
tgt_lang = params['tgt']
data_path = args.data_path
vocab_path = os.path.join(model_path, 'vocab')
src_vocab_path = os.path.join(vocab_path, '{}.vocab'.format(src_lang))
tgt_vocab_path = os.path.join(vocab_path, '{}.vocab'.format(tgt_lang))
src_vocab_size = params['src_vocab_size']
tgt_vocab_size = params['tgt_vocab_size']
args.max_src_length = params.pop('max_src_length')
args.max_tgt_length = params.pop('max_tgt_length')
dataset = NMTDataSet(data_path, src_lang, tgt_lang,
src_vocab_path, tgt_vocab_path,
src_vocab_size, tgt_vocab_size,
subword=args.subword, create_vocab=False)
assert src_vocab_size == dataset.src_vocab_size
assert tgt_vocab_size == dataset.tgt_vocab_size
flownmt = FlowNMT.load(model_path, device=device)
args.length_unit = flownmt.length_unit
args.device = device
return args, dataset, flownmt
def init_dataloader(args, dataset):
eval_batch = args.batch_size
val_iter = DataIterator(dataset, eval_batch, 0, args.max_src_length, args.max_tgt_length, 1000, args.device, args.result_path,
bucket_data=args.bucket_batch, multi_scale=args.length_unit, corpus="dev")
test_iter = DataIterator(dataset, eval_batch, 0, args.max_src_length, args.max_tgt_length, 1000, args.device, args.result_path,
bucket_data=args.bucket_batch, multi_scale=args.length_unit, corpus="test")
return val_iter, test_iter
def main(args):
args, dataset, flownmt = setup(args)
print(args)
val_iter, test_iter = init_dataloader(args, dataset)
result_path = args.result_path
if args.decode == 'argmax':
tau = args.tau
n_tr = args.ntr
outfile = 'argmax.t{:.1f}.ntr{}.dev.mt'.format(tau, n_tr)
translate_argmax(dataset, val_iter, flownmt, result_path, outfile, tau, n_tr)
outfile = 'argmax.t{:.1f}.ntr{}.test.mt'.format(tau, n_tr)
translate_argmax(dataset, test_iter, flownmt, result_path, outfile, tau, n_tr)
elif args.decode == 'iw':
tau = args.tau
n_len = args.nlen
n_tr = args.ntr
outfile = 'iw.t{:.1f}.nlen{}.ntr{}.dev.mt'.format(tau, n_len, n_tr)
translate_iw(dataset, val_iter, flownmt, result_path, outfile, tau, n_len, n_tr)
outfile = 'iw.t{:.1f}.nlen{}.ntr{}.test.mt'.format(tau, n_len, n_tr)
translate_iw(dataset, test_iter, flownmt, result_path, outfile, tau, n_len, n_tr)
else:
assert not args.bucket_batch
tau = args.tau
n_len = args.nlen
n_tr = args.ntr
outfile = 'sample.t{:.1f}.nlen{}.ntr{}.dev.mt'.format(tau, n_len, n_tr)
sample(dataset, val_iter, flownmt, result_path, outfile, tau, n_len, n_tr)
outfile = 'sample.t{:.1f}.nlen{}.ntr{}.test.mt'.format(tau, n_len, n_tr)
sample(dataset, test_iter, flownmt, result_path, outfile, tau, n_len, n_tr)
if __name__ == "__main__":
args = parse_translate_args()
with torch.no_grad():
main(args) | 8,142 | 39.311881 | 131 | py |
flowseq | flowseq-master/experiments/distributed.py | import sys
import os
current_path = os.path.dirname(os.path.realpath(__file__))
root_path = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(root_path)
import json
import signal
import threading
import torch
from flownmt.data import NMTDataSet
import experiments.options as options
from experiments.nmt import main as single_process_main
def create_dataset(args):
model_path = args.model_path
if not os.path.exists(model_path):
os.makedirs(model_path)
result_path = os.path.join(model_path, 'translations')
if not os.path.exists(result_path):
os.makedirs(result_path)
vocab_path = os.path.join(model_path, 'vocab')
if not os.path.exists(vocab_path):
os.makedirs(vocab_path)
data_path = args.data_path
src_lang = args.src
tgt_lang = args.tgt
src_vocab_path = os.path.join(vocab_path, '{}.vocab'.format(src_lang))
tgt_vocab_path = os.path.join(vocab_path, '{}.vocab'.format(tgt_lang))
params = json.load(open(args.config, 'r'))
src_max_vocab = params['{}_vocab_size'.format(src_lang)]
tgt_max_vocab = params['{}_vocab_size'.format(tgt_lang)]
NMTDataSet(data_path, src_lang, tgt_lang, src_vocab_path, tgt_vocab_path, src_max_vocab, tgt_max_vocab,
subword=args.subword, create_vocab=True)
def main():
args = options.parse_distributed_args()
args_dict = vars(args)
nproc_per_node = args_dict.pop('nproc_per_node')
nnodes = args_dict.pop('nnodes')
node_rank = args_dict.pop('node_rank')
# world size in terms of number of processes
dist_world_size = nproc_per_node * nnodes
# set PyTorch distributed related environmental variables
current_env = os.environ
current_env["MASTER_ADDR"] = args_dict.pop('master_addr')
current_env["MASTER_PORT"] = str(args_dict.pop('master_port'))
current_env["WORLD_SIZE"] = str(dist_world_size)
create_vocab = args_dict.pop('create_vocab')
if create_vocab:
create_dataset(args)
args.create_vocab = False
batch_size = args.batch_size // dist_world_size
args.batch_size = batch_size
mp = torch.multiprocessing.get_context('spawn')
# Create a thread to listen for errors in the child processes.
error_queue = mp.SimpleQueue()
error_handler = ErrorHandler(error_queue)
processes = []
for local_rank in range(0, nproc_per_node):
# each process's rank
dist_rank = nproc_per_node * node_rank + local_rank
args.rank = dist_rank
args.local_rank = local_rank
process = mp.Process(target=run, args=(args, error_queue, ), daemon=True)
process.start()
error_handler.add_child(process.pid)
processes.append(process)
for process in processes:
process.join()
def run(args, error_queue):
try:
single_process_main(args)
except KeyboardInterrupt:
pass # killed by parent, do nothing
except Exception:
# propagate exception to parent process, keeping original traceback
import traceback
error_queue.put((args.rank, traceback.format_exc()))
class ErrorHandler(object):
"""A class that listens for exceptions in children processes and propagates
the tracebacks to the parent process."""
def __init__(self, error_queue):
self.error_queue = error_queue
self.children_pids = []
self.error_thread = threading.Thread(target=self.error_listener, daemon=True)
self.error_thread.start()
signal.signal(signal.SIGUSR1, self.signal_handler)
def add_child(self, pid):
self.children_pids.append(pid)
def error_listener(self):
(rank, original_trace) = self.error_queue.get()
self.error_queue.put((rank, original_trace))
os.kill(os.getpid(), signal.SIGUSR1)
def signal_handler(self, signalnum, stackframe):
for pid in self.children_pids:
os.kill(pid, signal.SIGINT) # kill children processes
(rank, original_trace) = self.error_queue.get()
msg = "\n\n-- Tracebacks above this line can probably be ignored --\n\n"
msg += original_trace
raise Exception(msg)
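# --- Illustrative sketch (hypothetical helper, not part of the original file).
# Minimal usage pattern for ErrorHandler, mirroring the spawning logic in
# main() above:
def _sketch_error_handler_usage(worker, nproc):
    mp = torch.multiprocessing.get_context('spawn')
    error_queue = mp.SimpleQueue()
    handler = ErrorHandler(error_queue)
    processes = []
    for rank in range(nproc):
        process = mp.Process(target=worker, args=(rank, error_queue), daemon=True)
        process.start()
        handler.add_child(process.pid)
        processes.append(process)
    for process in processes:
        process.join()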
if __name__ == "__main__":
main()
| 4,220 | 30.736842 | 107 | py |
PCVLabDrone2021 | PCVLabDrone2021-main/UAV Geolocalization/test.py | from pathlib import Path
import os
import gc
import argparse
import cv2
from PIL import Image
Image.MAX_IMAGE_PIXELS = 933120000
import numpy as np
import matplotlib.cm as cm
from pyqtree import Index
import pickle
import torch
import time
from models.matching import Matching
from models.utils.utils import AverageTimer, VideoStreamer, frame2tensor, remove_kpts_on_building, segment_keypoints, update_last_data
from models.utils.utils_loc import generate_kml, retrieve_init_pixposition, update_current_GPS, UAV_loc_by_pix_PAffine
from models.utils.utils_plot import make_localization_plot
torch.set_grad_enabled(False)
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='SuperGlue demo',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument(
'--input', type=str, default='./assets/DJI_data/images/',
help='URL of an IP camera, '
'or path to an image directory or movie file')
parser.add_argument(
'--output_dir', type=str, default='./output/images/',
help='Directory where to write output frames (If None, no output)')
parser.add_argument(
'--image_glob', type=str, nargs='+', default=['*.png', '*.jpg', '*.jpeg'],
help='Glob if a directory of images is specified')
parser.add_argument(
'--skip', type=int, default=1,
help='Images to skip if input is a movie or directory')
parser.add_argument(
'--max_length', type=int, default=1000000,
help='Maximum length if input is a movie or directory')
parser.add_argument(
'--resize', type=int, nargs='+', default=[1280, 720],
help='Resize the input image before running inference. If two numbers, '
'resize to the exact dimensions, if one number, resize the max '
'dimension, if -1, do not resize')
parser.add_argument(
'--superglue', choices={'indoor', 'outdoor'}, default='outdoor',
help='SuperGlue weights')
parser.add_argument(
'--apply_GIS', action='store_true',
help='segment matches keypoints from building and non-building')
parser.add_argument(
'--max_keypoints', type=int, default=-1,
help='Maximum number of keypoints detected by Superpoint'
' (\'-1\' keeps all keypoints)')
parser.add_argument(
'--keypoint_threshold', type=float, default=0.005,
help='SuperPoint keypoint detector confidence threshold')
parser.add_argument(
'--nms_radius', type=int, default=4,
help='SuperPoint Non Maximum Suppression (NMS) radius'
' (Must be positive)')
parser.add_argument(
'--sinkhorn_iterations', type=int, default=20,
help='Number of Sinkhorn iterations performed by SuperGlue')
parser.add_argument(
'--match_threshold', type=float, default=0.2,
help='SuperGlue match threshold')
parser.add_argument(
'--switch_threshold', type=int, default=50,
help='Threshold for switching keypoints from non-building to building')
parser.add_argument(
'--patience', type=int, default=10,
        help='Patience for early stopping if the UAV position has not been updated for over 10 seconds (video) or 10 frames (images); 0 disables it.')
parser.add_argument(
'--KF_dt', type=float, default=1.0,
help='Time between steps in seconds')
parser.add_argument(
'--show_keypoints', action='store_true',
help='Show the detected keypoints')
parser.add_argument(
'--matching_vis', action='store_true',
help='Show the matched pairs')
parser.add_argument(
'--force_cpu', action='store_true',
help='Force pytorch to run in CPU mode.')
parser.add_argument(
'--satmap_init_gps', type=float, nargs='+', default=[40.01872927, -83.033835], # large sat
help='GPS of top-left corner of satellite map')
parser.add_argument(
'--Init_GPS', type=float, nargs='+', default=[40.012701, -83.009691], # Demo starting point GPS
help='Initial drone flight GPS')
parser.add_argument(
'--Orien', type=float, default=0.0,
        help='UAV initial orientation is the angle to rotate the first image clockwise to the North direction, ranging from 0-360.')
parser.add_argument(
'--Init_height', type=float, default=None,
help='UAV initial flight height')
parser.add_argument(
'--bin_interval', type=int, default=10,
help='Divide 360 degrees into multiple bins, each bin shares certain degrees')
parser.add_argument(
'--range', type=int, nargs='+', default=[900, 900],
help='Crop partial satellite image size (WxH) as basemap for matching')
parser.add_argument(
'--update_freq', type=int, default=3,
        help='Basemap update frequency: update the basemap once the UAV center moves beyond 1/update_freq of the basemap range')
opt = parser.parse_args()
print(opt)
if len(opt.resize) == 2 and opt.resize[1] == -1:
opt.resize = opt.resize[0:1]
if len(opt.resize) == 2:
print('Will resize to {}x{} (WxH)'.format(
opt.resize[0], opt.resize[1]))
elif len(opt.resize) == 1 and opt.resize[0] > 0:
print('Will resize max dimension to {}'.format(opt.resize[0]))
elif len(opt.resize) == 1:
print('Will not resize images')
else:
raise ValueError('Cannot specify more than two integers for --resize')
device = 'cuda' if torch.cuda.is_available() and not opt.force_cpu else 'cpu'
print('Running inference on device \"{}\"'.format(device))
config = {
'superpoint': {
'nms_radius': opt.nms_radius,
'keypoint_threshold': opt.keypoint_threshold,
'max_keypoints': opt.max_keypoints
},
'superglue': {
'weights': opt.superglue,
'sinkhorn_iterations': opt.sinkhorn_iterations,
'match_threshold': opt.match_threshold,
}
}
matching = Matching(config).eval().to(device)
timer = AverageTimer()
# Load sat map info and its quadtree indexing file
satmap_kpts = np.load('./featurebase/satmap_kpts.npz')
image0, keypoints0, descriptors0, scores0 = satmap_kpts['image0'], satmap_kpts['keypoints0'], satmap_kpts['descriptors0'], satmap_kpts['scores0']
del satmap_kpts; gc.collect()
print('Satellite image size is {}x{} (HxW), containing {} keypoints'.format(*image0.shape, len(keypoints0)))
print('Max basemap range is {}x{} (WxH)'.format(*opt.range))
timer.update('Successfully loaded satellite map data, loading time',printout=True)
if os.path.exists('./featurebase/QuadTree_idx.pkl'):
with open('./featurebase/QuadTree_idx.pkl', 'rb') as inp:
spindex = pickle.load(inp)
else:
spindex = Index(bbox=(0, 0, image0.shape[1], image0.shape[0])) # Area of WxH
for i in range(len(keypoints0)):
w, h = keypoints0[i]
spindex.insert(i, (w,h,w,h))
# save quadtree indexing
with open('./featurebase/QuadTree_idx.pkl', 'wb') as outp:
pickle.dump(spindex, outp, pickle.HIGHEST_PROTOCOL)
timer.update('Successfully loaded satellite keypoints quadtree indexing, loading time',printout=True)
# Load satellite image GIS labels
mask = np.asarray(Image.open('./featurebase/GIS_mask.png'), dtype=np.int32) if opt.apply_GIS else None
timer.update('Successfully loaded GIS data, loading time',printout=True)
# Initialize frame0 (last_data) at the beginning
    c_w, c_h = retrieve_init_pixposition(opt.satmap_init_gps, opt.Init_GPS)  # basemap center, in pixels, relative to the top-left corner of the satellite map
r_w, r_h = min(opt.range[0], c_w), min(opt.range[1], c_h) # in case it reaches satmap boundary
xmin, ymin, xmax, ymax = c_w-r_w, c_h-r_h, c_w+r_w, c_h+r_h
base_map = image0[ymin:ymax, xmin:xmax]
UAV_pix_pos_offset = [c_w-r_w, c_h-r_h]
timer.reset()
last_data, labels = update_last_data((image0, keypoints0, descriptors0, scores0), mask, spindex, (xmin, ymin, xmax, ymax), device) # return updated GIS labels if required
timer.update('Successfully updated last data, updating time',printout=True)
if opt.output_dir is not None:
print('==> Will write outputs to {}'.format(opt.output_dir))
Path(opt.output_dir).mkdir(exist_ok=True)
# dataloader
vs = VideoStreamer(opt)
frame, ret = vs.next_frame(1.0, go_next=False)
assert ret, 'Error when reading the first frame (try different --input?)'
# Initial parameters setup
timer = AverageTimer()
center, height = (r_w, r_h), opt.Init_height
not_valid, points, img_box = None, None, None
GPS = [] # save GPS as kml file which could be visualized at Google Earth
pred_GPS = opt.Init_GPS
Bins = round(opt.Orien/opt.bin_interval)
not_updated, offset, update_scale = 0, 0, 1.0
while True:
# update UAV rotation bins
Bins -= offset
Bins = (360/opt.bin_interval+Bins) if Bins<0 else Bins%(360/opt.bin_interval)
# update basemap range if center shift over range/2
if abs(center[0]-r_w)>r_w/opt.update_freq or abs(center[1]-r_h)>r_h/opt.update_freq:
c_w, c_h = center[0]+UAV_pix_pos_offset[0], center[1]+UAV_pix_pos_offset[1]
r_w, r_h = min(opt.range[0], c_w), min(opt.range[1], c_h) # in case it reaches satmap boundary
xmin, ymin, xmax, ymax = c_w-r_w, c_h-r_h, c_w+r_w, c_h+r_h
last_data, labels = update_last_data((image0, keypoints0, descriptors0, scores0), mask, spindex, (xmin, ymin, xmax, ymax), device) # return updated GIS labels if required
base_map = image0[ymin:ymax, xmin:xmax]
center, UAV_pix_pos_offset = (r_w, r_h), [c_w-r_w, c_h-r_h]
frame, ret = vs.next_frame(update_scale, rotate=True, bins=Bins)
if not ret or not_updated>opt.patience:
print('Finished UAV Geolocalization Inference')
break
stem1 = vs.i-1
timer.update('data')
frame_tensor = frame2tensor(frame, device)
pred = matching({**last_data, 'image1': frame_tensor})
kpts0 = last_data['keypoints0'][0].cpu().numpy()
kpts1 = pred['keypoints1'][0].cpu().numpy()
matches = pred['matches0'][0].cpu().numpy()
confidence = pred['matching_scores0'][0].cpu().numpy()
valid = matches > -1
if opt.apply_GIS:
valid, not_valid, use_ground, mkpts_count = segment_keypoints(valid, labels, opt.switch_threshold)
mkpts0 = kpts0[valid]
mkpts1 = kpts1[matches[valid]]
        # keep the matched keypoints that were not selected for localization
mkpts0_other = kpts0[not_valid]
mkpts1_other = kpts1[matches[not_valid]]
color = cm.jet(confidence[valid])
timer.update('Matching')
        # Geolocalize UAV once the number of matched keypoints reaches the switch threshold
if len(mkpts0)>=opt.switch_threshold:
mkpts = (use_ground, mkpts0, mkpts1, mkpts0_other, mkpts1_other)
# Geolocalize UAV with matched keypoints
center, points, img_box, M, offset, update_scale, avg_building_h = UAV_loc_by_pix_PAffine(frame, mkpts, UAV_pix_pos_offset, opt, vs.scale, not_updated, bins=Bins)
current_GPS = update_current_GPS(opt.satmap_init_gps, (center[0]+UAV_pix_pos_offset[0], center[1]+UAV_pix_pos_offset[1]))
            height = -1.23904244+vs.scale*111.67527558  # empirical linear fit from image scale to flight height (meters)
GeoLoc, not_updated = True, 0
else:
GeoLoc, offset = False, 0 # Initialize rotation offset
not_updated = not_updated+1 # Not able to geolocalize UAV, not_updated count+1
M, update_scale = [], 1.0 # Zeroize PAffine transformation mask and scale if unable to geolocalize UAV
            print('Not enough matched keypoint pairs for {} consecutive frames'.format(not_updated))
if GeoLoc:
GPS.append([stem1, *current_GPS])
timer.update('Geolocalization')
# Visualize the matches.
if opt.matching_vis:
text = [
'Estimated GPS: ({:.6f}, {:.6f})'.format(*current_GPS),
'Heading Direction (degrees): {}'.format(int(360-Bins*opt.bin_interval)%360), # heading_direction = 360 - rotation_angle_offset
'Flight Height (meters): {}'.format(int(round(height)))
]
# Display extra parameter info.
k_thresh = matching.superpoint.config['keypoint_threshold']
m_thresh = matching.superglue.config['match_threshold']
small_text = [
'Keypoints: {}:{}'.format(len(kpts0), len(kpts1)),
'Ground/Building/Total: {}/{}/{}'.format(*mkpts_count, sum(mkpts_count)),
                'Inliers pct: {:.2f}%'.format(np.sum(M)/len(M)*100 if len(M) > 0 else 0.0),
'Scale/Update_scale : {:.2f}/{:.4f}'.format(vs.scale, update_scale)
]
out = make_localization_plot(GeoLoc, base_map, frame, kpts0, kpts1, mkpts0, mkpts1,color, opt.resize, center, points,
img_box, text, path=None, show_keypoints=opt.show_keypoints, small_text=small_text)
out = cv2.resize(out, (0,0), fx=1/2, fy=1/2)
# save sat image and frame t matched output
if opt.output_dir is not None:
stem = 'matches_{:06}'.format(stem1)
out_file = str(Path(opt.output_dir, stem + '.png'))
print('\n\nWriting image to {}'.format(out_file))
cv2.imwrite(out_file, out)
timer.update('Matching Vis')
timer.print(text='Timer {:04d}'.format(stem1))
cv2.destroyAllWindows()
vs.cleanup()
    # save the predicted GPS trajectory to a .txt file
    with open(opt.output_dir+"GPS_pred.txt", "w") as f:
        for item in GPS:
            f.write(f'{item[0]}\t{item[1]}\t{item[2]}\n')
# save predicted GPS as .kml file
GPS_kml = [(item[2], item[1], 1.0) for item in GPS]
kml = generate_kml(GPS_kml, is_gt=False)
kml.save(str(Path(opt.output_dir, 'GPS_pred.kml')))
print('Saving predicted UAV GPS as .txt and .kml file')
print('Inference done!')
| 14,274 | 45.347403 | 182 | py |
PCVLabDrone2021 | PCVLabDrone2021-main/UAV Geolocalization/Feature_extractor.py | from pathlib import Path
import argparse
import numpy as np
import torch
import json
import os
from models.matching import Matching
from models.utils.utils import (AverageTimer, VideoStreamer, load_encoder_img, frame2tensor)
torch.set_grad_enabled(False)
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='SuperGlue demo',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument(
'--input', type=str, default='0',
help='ID of a USB webcam, URL of an IP camera, '
'or path to an image directory or movie file')
parser.add_argument(
'--output_dir', type=str, default=None,
help='Directory where to write output frames (If None, no output)')
parser.add_argument(
'--resize', type=int, nargs='+', default=[1280, 720],
help='Resize the input image before running inference. If two numbers, '
'resize to the exact dimensions, if one number, resize the max '
'dimension, if -1, do not resize')
parser.add_argument(
'--map_row_col', type=int, nargs='+', default=[4,4],
help='Map composed with row*col sub-maps')
parser.add_argument(
'--superglue', choices={'indoor', 'outdoor'}, default='outdoor',
help='SuperGlue weights')
parser.add_argument(
'--max_keypoints', type=int, default=-1,
help='Maximum number of keypoints detected by Superpoint'
' (\'-1\' keeps all keypoints)')
parser.add_argument(
'--keypoint_threshold', type=float, default=0.005,
help='SuperPoint keypoint detector confidence threshold')
parser.add_argument(
'--nms_radius', type=int, default=4,
help='SuperPoint Non Maximum Suppression (NMS) radius'
' (Must be positive)')
parser.add_argument(
'--sinkhorn_iterations', type=int, default=20,
help='Number of Sinkhorn iterations performed by SuperGlue')
parser.add_argument(
'--match_threshold', type=float, default=0.2,
help='SuperGlue match threshold')
parser.add_argument(
'--force_cpu', action='store_true',
help='Force pytorch to run in CPU mode.')
opt = parser.parse_args()
print(opt)
if len(opt.resize) == 2 and opt.resize[1] == -1:
opt.resize = opt.resize[0:1]
if len(opt.resize) == 2:
print('Will resize to {}x{} (WxH)'.format(
opt.resize[0], opt.resize[1]))
elif len(opt.resize) == 1 and opt.resize[0] > 0:
print('Will resize max dimension to {}'.format(opt.resize[0]))
elif len(opt.resize) == 1:
print('Will not resize images')
else:
raise ValueError('Cannot specify more than two integers for --resize')
device = 'cuda' if torch.cuda.is_available() and not opt.force_cpu else 'cpu'
print('Running inference on device \"{}\"'.format(device))
config = {
'superpoint': {
'nms_radius': opt.nms_radius,
'keypoint_threshold': opt.keypoint_threshold,
'max_keypoints': opt.max_keypoints
},
'superglue': {
'weights': opt.superglue,
'sinkhorn_iterations': opt.sinkhorn_iterations,
'match_threshold': opt.match_threshold,
}
}
matching = Matching(config).eval().to(device)
keys = ['keypoints', 'scores', 'descriptors']
if opt.output_dir is not None:
print('==> Will write outputs to {}'.format(opt.output_dir))
Path(opt.output_dir).mkdir(exist_ok=True)
# Load timer and dataloader
print('==> Processing image directory input: {}'.format(opt.input))
img_dirs = []
for i in range(opt.map_row_col[0]):
for j in range(opt.map_row_col[1]):
dir = 'sat_{}_{}.png'.format(i,j)
img_dirs.append(opt.input+dir)
if len(opt.resize) == 1:
img = load_encoder_img(img_dirs[0], opt.resize)
opt.resize = [img.shape[1], img.shape[0]]
    # Initialize feature keypoints
kpts = {'keypoints0':np.empty([0,2]),
'scores0':np.empty([0]),
'descriptors0':np.empty([256,0]),
'image0':np.empty([opt.resize[1]*opt.map_row_col[0], opt.resize[0]*opt.map_row_col[1]])}
for i, imdir in enumerate(img_dirs):
frame = load_encoder_img(imdir, opt.resize)
frame_tensor = frame2tensor(frame, device)
last_data = matching.superpoint({'image': frame_tensor})
last_data = {k+'0': last_data[k][0].cpu().numpy() for k in keys}
row = opt.resize[1]*(i//opt.map_row_col[1])
col = opt.resize[0]*(i%opt.map_row_col[1])
print('row,col:', row, col)
        # Reorganize keypoints
last_data['keypoints0'] = last_data['keypoints0']+np.array([col,row])
kpts['keypoints0'] = np.concatenate((kpts['keypoints0'],last_data['keypoints0']), axis=0)
kpts['scores0'] = np.concatenate((kpts['scores0'],last_data['scores0']), axis=0)
kpts['descriptors0'] = np.concatenate((kpts['descriptors0'],last_data['descriptors0']), axis=1)
kpts['image0'][row:row+opt.resize[1], col:col+opt.resize[0]] = frame
image0_info = {'keypoints0':kpts['keypoints0'],
'scores0':kpts['scores0'],
'descriptors0':kpts['descriptors0'],
'image0':kpts['image0']}
# save kpts into npz file
np.savez(opt.output_dir+'/satmap_kpts.npz', **image0_info)
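# --- Illustrative sketch (hypothetical helper). test.py reloads this archive
# with np.load; the arrays keep the shapes assembled above: keypoints0 (K, 2),
# scores0 (K,), descriptors0 (256, K), image0 (H, W):
def _sketch_load_featurebase(path='./featurebase/satmap_kpts.npz'):
    data = np.load(path)
    return {k: data[k].shape
            for k in ('keypoints0', 'scores0', 'descriptors0', 'image0')}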
| 5,454 | 38.528986 | 103 | py |
PCVLabDrone2021 | PCVLabDrone2021-main/UAV Geolocalization/models/matching.py | # %BANNER_BEGIN%
# ---------------------------------------------------------------------
# %COPYRIGHT_BEGIN%
#
# Magic Leap, Inc. ("COMPANY") CONFIDENTIAL
#
# Unpublished Copyright (c) 2020
# Magic Leap, Inc., All Rights Reserved.
#
# NOTICE: All information contained herein is, and remains the property
# of COMPANY. The intellectual and technical concepts contained herein
# are proprietary to COMPANY and may be covered by U.S. and Foreign
# Patents, patents in process, and are protected by trade secret or
# copyright law. Dissemination of this information or reproduction of
# this material is strictly forbidden unless prior written permission is
# obtained from COMPANY. Access to the source code contained herein is
# hereby forbidden to anyone except current COMPANY employees, managers
# or contractors who have executed Confidentiality and Non-disclosure
# agreements explicitly covering such access.
#
# The copyright notice above does not evidence any actual or intended
# publication or disclosure of this source code, which includes
# information that is confidential and/or proprietary, and is a trade
# secret, of COMPANY. ANY REPRODUCTION, MODIFICATION, DISTRIBUTION,
# PUBLIC PERFORMANCE, OR PUBLIC DISPLAY OF OR THROUGH USE OF THIS
# SOURCE CODE WITHOUT THE EXPRESS WRITTEN CONSENT OF COMPANY IS
# STRICTLY PROHIBITED, AND IN VIOLATION OF APPLICABLE LAWS AND
# INTERNATIONAL TREATIES. THE RECEIPT OR POSSESSION OF THIS SOURCE
# CODE AND/OR RELATED INFORMATION DOES NOT CONVEY OR IMPLY ANY RIGHTS
# TO REPRODUCE, DISCLOSE OR DISTRIBUTE ITS CONTENTS, OR TO MANUFACTURE,
# USE, OR SELL ANYTHING THAT IT MAY DESCRIBE, IN WHOLE OR IN PART.
#
# %COPYRIGHT_END%
# ----------------------------------------------------------------------
# %AUTHORS_BEGIN%
#
# Originating Authors: Paul-Edouard Sarlin
#
# %AUTHORS_END%
# --------------------------------------------------------------------*/
# %BANNER_END%
import torch
from .superpoint import SuperPoint
from .superglue import SuperGlue
class Matching(torch.nn.Module):
""" Image Matching Frontend (SuperPoint + SuperGlue) """
def __init__(self, config={}):
super().__init__()
self.superpoint = SuperPoint(config.get('superpoint', {}))
self.superglue = SuperGlue(config.get('superglue', {}))
def forward(self, data):
""" Run SuperPoint (optionally) and SuperGlue
SuperPoint is skipped if ['keypoints0', 'keypoints1'] exist in input
Args:
data: dictionary with minimal keys: ['image0', 'image1']
"""
pred = {}
# Extract SuperPoint (keypoints, scores, descriptors) if not provided
if 'keypoints0' not in data:
pred0 = self.superpoint({'image': data['image0']})
pred = {**pred, **{k+'0': v for k, v in pred0.items()}}
if 'keypoints1' not in data:
pred1 = self.superpoint({'image': data['image1']})
pred = {**pred, **{k+'1': v for k, v in pred1.items()}}
# Batch all features
# We should either have i) one image per batch, or
# ii) the same number of local features for all images in the batch.
data = {**data, **pred}
for k in data:
if isinstance(data[k], (list, tuple)):
data[k] = torch.stack(data[k])
# Perform the matching
pred = {**pred, **self.superglue(data)}
return pred
| 3,417 | 39.211765 | 77 | py |
PCVLabDrone2021 | PCVLabDrone2021-main/UAV Geolocalization/models/superglue.py | # %BANNER_BEGIN%
# ---------------------------------------------------------------------
# %COPYRIGHT_BEGIN%
#
# Magic Leap, Inc. ("COMPANY") CONFIDENTIAL
#
# Unpublished Copyright (c) 2020
# Magic Leap, Inc., All Rights Reserved.
#
# NOTICE: All information contained herein is, and remains the property
# of COMPANY. The intellectual and technical concepts contained herein
# are proprietary to COMPANY and may be covered by U.S. and Foreign
# Patents, patents in process, and are protected by trade secret or
# copyright law. Dissemination of this information or reproduction of
# this material is strictly forbidden unless prior written permission is
# obtained from COMPANY. Access to the source code contained herein is
# hereby forbidden to anyone except current COMPANY employees, managers
# or contractors who have executed Confidentiality and Non-disclosure
# agreements explicitly covering such access.
#
# The copyright notice above does not evidence any actual or intended
# publication or disclosure of this source code, which includes
# information that is confidential and/or proprietary, and is a trade
# secret, of COMPANY. ANY REPRODUCTION, MODIFICATION, DISTRIBUTION,
# PUBLIC PERFORMANCE, OR PUBLIC DISPLAY OF OR THROUGH USE OF THIS
# SOURCE CODE WITHOUT THE EXPRESS WRITTEN CONSENT OF COMPANY IS
# STRICTLY PROHIBITED, AND IN VIOLATION OF APPLICABLE LAWS AND
# INTERNATIONAL TREATIES. THE RECEIPT OR POSSESSION OF THIS SOURCE
# CODE AND/OR RELATED INFORMATION DOES NOT CONVEY OR IMPLY ANY RIGHTS
# TO REPRODUCE, DISCLOSE OR DISTRIBUTE ITS CONTENTS, OR TO MANUFACTURE,
# USE, OR SELL ANYTHING THAT IT MAY DESCRIBE, IN WHOLE OR IN PART.
#
# %COPYRIGHT_END%
# ----------------------------------------------------------------------
# %AUTHORS_BEGIN%
#
# Originating Authors: Paul-Edouard Sarlin
#
# %AUTHORS_END%
# --------------------------------------------------------------------*/
# %BANNER_END%
from copy import deepcopy
from pathlib import Path
import torch
from torch import nn
def MLP(channels: list, do_bn=True):
""" Multi-layer perceptron """
n = len(channels)
layers = []
for i in range(1, n):
layers.append(
nn.Conv1d(channels[i - 1], channels[i], kernel_size=1, bias=True))
if i < (n-1):
if do_bn:
layers.append(nn.BatchNorm1d(channels[i]))
layers.append(nn.ReLU())
return nn.Sequential(*layers)
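# --- Illustrative sketch (hypothetical helper, not original code).
# MLP([3, 32, 64]) builds Conv1d(3->32) + BatchNorm + ReLU + Conv1d(32->64);
# the 1x1 convolutions act as per-point linear layers over a (B, C, N) set:
def _sketch_mlp_shapes():
    net = MLP([3, 32, 64])
    x = torch.randn(2, 3, 100)  # batch of 2, 3 input channels, 100 keypoints
    return net(x).shape         # -> torch.Size([2, 64, 100])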
def normalize_keypoints(kpts, image_shape):
""" Normalize keypoints locations based on image image_shape"""
_, _, height, width = image_shape
one = kpts.new_tensor(1)
size = torch.stack([one*width, one*height])[None]
center = size / 2
scaling = size.max(1, keepdim=True).values * 0.7
return (kpts - center[:, None, :]) / scaling[:, None, :]
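# --- Worked example (hypothetical helper, not original code). For a 640x480
# image the scaling is max(640, 480) * 0.7 = 448, so corner (0, 0) maps to
# ((0 - 320) / 448, (0 - 240) / 448) ~= (-0.714, -0.536) and the image center
# maps to (0, 0):
def _sketch_normalize_keypoints():
    kpts = torch.tensor([[[0., 0.], [320., 240.]]])     # shape (B=1, N=2, 2)
    return normalize_keypoints(kpts, (1, 1, 480, 640))  # (batch, ch, H, W)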
class KeypointEncoder(nn.Module):
""" Joint encoding of visual appearance and location using MLPs"""
def __init__(self, feature_dim, layers):
super().__init__()
self.encoder = MLP([3] + layers + [feature_dim])
nn.init.constant_(self.encoder[-1].bias, 0.0)
def forward(self, kpts, scores):
inputs = [kpts.transpose(1, 2), scores.unsqueeze(1)]
return self.encoder(torch.cat(inputs, dim=1))
def attention(query, key, value):
dim = query.shape[1]
scores = torch.einsum('bdhn,bdhm->bhnm', query, key) / dim**.5
prob = torch.nn.functional.softmax(scores, dim=-1)
return torch.einsum('bhnm,bdhm->bdhn', prob, value), prob
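# --- Shape contract (hypothetical helper, not original code). query/key/value
# are (B, D, H, N): batch, per-head dim, heads, points. Scores are (B, H, N, M)
# and the output keeps the query's point count N:
def _sketch_attention_shapes():
    q = torch.randn(1, 32, 4, 10)  # 10 query points
    k = torch.randn(1, 32, 4, 15)  # 15 source points
    v = torch.randn(1, 32, 4, 15)
    out, prob = attention(q, k, v)
    return out.shape, prob.shape   # ((1, 32, 4, 10), (1, 4, 10, 15))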
class MultiHeadedAttention(nn.Module):
""" Multi-head attention to increase model expressivitiy """
def __init__(self, num_heads: int, d_model: int):
super().__init__()
assert d_model % num_heads == 0
self.dim = d_model // num_heads
self.num_heads = num_heads
self.merge = nn.Conv1d(d_model, d_model, kernel_size=1)
self.proj = nn.ModuleList([deepcopy(self.merge) for _ in range(3)])
def forward(self, query, key, value):
batch_dim = query.size(0)
query, key, value = [l(x).view(batch_dim, self.dim, self.num_heads, -1)
for l, x in zip(self.proj, (query, key, value))]
x, _ = attention(query, key, value)
return self.merge(x.contiguous().view(batch_dim, self.dim*self.num_heads, -1))
class AttentionalPropagation(nn.Module):
def __init__(self, feature_dim: int, num_heads: int):
super().__init__()
self.attn = MultiHeadedAttention(num_heads, feature_dim)
self.mlp = MLP([feature_dim*2, feature_dim*2, feature_dim])
nn.init.constant_(self.mlp[-1].bias, 0.0)
def forward(self, x, source):
message = self.attn(x, source, source)
return self.mlp(torch.cat([x, message], dim=1))
class AttentionalGNN(nn.Module):
def __init__(self, feature_dim: int, layer_names: list):
super().__init__()
self.layers = nn.ModuleList([
AttentionalPropagation(feature_dim, 4)
for _ in range(len(layer_names))])
self.names = layer_names
def forward(self, desc0, desc1):
for layer, name in zip(self.layers, self.names):
if name == 'cross':
src0, src1 = desc1, desc0
else: # if name == 'self':
src0, src1 = desc0, desc1
delta0, delta1 = layer(desc0, src0), layer(desc1, src1)
desc0, desc1 = (desc0 + delta0), (desc1 + delta1)
return desc0, desc1
def log_sinkhorn_iterations(Z, log_mu, log_nu, iters: int):
""" Perform Sinkhorn Normalization in Log-space for stability"""
u, v = torch.zeros_like(log_mu), torch.zeros_like(log_nu)
for _ in range(iters):
u = log_mu - torch.logsumexp(Z + v.unsqueeze(1), dim=2)
v = log_nu - torch.logsumexp(Z + u.unsqueeze(2), dim=1)
return Z + u.unsqueeze(2) + v.unsqueeze(1)
def log_optimal_transport(scores, alpha, iters: int):
""" Perform Differentiable Optimal Transport in Log-space for stability"""
b, m, n = scores.shape
one = scores.new_tensor(1)
ms, ns = (m*one).to(scores), (n*one).to(scores)
bins0 = alpha.expand(b, m, 1)
bins1 = alpha.expand(b, 1, n)
alpha = alpha.expand(b, 1, 1)
couplings = torch.cat([torch.cat([scores, bins0], -1),
torch.cat([bins1, alpha], -1)], 1)
norm = - (ms + ns).log()
log_mu = torch.cat([norm.expand(m), ns.log()[None] + norm])
log_nu = torch.cat([norm.expand(n), ms.log()[None] + norm])
log_mu, log_nu = log_mu[None].expand(b, -1), log_nu[None].expand(b, -1)
Z = log_sinkhorn_iterations(couplings, log_mu, log_nu, iters)
Z = Z - norm # multiply probabilities by M+N
return Z
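# --- Illustrative sketch (hypothetical helper, not original code). The
# transport augments the m x n score matrix with a dustbin row and column so
# unmatched keypoints can be absorbed there; the returned Z has shape
# (b, m+1, n+1):
def _sketch_ot_shapes():
    scores = torch.randn(1, 5, 7)
    alpha = torch.tensor(1.0)      # stand-in for the learned bin_score
    Z = log_optimal_transport(scores, alpha, iters=10)
    return Z.shape                 # -> torch.Size([1, 6, 8])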
def arange_like(x, dim: int):
return x.new_ones(x.shape[dim]).cumsum(0) - 1 # traceable in 1.1
class SuperGlue(nn.Module):
"""SuperGlue feature matching middle-end
Given two sets of keypoints and locations, we determine the
correspondences by:
1. Keypoint Encoding (normalization + visual feature and location fusion)
2. Graph Neural Network with multiple self and cross-attention layers
3. Final projection layer
4. Optimal Transport Layer (a differentiable Hungarian matching algorithm)
5. Thresholding matrix based on mutual exclusivity and a match_threshold
The correspondence ids use -1 to indicate non-matching points.
Paul-Edouard Sarlin, Daniel DeTone, Tomasz Malisiewicz, and Andrew
Rabinovich. SuperGlue: Learning Feature Matching with Graph Neural
Networks. In CVPR, 2020. https://arxiv.org/abs/1911.11763
"""
default_config = {
'descriptor_dim': 256,
'weights': 'indoor',
'keypoint_encoder': [32, 64, 128, 256],
'GNN_layers': ['self', 'cross'] * 9,
'sinkhorn_iterations': 100,
'match_threshold': 0.2,
}
def __init__(self, config):
super().__init__()
self.config = {**self.default_config, **config}
self.kenc = KeypointEncoder(
self.config['descriptor_dim'], self.config['keypoint_encoder'])
self.gnn = AttentionalGNN(
self.config['descriptor_dim'], self.config['GNN_layers'])
self.final_proj = nn.Conv1d(
self.config['descriptor_dim'], self.config['descriptor_dim'],
kernel_size=1, bias=True)
bin_score = torch.nn.Parameter(torch.tensor(1.))
self.register_parameter('bin_score', bin_score)
assert self.config['weights'] in ['indoor', 'outdoor']
path = Path(__file__).parent
path = path / 'weights/superglue_{}.pth'.format(self.config['weights'])
self.load_state_dict(torch.load(str(path)))
print('Loaded SuperGlue model (\"{}\" weights)'.format(
self.config['weights']))
def forward(self, data):
"""Run SuperGlue on a pair of keypoints and descriptors"""
desc0, desc1 = data['descriptors0'], data['descriptors1']
kpts0, kpts1 = data['keypoints0'], data['keypoints1']
if kpts0.shape[1] == 0 or kpts1.shape[1] == 0: # no keypoints
shape0, shape1 = kpts0.shape[:-1], kpts1.shape[:-1]
return {
'matches0': kpts0.new_full(shape0, -1, dtype=torch.int),
'matches1': kpts1.new_full(shape1, -1, dtype=torch.int),
'matching_scores0': kpts0.new_zeros(shape0),
'matching_scores1': kpts1.new_zeros(shape1),
}
# Keypoint normalization.
kpts0 = normalize_keypoints(kpts0, data['image0'].shape)
kpts1 = normalize_keypoints(kpts1, data['image1'].shape)
# Keypoint MLP encoder.
desc0 = desc0 + self.kenc(kpts0, data['scores0'])
desc1 = desc1 + self.kenc(kpts1, data['scores1'])
# Multi-layer Transformer network.
desc0, desc1 = self.gnn(desc0, desc1)
# Final MLP projection.
mdesc0, mdesc1 = self.final_proj(desc0), self.final_proj(desc1)
# Compute matching descriptor distance.
scores = torch.einsum('bdn,bdm->bnm', mdesc0, mdesc1)
scores = scores / self.config['descriptor_dim']**.5
# Run the optimal transport.
scores = log_optimal_transport(
scores, self.bin_score,
iters=self.config['sinkhorn_iterations'])
# Get the matches with score above "match_threshold".
max0, max1 = scores[:, :-1, :-1].max(2), scores[:, :-1, :-1].max(1)
indices0, indices1 = max0.indices, max1.indices
mutual0 = arange_like(indices0, 1)[None] == indices1.gather(1, indices0)
mutual1 = arange_like(indices1, 1)[None] == indices0.gather(1, indices1)
zero = scores.new_tensor(0)
mscores0 = torch.where(mutual0, max0.values.exp(), zero)
mscores1 = torch.where(mutual1, mscores0.gather(1, indices1), zero)
valid0 = mutual0 & (mscores0 > self.config['match_threshold'])
valid1 = mutual1 & valid0.gather(1, indices1)
indices0 = torch.where(valid0, indices0, indices0.new_tensor(-1))
indices1 = torch.where(valid1, indices1, indices1.new_tensor(-1))
return {
'matches0': indices0, # use -1 for invalid match
'matches1': indices1, # use -1 for invalid match
'matching_scores0': mscores0,
'matching_scores1': mscores1,
}
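# --- Illustrative sketch (hypothetical helper, not original code). The mutual
# check above keeps a match (i -> j) only if j's best match is i:
def _sketch_mutual_check():
    indices0 = torch.tensor([[1, 0]])  # point 0 -> 1, point 1 -> 0
    indices1 = torch.tensor([[1, 0]])  # point 0 -> 1, point 1 -> 0
    mutual0 = arange_like(indices0, 1)[None] == indices1.gather(1, indices0)
    return mutual0                     # tensor([[True, True]])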
| 11,316 | 38.848592 | 86 | py |
PCVLabDrone2021 | PCVLabDrone2021-main/UAV Geolocalization/models/superpoint.py | # %BANNER_BEGIN%
# ---------------------------------------------------------------------
# %COPYRIGHT_BEGIN%
#
# Magic Leap, Inc. ("COMPANY") CONFIDENTIAL
#
# Unpublished Copyright (c) 2020
# Magic Leap, Inc., All Rights Reserved.
#
# NOTICE: All information contained herein is, and remains the property
# of COMPANY. The intellectual and technical concepts contained herein
# are proprietary to COMPANY and may be covered by U.S. and Foreign
# Patents, patents in process, and are protected by trade secret or
# copyright law. Dissemination of this information or reproduction of
# this material is strictly forbidden unless prior written permission is
# obtained from COMPANY. Access to the source code contained herein is
# hereby forbidden to anyone except current COMPANY employees, managers
# or contractors who have executed Confidentiality and Non-disclosure
# agreements explicitly covering such access.
#
# The copyright notice above does not evidence any actual or intended
# publication or disclosure of this source code, which includes
# information that is confidential and/or proprietary, and is a trade
# secret, of COMPANY. ANY REPRODUCTION, MODIFICATION, DISTRIBUTION,
# PUBLIC PERFORMANCE, OR PUBLIC DISPLAY OF OR THROUGH USE OF THIS
# SOURCE CODE WITHOUT THE EXPRESS WRITTEN CONSENT OF COMPANY IS
# STRICTLY PROHIBITED, AND IN VIOLATION OF APPLICABLE LAWS AND
# INTERNATIONAL TREATIES. THE RECEIPT OR POSSESSION OF THIS SOURCE
# CODE AND/OR RELATED INFORMATION DOES NOT CONVEY OR IMPLY ANY RIGHTS
# TO REPRODUCE, DISCLOSE OR DISTRIBUTE ITS CONTENTS, OR TO MANUFACTURE,
# USE, OR SELL ANYTHING THAT IT MAY DESCRIBE, IN WHOLE OR IN PART.
#
# %COPYRIGHT_END%
# ----------------------------------------------------------------------
# %AUTHORS_BEGIN%
#
# Originating Authors: Paul-Edouard Sarlin
#
# %AUTHORS_END%
# --------------------------------------------------------------------*/
# %BANNER_END%
from pathlib import Path
import torch
from torch import nn
def simple_nms(scores, nms_radius: int):
""" Fast Non-maximum suppression to remove nearby points """
assert(nms_radius >= 0)
def max_pool(x):
return torch.nn.functional.max_pool2d(
x, kernel_size=nms_radius*2+1, stride=1, padding=nms_radius)
zeros = torch.zeros_like(scores)
max_mask = scores == max_pool(scores)
for _ in range(2):
supp_mask = max_pool(max_mask.float()) > 0
supp_scores = torch.where(supp_mask, zeros, scores)
new_max_mask = supp_scores == max_pool(supp_scores)
max_mask = max_mask | (new_max_mask & (~supp_mask))
return torch.where(max_mask, scores, zeros)
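# --- Toy demonstration (hypothetical helper, not original code). With
# nms_radius=1 only the strongest score in each 3x3 neighbourhood survives;
# suppressed positions are zeroed:
def _sketch_simple_nms():
    s = torch.zeros(1, 5, 5)
    s[0, 2, 2], s[0, 2, 3] = 0.9, 0.5  # two adjacent candidate keypoints
    out = simple_nms(s, nms_radius=1)
    return out[0, 2, 2].item(), out[0, 2, 3].item()  # (0.9, 0.0)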
def remove_borders(keypoints, scores, border: int, height: int, width: int):
""" Removes keypoints too close to the border """
mask_h = (keypoints[:, 0] >= border) & (keypoints[:, 0] < (height - border))
mask_w = (keypoints[:, 1] >= border) & (keypoints[:, 1] < (width - border))
mask = mask_h & mask_w
return keypoints[mask], scores[mask]
def top_k_keypoints(keypoints, scores, k: int):
if k >= len(keypoints):
return keypoints, scores
scores, indices = torch.topk(scores, k, dim=0)
return keypoints[indices], scores
def sample_descriptors(keypoints, descriptors, s: int = 8):
""" Interpolate descriptors at keypoint locations """
b, c, h, w = descriptors.shape
keypoints = keypoints - s / 2 + 0.5
keypoints /= torch.tensor([(w*s - s/2 - 0.5), (h*s - s/2 - 0.5)],
).to(keypoints)[None]
keypoints = keypoints*2 - 1 # normalize to (-1, 1)
    # 'align_corners' is needed for torch >= 1.3; parse the version robustly
    # instead of indexing a single character of the version string.
    major, minor = (int(v) for v in torch.__version__.split('.')[:2])
    args = {'align_corners': True} if (major, minor) >= (1, 3) else {}
descriptors = torch.nn.functional.grid_sample(
descriptors, keypoints.view(b, 1, -1, 2), mode='bilinear', **args)
descriptors = torch.nn.functional.normalize(
descriptors.reshape(b, c, -1), p=2, dim=1)
return descriptors
class SuperPoint(nn.Module):
"""SuperPoint Convolutional Detector and Descriptor
SuperPoint: Self-Supervised Interest Point Detection and
Description. Daniel DeTone, Tomasz Malisiewicz, and Andrew
Rabinovich. In CVPRW, 2019. https://arxiv.org/abs/1712.07629
"""
default_config = {
'descriptor_dim': 256,
'nms_radius': 4,
'keypoint_threshold': 0.005,
'max_keypoints': -1,
'remove_borders': 4,
}
def __init__(self, config):
super().__init__()
self.config = {**self.default_config, **config}
self.relu = nn.ReLU(inplace=True)
self.pool = nn.MaxPool2d(kernel_size=2, stride=2)
c1, c2, c3, c4, c5 = 64, 64, 128, 128, 256
self.conv1a = nn.Conv2d(1, c1, kernel_size=3, stride=1, padding=1)
self.conv1b = nn.Conv2d(c1, c1, kernel_size=3, stride=1, padding=1)
self.conv2a = nn.Conv2d(c1, c2, kernel_size=3, stride=1, padding=1)
self.conv2b = nn.Conv2d(c2, c2, kernel_size=3, stride=1, padding=1)
self.conv3a = nn.Conv2d(c2, c3, kernel_size=3, stride=1, padding=1)
self.conv3b = nn.Conv2d(c3, c3, kernel_size=3, stride=1, padding=1)
self.conv4a = nn.Conv2d(c3, c4, kernel_size=3, stride=1, padding=1)
self.conv4b = nn.Conv2d(c4, c4, kernel_size=3, stride=1, padding=1)
self.convPa = nn.Conv2d(c4, c5, kernel_size=3, stride=1, padding=1)
self.convPb = nn.Conv2d(c5, 65, kernel_size=1, stride=1, padding=0)
self.convDa = nn.Conv2d(c4, c5, kernel_size=3, stride=1, padding=1)
self.convDb = nn.Conv2d(
c5, self.config['descriptor_dim'],
kernel_size=1, stride=1, padding=0)
path = Path(__file__).parent / 'weights/superpoint_v1.pth'
self.load_state_dict(torch.load(str(path)))
mk = self.config['max_keypoints']
if mk == 0 or mk < -1:
raise ValueError('\"max_keypoints\" must be positive or \"-1\"')
print('Loaded SuperPoint model')
def forward(self, data):
""" Compute keypoints, scores, descriptors for image """
# Shared Encoder
x = self.relu(self.conv1a(data['image']))
x = self.relu(self.conv1b(x))
x = self.pool(x)
x = self.relu(self.conv2a(x))
x = self.relu(self.conv2b(x))
x = self.pool(x)
x = self.relu(self.conv3a(x))
x = self.relu(self.conv3b(x))
x = self.pool(x)
x = self.relu(self.conv4a(x))
x = self.relu(self.conv4b(x))
# Compute the dense keypoint scores
cPa = self.relu(self.convPa(x))
scores = self.convPb(cPa)
scores = torch.nn.functional.softmax(scores, 1)[:, :-1]
b, _, h, w = scores.shape
scores = scores.permute(0, 2, 3, 1).reshape(b, h, w, 8, 8)
scores = scores.permute(0, 1, 3, 2, 4).reshape(b, h*8, w*8)
scores = simple_nms(scores, self.config['nms_radius'])
# Extract keypoints
keypoints = [
torch.nonzero(s > self.config['keypoint_threshold'])
for s in scores]
scores = [s[tuple(k.t())] for s, k in zip(scores, keypoints)]
# Discard keypoints near the image borders
keypoints, scores = list(zip(*[
remove_borders(k, s, self.config['remove_borders'], h*8, w*8)
for k, s in zip(keypoints, scores)]))
# Keep the k keypoints with highest score
if self.config['max_keypoints'] >= 0:
keypoints, scores = list(zip(*[
top_k_keypoints(k, s, self.config['max_keypoints'])
for k, s in zip(keypoints, scores)]))
# Convert (h, w) to (x, y)
keypoints = [torch.flip(k, [1]).float() for k in keypoints]
# Compute the dense descriptors
cDa = self.relu(self.convDa(x))
descriptors = self.convDb(cDa)
descriptors = torch.nn.functional.normalize(descriptors, p=2, dim=1)
# Extract descriptors
descriptors = [sample_descriptors(k[None], d[None], 8)[0]
for k, d in zip(keypoints, descriptors)]
return {
'keypoints': keypoints,
'scores': scores,
'descriptors': descriptors,
}
| 8,145 | 39.128079 | 80 | py |
PCVLabDrone2021 | PCVLabDrone2021-main/UAV Geolocalization/models/utils/utils.py | from pathlib import Path
import time
from collections import OrderedDict
from threading import Thread
import numpy as np
import math
from vidgear.gears import CamGear
import cv2
import torch
import matplotlib.pyplot as plt
import matplotlib
matplotlib.use('Agg')
class AverageTimer:
""" Class to help manage printing simple timing of code execution. """
def __init__(self, smoothing=0.3, newline=False):
self.smoothing = smoothing
self.newline = newline
self.times = OrderedDict()
self.will_print = OrderedDict()
self.reset()
def reset(self):
now = time.time()
self.start = now
self.last_time = now
for name in self.will_print:
self.will_print[name] = False
def update(self, name='default', printout=False):
now = time.time()
dt = now - self.last_time
if name in self.times:
dt = self.smoothing * dt + (1 - self.smoothing) * self.times[name]
self.times[name] = dt
self.will_print[name] = True
self.last_time = now
if printout:
print('%s=%.2f s' %(name, dt))
def print(self, text='Timer'):
total = 0.
print('[{}]'.format(text), end=' ')
for key in self.times:
val = self.times[key]
if self.will_print[key]:
print('%s=%.3f' % (key, val), end=' ')
total += val
print('total=%.3f sec {%.1f FPS}' % (total, 1./total), end=' ')
if self.newline:
print(flush=True)
else:
print(end='\r', flush=True)
self.reset()
def load_encoder_img(impath, resize):
""" Read image as grayscale and resize to img_size.
Inputs
impath: Path to input image.
Returns
grayim: uint8 numpy array sized H x W.
"""
grayim = cv2.imread(impath, 0)
if grayim is None:
raise Exception('Error reading image %s' % impath)
w, h = grayim.shape[1], grayim.shape[0]
w_new, h_new = process_resize(w, h, resize)
grayim = cv2.resize(
grayim, (w_new, h_new), interpolation=cv2.INTER_AREA)
return grayim
class VideoStreamer:
""" Class to help process image streams. Four types of possible inputs:"
1.) USB Webcam.
2.) An IP camera
3.) A directory of images (files in directory matching 'image_glob').
4.) A video file, such as an .mp4 or .avi file.
"""
def __init__(self, opt):
self._ip_grabbed = False
self._ip_running = False
self._ip_camera = False
self._ip_image = None
self._ip_index = 0
self.cap = []
self.camera = True
self.video_file = False
self.listing = []
self.resize = opt.resize
        self.scale = opt.Init_height*0.00895404+0.01114674 if opt.Init_height else 1.0  # empirical linear fit from flight height (meters) to image scale
self.interp = cv2.INTER_AREA
self.i = 0
self.skip = opt.skip
self.bin_interval = opt.bin_interval
self.max_length = opt.max_length
basedir = opt.input
image_glob = opt.image_glob
if isinstance(basedir, int) or basedir.isdigit():
print('==> Processing USB webcam input: {}'.format(basedir))
self.cap = cv2.VideoCapture(int(basedir))
self.listing = range(0, self.max_length)
elif basedir.startswith(('http', 'rtsp')):
print('==> Processing IP camera input: {}'.format(basedir))
# Available Streams are: [144p, 240p, 360p, 480p, 720p, 1080p, best, worst]
options = {"STREAM_RESOLUTION": "720p", 'CAP_PROP_FPS':5, "THREADED_QUEUE_MODE": False}
self.stream = CamGear(source=basedir, stream_mode = True, logging=True, **options).start() # YouTube Video URL as input
self._ip_camera = True
self.listing = range(0, self.max_length)
opt.KF_dt = 1.0/options['CAP_PROP_FPS']
opt.patience = int(opt.patience*options['CAP_PROP_FPS']/opt.skip)
print('==> Stop if UAV GPS not updated over {} frames'.format(opt.patience))
elif Path(basedir).is_dir():
print('==> Processing image directory input: {}'.format(basedir))
self.listing = list(Path(basedir).glob(image_glob[0]))
for j in range(1, len(image_glob)):
image_path = list(Path(basedir).glob(image_glob[j]))
self.listing = self.listing + image_path
self.listing.sort()
self.listing = self.listing[::self.skip]
self.max_length = np.min([self.max_length, len(self.listing)])
if self.max_length == 0:
raise IOError('No images found (maybe bad \'image_glob\' ?)')
self.listing = self.listing[:self.max_length]
self.camera = False
print('==> Stop if UAV GPS not updated over {} frames'.format(opt.patience))
elif Path(basedir).exists():
print('==> Processing video input: {}'.format(basedir))
self.cap = cv2.VideoCapture(basedir)
self.cap.set(cv2.CAP_PROP_BUFFERSIZE, 1)
num_frames = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT))
self.listing = range(0, num_frames)
self.listing = self.listing[::self.skip]
self.video_file = True
self.max_length = np.min([self.max_length, len(self.listing)])
self.listing = self.listing[:self.max_length]
opt.KF_dt = 1.0/(self.cap.get(cv2.CAP_PROP_FPS)/opt.skip)
opt.patience = int(opt.patience*self.cap.get(cv2.CAP_PROP_FPS)/opt.skip)
print('==> Stop if UAV GPS not updated over {} frames'.format(opt.patience))
else:
raise ValueError('VideoStreamer input \"{}\" not recognized.'.format(basedir))
def load_image(self, impath, rotate, bins):
""" Read image as grayscale and resize to img_size.
Inputs
impath: Path to input image.
Returns
grayim: uint8 numpy array sized H x W.
"""
grayim = cv2.imread(impath, 0)
if grayim is None:
raise Exception('Error reading image %s' % impath)
w, h = grayim.shape[1], grayim.shape[0]
w_resize, h_resize = int(self.resize[0]*self.scale), int(self.resize[1]*self.scale)
w_new, h_new = process_resize(w, h, (w_resize, h_resize))
grayim = cv2.resize(
grayim, (w_new, h_new), interpolation=self.interp)
if rotate:
angle = bins*self.bin_interval
grayim = self.rotate_image(grayim, angle) # angle>0, rotate image counterclockwise
# w_rotate, h_rotate = grayim.shape[1], grayim.shape[0]
# scales = (float(w) / float(w_rotate), float(h) / float(h_rotate))
return grayim
def next_frame(self, scale, go_next=True, rotate=False, bins=0):
""" Return the next frame, and increment internal counter.
Returns
image: Next H x W image.
status: True or False depending whether image was loaded.
"""
if (self.i==self.max_length):
return (None, False)
#update image scale
self.scale = self.scale*scale
if self.camera:
if self._ip_camera:
#Wait for first image, making sure we haven't exited
time.sleep(.001)
image = self.stream.read()
else:
ret, image = self.cap.read()
if ret is False or image is None:
print('VideoStreamer: Cannot get image from camera')
return (None, False)
w, h = image.shape[1], image.shape[0]
if self.video_file:
self.cap.set(cv2.CAP_PROP_POS_FRAMES, self.listing[self.i])
w_resize, h_resize = int(self.resize[0]*self.scale), int(self.resize[1]*self.scale)
w_new, h_new = process_resize(w, h, (w_resize, h_resize))
image = cv2.resize(image, (w_new, h_new),
interpolation=self.interp)
            image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)  # OpenCV frames are BGR
if rotate:
angle = bins*self.bin_interval
image = self.rotate_image(image, angle) # angle>0, rotate image counterclockwise
else:
image_file = str(self.listing[self.i])
image = self.load_image(image_file, rotate, bins)
self.i = self.i + 1 if go_next else self.i
return (image, True)
def start_ip_camera_thread(self):
self._ip_thread = Thread(target=self.update_ip_camera, args=())
self._ip_running = True
self._ip_thread.start()
self._ip_exited = False
return self
def update_ip_camera(self):
while self._ip_running:
ret, img = self.cap.read()
if ret is False:
self._ip_running = False
self._ip_exited = True
self._ip_grabbed = False
return
self._ip_image = img
self._ip_grabbed = ret
self._ip_index += 1
#print('IPCAMERA THREAD got frame {}'.format(self._ip_index))
def rotate_image(self, mat, angle):
"""
Rotates an image (angle in degrees) and expands image to avoid cropping
"""
height, width = mat.shape[:2] # image shape has 3 dimensions
image_center = (width/2, height/2) # getRotationMatrix2D needs coordinates in reverse order (width, height) compared to shape
rotation_mat = cv2.getRotationMatrix2D(image_center, angle, 1.)
# rotation calculates the cos and sin, taking absolutes of those.
abs_cos = abs(rotation_mat[0,0])
abs_sin = abs(rotation_mat[0,1])
# find the new width and height bounds
bound_w = int(height * abs_sin + width * abs_cos)
bound_h = int(height * abs_cos + width * abs_sin)
        # subtract old image center (bringing image back to origin) and add the new image center coordinates
rotation_mat[0, 2] += bound_w/2 - image_center[0]
rotation_mat[1, 2] += bound_h/2 - image_center[1]
# rotate image with the new bounds and translated rotation matrix
rotated_mat = cv2.warpAffine(mat, rotation_mat, (bound_w, bound_h))
return rotated_mat
def cleanup(self):
self._ip_running = False
# --- PREPROCESSING ---
def process_resize(w, h, resize):
assert(len(resize) > 0 and len(resize) <= 2)
if len(resize) == 1 and resize[0] > -1:
scale = resize[0] / max(h, w)
w_new, h_new = int(round(w*scale)), int(round(h*scale))
elif len(resize) == 1 and resize[0] == -1:
w_new, h_new = w, h
else: # len(resize) == 2:
w_new, h_new = resize[0], resize[1]
# Issue warning if resolution is too small or too large.
if max(w_new, h_new) < 160:
print('Warning: input resolution is very small, results may vary')
elif max(w_new, h_new) > 2000:
print('Warning: input resolution is very large, results may vary')
return w_new, h_new
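# --- illustrative sketch, not part of the original file ---
# Exercises the three resize modes of process_resize above; the helper name
# `_demo_process_resize` is ours, not from the repo.
def _demo_process_resize():
    assert process_resize(640, 480, [320]) == (320, 240)        # scale longest side to 320
    assert process_resize(640, 480, [-1]) == (640, 480)         # -1 keeps the original size
    assert process_resize(640, 480, [400, 300]) == (400, 300)   # explicit (w, h)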
def frame2tensor(frame, device):
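    # scale uint8 [H, W] to float in [0, 1] and add batch + channel dims -> [1, 1, H, W]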
return torch.from_numpy(frame/255.).float()[None, None].to(device)
def read_image(path, device, resize, rotation, resize_float):
image = cv2.imread(str(path), cv2.IMREAD_GRAYSCALE)
if image is None:
return None, None, None
w, h = image.shape[1], image.shape[0]
w_new, h_new = process_resize(w, h, resize)
scales = (float(w) / float(w_new), float(h) / float(h_new))
if resize_float:
image = cv2.resize(image.astype('float32'), (w_new, h_new))
else:
image = cv2.resize(image, (w_new, h_new)).astype('float32')
if rotation != 0:
image = np.rot90(image, k=rotation)
if rotation % 2:
scales = scales[::-1]
inp = frame2tensor(image, device)
return image, inp, scales
def remove_kpts_on_building(features, labels):
# screen out basemap keypoints belonging to building
    kpts = features['keypoints0']
    scores = features['scores0']
    descriptors = features['descriptors0']
    valid = labels == 0
    kpts = kpts[valid]
    descriptors = ((descriptors.T)[valid]).T
    scores = scores[valid]
return {'keypoints0':kpts, 'scores0':scores, 'descriptors0':descriptors}
def segment_keypoints(valid, labels, threshold):
ground = labels==0
building = labels==1
grounds = np.logical_and(valid, ground)
buildings = np.logical_and(valid, building)
grounds_sum = sum(grounds) # number of matched non-building keypoints
buildings_sum = sum(buildings) # number of matched building keypoints
# # if non-building valid num<threshold and building valid>threshold, select matched building else select non-building keypoints for localization
# if (grounds_sum<threshold and buildings_sum>threshold) or buildings_sum-grounds_sum>threshold/2:
    # return buildings, grounds, False # use building kpts for geolocalization
# return grounds, buildings, True
if grounds_sum>=threshold:
if buildings_sum/grounds_sum<3:
return grounds, buildings, True, (grounds_sum, buildings_sum)
else:
return buildings, grounds, False, (grounds_sum, buildings_sum)
elif buildings_sum>=threshold:
        return buildings, grounds, False, (grounds_sum, buildings_sum) # use building kpts for geolocalization
else:
return valid, None, True, (grounds_sum, buildings_sum)
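# --- illustrative sketch, not part of the original file ---
# With threshold=2, 3 matched ground vs 2 matched building keypoints means the
# ground keypoints are kept (third return value True). Assumes numpy is already
# imported as np in this module; `_demo_segment_keypoints` is our name.
def _demo_segment_keypoints():
    valid = np.array([True, True, True, True, True, False])
    labels = np.array([0, 0, 0, 1, 1, 1])
    use, other, is_ground, counts = segment_keypoints(valid, labels, threshold=2)
    assert is_ground and counts == (3, 2)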
def update_last_data(satmap_kpts, mask, spindex, bbox, device):
xmin, ymin, xmax, ymax = bbox
image0, keypoints0, descriptors0, scores0 = satmap_kpts
matches = spindex.intersect((xmin, ymin, xmax-1, ymax-1)) # quadtree will include lower right boundary, so -1 to exclude keypoints lying on that boundary
keypoints0_ = keypoints0[matches]-[xmin, ymin]
scores0 = scores0[matches]
descriptors0 = descriptors0[:,matches]
keypoints0 = torch.from_numpy(keypoints0_).float().to(device)
scores0 = torch.from_numpy(scores0).float().to(device)
descriptors0 = torch.from_numpy(descriptors0).float().to(device)
image0 = frame2tensor(image0[ymin:ymax, xmin:xmax], device)
last_data = {'keypoints0':[keypoints0], 'scores0':[scores0], 'descriptors0':[descriptors0], 'image0':image0}
if mask is not None:
update_mask = mask[ymin:ymax, xmin:xmax]
# print(range, update_mask.shape)
keypoints0_ = keypoints0_.astype('int')
labels = update_mask[keypoints0_[:,1], keypoints0_[:,0]]
return last_data, labels
else:
return last_data, None | 14,700 | 38.732432 | 157 | py |
bmm | bmm-master/docs/conf.py | # Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('..'))
# -- Project information -----------------------------------------------------
project = 'bmm'
copyright = '2021, Sam Duffield'
author = 'Sam Duffield'
# The full version, including alpha/beta/rc tags
release = '1.1'
version = '1.1'
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.mathjax', 'sphinx_rtd_theme']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = 'en'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
autodoc_typehints = "description"
html4_writer = True
| 2,323 | 34.753846 | 79 | py |
STEP | STEP-master/src/train_gnn.py | import pytorch_lightning as pyl
import torch
import torch.nn.functional as F
import numpy as np
import datasets as dataset
import torch.utils.data
import sklearn.metrics
from option import args
from model.tgat import TGAT
class ModelLightning(pyl.LightningModule):
def __init__(self, config, backbone):
super().__init__()
self.config = config
self.backbone = backbone
pass
def forward(self, batch):
##ToDo
x = self.backbone(
batch['src_edge_feat'],
batch['src_edge_to_time'],
batch['src_center_node_idx'],
batch['src_neigh_edge'],
batch['src_node_features']
)
return x
def training_step(self, batch, batch_idx):
logits = self(batch)
        labels = batch['labels']
        loss = F.binary_cross_entropy_with_logits(
            logits, labels, reduction='none')
loss = torch.mean(loss)
self.log("loss2", loss, on_step=True, prog_bar=True, logger=False)
return loss
def validation_step(self, batch, batch_idx):
org_logits = self(batch).sigmoid()
return {'org_proba': org_logits, 'label':batch['labels']}
def validation_epoch_end(self, outputs):
org_pred = torch.cat([output['org_proba'] for output in outputs])
label = torch.cat([output['label'] for output in outputs])
if torch.sum(label > 0):
org_valid_auc = sklearn.metrics.roc_auc_score(label.cpu().numpy().flatten(), org_pred.cpu().numpy().flatten())
else:
org_valid_auc = 0
self.log('org_valid_auc', org_valid_auc, sync_dist=True)
self.log('learning rate', self.optimizers(0).param_groups[0]['lr'])
def configure_optimizers(self):
optimizer = torch.optim.AdamW(
self.parameters(),
lr=self.config.learning_rate)
scheduler = torch.optim.lr_scheduler.StepLR(
optimizer, step_size=10, gamma=0.7)
return [optimizer], [scheduler]
def backward(
self, loss, *args, **kargs):
super().backward(loss, *args, **kargs)
for p in self.parameters():
if (p.grad is not None and torch.any(torch.isnan(p.grad))) or \
torch.any(torch.isnan(p)):
                raise RuntimeError('nan happened')
pass
pass
    def predict_step(self, batch, batch_idx: int, dataloader_idx: int = None):
        # forward() returns a single logits tensor here
        proba = torch.sigmoid(self(batch))
        labels = batch['labels']
        return proba.cpu().numpy().flatten(), labels.cpu().numpy().flatten()
if __name__=='__main__':
config = args
dataset_train = dataset.DygDataset(config, 'train')
dataset_valid = dataset.DygDataset(config, 'valid')
gpus = None if config.gpus == 0 else config.gpus
collate_fn = dataset.Collate(config)
backbone = TGAT(config)
model = ModelLightning(
config, backbone=backbone)
loader_train = torch.utils.data.DataLoader(
dataset=dataset_train,
batch_size=config.batch_size,
shuffle=False,
num_workers=config.num_data_workers,
pin_memory=True,
#sampler=dataset.RandomDropSampler(dataset_train, 0),
collate_fn=collate_fn.dyg_collate_fn
)
loader_valid = torch.utils.data.DataLoader(
dataset=dataset_valid,
batch_size=config.batch_size,
shuffle=False,
num_workers=config.num_data_workers,
collate_fn=collate_fn.dyg_collate_fn
)
trainer = pyl.Trainer(
logger=pyl.loggers.CSVLogger('../lightning_logs_gnn'),
gradient_clip_val=0.1,
replace_sampler_ddp=False,
max_epochs=10,
gpus=gpus
)
trainer.fit(
model, train_dataloaders=loader_train,
val_dataloaders=loader_valid
) | 3,844 | 28.128788 | 122 | py |
STEP | STEP-master/src/datasets_edge.py | import torch
import torch.utils.data
import os
import numpy as np
import random
import pandas as pd
class Data:
def __init__(self, sources, destinations, timestamps, edge_idxs, labels):
self.sources = sources
self.destinations = destinations
self.timestamps = timestamps
self.edge_idxs = edge_idxs
self.labels = labels
self.n_interactions = len(sources)
self.unique_nodes = set(sources) | set(destinations)
self.n_unique_nodes = len(self.unique_nodes)
class EdgeDataset(torch.utils.data.Dataset):
def __init__(self, config):
self.config = config
dataset_name = '{}/ml_{}'.format(self.config.dir_data, self.config.data_set)
self.full_data, self.positive_eids, self.edge_features, self.node_features = \
self.get_data(dataset_name)
self.index_start = self.positive_eids[0]
def get_data(self, dataset_name):
graph_df = pd.read_csv('{}.csv'.format(dataset_name))
edge_features = np.load('{}.npy'.format(dataset_name))
node_features = np.load('{}_node.npy'.format(dataset_name))
sources = graph_df.u.values
destinations = graph_df.i.values
edge_idxs = graph_df.idx.values
labels = graph_df.label.values
timestamps = graph_df.ts.values
random.seed(2020)
positive_eids = np.where(timestamps >= 0)[0]
full_data = Data(sources, destinations, timestamps, edge_idxs, labels)
return full_data, positive_eids, edge_features, node_features
def __getitem__(self, item):
item += self.index_start
edge_idx = self.full_data.edge_idxs[item]
edge_feature = self.edge_features[edge_idx]
edge_idx = np.array(edge_idx)
return {
'edge_feature': torch.from_numpy(edge_feature.astype(np.float32)).reshape(1,-1),
'edge_idx': torch.from_numpy(edge_idx).reshape(1)
}
def __len__(self):
return len(self.positive_eids)
class Collate:
def __init__(self, config):
self.config = config
def dyg_collate_fn(self, batch):
edge_feature = torch.cat([b['edge_feature'] for b in batch], dim=0) #n1,f
edge_idx = torch.cat([b['edge_idx'] for b in batch], dim=0) # n
return {
'edge_feature':edge_feature,
'edge_idx': edge_idx
}
class RandomDropSampler(torch.utils.data.Sampler):
r"""Samples elements sequentially, always in the same order.
Arguments:
data_source (Dataset): dataset to sample from
"""
def __init__(self, dataset, drop_rate):
self.dataset = dataset
self.drop_rate = drop_rate
self.drop_num = int(len(dataset) * drop_rate)
    def __iter__(self):
        arange = np.arange(len(self.dataset))
        np.random.shuffle(arange)
        # dropping is currently disabled: all (shuffled) indices are returned
        # indices = np.sort(arange[:len(self.dataset) - self.drop_num])
        indices = arange
        return iter(indices)
def __len__(self):
return len(self.dataset) - self.drop_num
| 3,073 | 26.693694 | 92 | py |
STEP | STEP-master/src/datasets.py | import torch
import torch.utils.data
import os
import numpy as np
from option import args
import random
import pandas as pd
from utils import get_neighbor_finder, masked_get_neighbor_finder
from operator import itemgetter
class Data:
def __init__(self, sources, destinations, timestamps, edge_idxs, labels):
self.sources = sources
self.destinations = destinations
self.timestamps = timestamps
self.edge_idxs = edge_idxs
self.labels = labels
self.n_interactions = len(sources)
self.unique_nodes = set(sources) | set(destinations)
self.n_unique_nodes = len(self.unique_nodes)
class DygDataset(torch.utils.data.Dataset):
def __init__(self, config, split_flag, split_list=[0.7, 0.15, 0.15]):
self.config = config
dataset_name = '{}/ml_{}'.format(self.config.dir_data, self.config.data_set)
self.full_data, self.positive_eids, self.edge_features, self.node_features = \
self.get_data(dataset_name, split_flag, split_list)
if self.config.mask_edge:
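            # prune the temporal graph using edge keep-scores precomputed by
            # edge_pruning.py (one "<edge_id>\t<score>" line per edge)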
id_list = []
edge_score_list = []
with open(self.config.output_edge_txt) as f:
for idx, line in enumerate(f):
e = line.strip().split('\t')
id = int(e[0])
pred_score = float(e[1])
id_list.append(id)
edge_score_list.append(pred_score)
edge_score_dict = dict(zip(id_list,edge_score_list))
self.ngh_finder = masked_get_neighbor_finder(self.full_data, edge_score_dict, self.config.pruning_ratio,uniform=False)
else:
self.ngh_finder = get_neighbor_finder(self.full_data, uniform=False)
self.index_start = self.positive_eids[0]
def get_data(self, dataset_name, split_flag, split_list):
graph_df = pd.read_csv('{}.csv'.format(dataset_name))
edge_features = np.load('{}.npy'.format(dataset_name))
node_features = np.load('{}_node.npy'.format(dataset_name))
val_time, test_time = list(np.quantile(graph_df.ts, [split_list[0], split_list[0]+ split_list[1]]))
sources = graph_df.u.values
destinations = graph_df.i.values
edge_idxs = graph_df.idx.values
labels = graph_df.label.values
timestamps = graph_df.ts.values
random.seed(2020)
train_mask = np.where(timestamps <= val_time)[0]
test_mask = np.where(timestamps > test_time)[0]
val_mask = np.where(np.logical_and(timestamps <= test_time, timestamps > val_time))[0]
full_data = Data(sources, destinations, timestamps, edge_idxs, labels)
if split_flag == 'train':
positive_eids = train_mask
pass
elif split_flag == 'valid':
positive_eids = val_mask
pass
elif split_flag == 'test':
positive_eids = test_mask
pass
else:
raise RuntimeError(f'no recognize split: {split_flag}')
return full_data, positive_eids, edge_features, node_features
def edge_padding(self, neigh_edge, neigh_time, edge_feat, src_neigh_idx, source_node):
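        # pad with one self-loop edge (zero time delta / zero features) so that
        # nodes with an empty temporal neighborhood still form a valid graph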
neigh_edge = np.concatenate((neigh_edge, np.tile(source_node.reshape(-1, 1), (1, 2))), axis=0)
neigh_time = np.concatenate((neigh_time, np.zeros([1], dtype=neigh_time.dtype)), axis=0)
edge_feat = np.concatenate((edge_feat, np.zeros([1, edge_feat.shape[1]], dtype=edge_feat.dtype)), axis=0)
src_neigh_idx = np.concatenate((src_neigh_idx, np.zeros([1], dtype=src_neigh_idx.dtype)), axis=0)
return neigh_edge, neigh_time, edge_feat, src_neigh_idx
def __getitem__(self, item):
item += self.index_start
source_node = self.full_data.sources[item]
target_node = self.full_data.destinations[item]
current_time = self.full_data.timestamps[item]
label = self.full_data.labels[item]
edge_idx = self.full_data.edge_idxs[item]
src_neigh_edge, src_neigh_time, src_neigh_idx = self.ngh_finder.get_temporal_neighbor_all(source_node,
current_time,
self.config.n_layer,
self.config.n_neighbors)
src_edge_feature = self.edge_features[src_neigh_idx].astype(np.float32)
src_edge_to_time = current_time - src_neigh_time
src_center_node_idx = np.reshape(source_node, [-1])
if src_neigh_edge.shape[0] == 0:
src_neigh_edge, src_edge_to_time, src_edge_feature, src_neigh_idx = self.edge_padding(
src_neigh_edge, src_edge_to_time, src_edge_feature, src_neigh_idx, src_center_node_idx)
label = np.reshape(label, [-1])
return {
'src_center_node_idx': src_center_node_idx,
'src_neigh_edge': torch.from_numpy(src_neigh_edge),
'src_edge_feature': torch.from_numpy(src_edge_feature),
'src_edge_to_time': torch.from_numpy(src_edge_to_time.astype(np.float32)),
'init_edge_index': torch.from_numpy(src_neigh_idx),
'label': torch.from_numpy(label)
}
def __len__(self):
return len(self.positive_eids)
class Collate:
def __init__(self, config):
self.config = config
dataset_name = '{}/ml_{}'.format(self.config.dir_data, self.config.data_set)
self.node_features = np.load('{}_node.npy'.format(dataset_name)).astype(np.float32)
def reindex_fn(self, edge_list, center_node_idx, batch_idx):
edge_list_projection = edge_list.view(-1).numpy().tolist()
edge_list_projection = [str(x) for x in edge_list_projection]
single_batch_idx = torch.unique(batch_idx).numpy().astype(np.int32).tolist()
single_batch_idx = [str(x) for x in single_batch_idx]
batch_idx_projection = batch_idx.reshape([-1, 1]).repeat((1, 2)).view(-1).numpy().astype(np.int32).tolist()
batch_idx_projection = [str(x) for x in batch_idx_projection]
center_node_idx_projection = center_node_idx.tolist()
center_node_idx_projection = [str(x) for x in center_node_idx_projection]
union_edge_list = list(map(lambda x: x[0] + '_' + x[1], zip(batch_idx_projection, edge_list_projection)))
union_center_node_list = list(
map(lambda x: x[0] + '_' + x[1], zip(single_batch_idx, center_node_idx_projection)))
org_node_id = union_edge_list + union_center_node_list
org_node_id = list(set(org_node_id))
new_node_id = torch.arange(0, len(org_node_id)).numpy()
reid_map = dict(zip(org_node_id, new_node_id))
true_org_node_id = [int(x.split('_')[1]) for x in org_node_id]
true_org_node_id = np.array(true_org_node_id)
keys = union_edge_list
new_edge_list = itemgetter(*keys)(reid_map)
new_edge_list = np.array(new_edge_list).reshape([-1, 2])
new_edge_list = torch.from_numpy(new_edge_list)
batch_node_features = self.node_features[true_org_node_id]
new_center_node_idx = np.array(itemgetter(*union_center_node_list)(reid_map))
return new_center_node_idx, new_edge_list, batch_node_features
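    # Reindexing scheme used above: node ids are made batch-local by prefixing
    # them with the sample index ("<batch>_<node>"), e.g. batch 1 / node 7 ->
    # "1_7" and batch 2 / node 7 -> "2_7", so identical global ids in different
    # samples map to distinct rows of the batched node-feature matrix.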
    def get_batchidx_fn(self, edge_list):
        # build a 1-based sample-id vector over all edges: mask[k] = i + 1
        # means edge k belongs to the i-th sample in the batch
        total_edges = np.sum([feat.shape[0] for feat in edge_list])
        mask = torch.zeros((total_edges))
        count = 0
        for i, ifeat in enumerate(edge_list):
            size = ifeat.shape[0]
            mask[count:count+size] = i + 1
            count += size
        return mask
def dyg_collate_fn(self, batch):
src_edge_feat = torch.cat([b['src_edge_feature'] for b in batch], dim=0) #n1,f
src_edge_to_time = torch.cat([b['src_edge_to_time'] for b in batch], dim=0) #n
init_edge_index = torch.cat([b['init_edge_index'] for b in batch], dim=0) # n
src_center_node_idx = np.concatenate([b['src_center_node_idx'] for b in batch], axis=0) #b
batch_idx = self.get_batchidx_fn([b['src_neigh_edge'] for b in batch])
src_neigh_edge = torch.cat([b['src_neigh_edge'] for b in batch], dim=0) #n,2
src_center_node_idx, src_neigh_edge, src_node_features = self.reindex_fn(src_neigh_edge, src_center_node_idx, batch_idx)
label = torch.cat([b['label'] for b in batch], dim=0)
return {
'src_edge_feat':src_edge_feat,
'src_edge_to_time':src_edge_to_time,
'src_center_node_idx':torch.from_numpy(src_center_node_idx),
'src_neigh_edge':src_neigh_edge,
'src_node_features': torch.from_numpy(src_node_features),
'init_edge_index': init_edge_index,
'batch_idx': batch_idx,
'labels':label
}
class RandomDropSampler(torch.utils.data.Sampler):
r"""Samples elements sequentially, always in the same order.
Arguments:
data_source (Dataset): dataset to sample from
"""
def __init__(self, dataset, drop_rate):
self.dataset = dataset
self.drop_rate = drop_rate
self.drop_num = int(len(dataset) * drop_rate)
    def __iter__(self):
        arange = np.arange(len(self.dataset))
        np.random.shuffle(arange)
        # dropping is currently disabled: all (shuffled) indices are returned
        # indices = np.sort(arange[:len(self.dataset) - self.drop_num])
        indices = arange
        return iter(indices)
def __len__(self):
return len(self.dataset) - self.drop_num
if __name__ == '__main__':
config = args
a = DygDataset(config, 'train')
#a = DygDatasetTest(config, 'val')
c = a[5000]
#print(c)
| 9,798 | 39.159836 | 130 | py |
STEP | STEP-master/src/eval_gnn.py | import pytorch_lightning as pyl
import torch
import torch.nn.functional as F
import numpy as np
import datasets as dataset
import torch.utils.data
import sklearn.metrics
from option import args
from model.tgat import TGAT
class ModelLightning(pyl.LightningModule):
def __init__(self, config, backbone):
super().__init__()
self.config = config
self.backbone = backbone
pass
def forward(self, batch):
##ToDo
x = self.backbone(
batch['src_edge_feat'],
batch['src_edge_to_time'],
batch['src_center_node_idx'],
batch['src_neigh_edge'],
batch['src_node_features']
)
return x
def training_step(self, batch, batch_idx):
logits = self(batch)
        labels = batch['labels']
        loss = F.binary_cross_entropy_with_logits(
            logits, labels, reduction='none')
loss = torch.mean(loss)
self.log("loss2", loss, on_step=True, prog_bar=True, logger=False)
return loss
def validation_step(self, batch, batch_idx):
org_logits = self(batch).sigmoid()
return {'org_proba': org_logits, 'label': batch['labels']}
def validation_epoch_end(self, outputs):
org_pred = torch.cat([output['org_proba'] for output in outputs])
label = torch.cat([output['label'] for output in outputs])
if torch.sum(label > 0):
org_valid_auc = sklearn.metrics.roc_auc_score(label.cpu().numpy().flatten(), org_pred.cpu().numpy().flatten())
else:
org_valid_auc = 0
self.log('org_valid_auc', org_valid_auc, sync_dist=True)
self.log('learning rate', self.optimizers(0).param_groups[0]['lr'])
def configure_optimizers(self):
optimizer = torch.optim.AdamW(
self.parameters(),
lr=self.config.learning_rate)
scheduler = torch.optim.lr_scheduler.StepLR(
optimizer, step_size=10, gamma=0.7)
return [optimizer], [scheduler]
def backward(
self, loss, *args, **kargs):
super().backward(loss, *args, **kargs)
for p in self.parameters():
if (p.grad is not None and torch.any(torch.isnan(p.grad))) or \
torch.any(torch.isnan(p)):
                raise RuntimeError('nan happened')
pass
pass
def predict_step(self, batch, batch_idx: int , dataloader_idx: int = None):
scores = self(batch).sigmoid()
labels = batch['labels']
return scores.cpu().numpy().flatten(), labels.cpu().numpy().flatten()
if __name__=='__main__':
config = args
dataset_valid = dataset.DygDataset(config, 'test')
gpus = None if config.gpus == 0 else config.gpus
collate_fn = dataset.Collate(config)
backbone = TGAT(config)
model = ModelLightning(
config, backbone=backbone)
ckpt_file = config.ckpt_file
pretrained_dict = torch.load(ckpt_file)['state_dict']
model_dict = model.state_dict()
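    # keep only the pretrained encoder/scoring submodules; the GSN/GPN heads
    # used during self-supervised training are not needed for evaluation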
state_dict = {k:v for k,v in pretrained_dict.items() if
k.split('.')[1] in ['embedding_module', 'time_encoder', 'node_preocess_fn',
'edge_preocess_fn', 'affinity_score']}
model_dict.update(state_dict)
model.load_state_dict(model_dict)
model.eval()
loader_valid = torch.utils.data.DataLoader(
dataset=dataset_valid,
batch_size=config.batch_size,
shuffle=False,
num_workers=config.num_data_workers,
collate_fn=collate_fn.dyg_collate_fn
)
trainer = pyl.Trainer(
logger=pyl.loggers.CSVLogger('../lightning_logs_gnn'),
gradient_clip_val=0.1,
replace_sampler_ddp=False,
max_epochs=10,
gpus=gpus
)
with torch.no_grad():
        pred = trainer.predict(model, loader_valid)
pass
prob, label = [x[0] for x in pred], [x[1] for x in pred]
prob = np.hstack(prob)
label = np.hstack(label)
org_valid_auc = sklearn.metrics.roc_auc_score(label.astype(int), prob)
print('test_acu>>>>>>>>>>>>>>>>>>', org_valid_auc) | 4,116 | 29.272059 | 122 | py |
STEP | STEP-master/src/edge_pruning.py | import pytorch_lightning as pyl
import torch
import torch.nn.functional as F
import numpy as np
import datasets_edge as dataset
import torch.utils.data
import sklearn
from option import args
from model.precom_model import Precom_Model
class ModelLightning(pyl.LightningModule):
def __init__(self, config, backbone):
super().__init__()
self.config = config
self.backbone = backbone
pass
def forward(self, batch):
##ToDo
x = self.backbone(
batch['edge_feature']
)
return x
def training_step(self, batch, batch_idx):
logits = self(batch)
        labels = batch['labels']
        loss = F.binary_cross_entropy_with_logits(
            logits, labels, reduction='none')
loss = torch.mean(loss)
self.log("loss2", loss, on_step=True, prog_bar=True, logger=False)
return loss
def validation_step(self, batch, batch_idx):
org_logits = self(batch).sigmoid()
return {'org_proba': org_logits, 'label': batch['labels']}
def validation_epoch_end(self, outputs):
org_pred = torch.cat([output['org_proba'] for output in outputs])
label = torch.cat([output['label'] for output in outputs])
if torch.sum(label > 0):
org_valid_auc = sklearn.metrics.roc_auc_score(label.cpu().numpy().flatten(), org_pred.cpu().numpy().flatten())
else:
org_valid_auc = 0
self.log('org_valid_auc', org_valid_auc, sync_dist=True)
self.log('learning rate', self.optimizers(0).param_groups[0]['lr'])
def configure_optimizers(self):
optimizer = torch.optim.AdamW(
self.parameters(),
lr=self.config.learning_rate)
scheduler = torch.optim.lr_scheduler.StepLR(
optimizer, step_size=10, gamma=0.7)
return [optimizer], [scheduler]
def backward(
self, loss, *args, **kargs):
super().backward(loss, *args, **kargs)
for p in self.parameters():
if (p.grad is not None and torch.any(torch.isnan(p.grad))) or \
torch.any(torch.isnan(p)):
                raise RuntimeError('nan happened')
pass
pass
def predict_step(self, batch, batch_idx: int , dataloader_idx: int = None):
scores = self(batch)
proba = torch.softmax(scores, dim=1)[:, 1]
edge_index = batch['edge_idx']
return edge_index.cpu().numpy().flatten(), proba.cpu().numpy().flatten()
if __name__=='__main__':
config = args
datasets = dataset.EdgeDataset(config)
gpus = None if config.gpus == 0 else config.gpus
collate_fn = dataset.Collate(config)
backbone = Precom_Model(config.input_dim, config.hidden_dim, config.drop_out)
model = ModelLightning(
config, backbone=backbone)
ckpt_file = config.ckpt_file
pretrained_dict = torch.load(ckpt_file)['state_dict']
model_dict = model.state_dict()
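    # keep only the GPN ('edge_precom') weights from the full checkpoint and
    # strip the prefix so the keys match the standalone pruning backbone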
state_dict = {k:v for k,v in pretrained_dict.items() if k.split('.')[1] in ['edge_precom'] }
rename_state_dict = { k.replace('.edge_precom', ''):v for k,v in state_dict.items()}
model_dict.update(rename_state_dict)
model.load_state_dict(model_dict)
dataloader = torch.utils.data.DataLoader(
dataset=datasets,
batch_size=config.batch_size,
shuffle=False,
num_workers=config.num_data_workers,
collate_fn=collate_fn.dyg_collate_fn
)
trainer = pyl.Trainer(
accelerator=config.accelerator,
gpus=gpus
)
with torch.no_grad():
pred = trainer.predict(
model, dataloader)
pass
#edge_id = np.hstack(edge_idx)
edge_id, pred_score = [x[0] for x in pred], [x[1] for x in pred]
edge_id = np.hstack(edge_id)
pred_score = np.hstack(pred_score)
output_file = config.output_edge_txt
with open(output_file, 'w') as fout:
for i, (id, score) in enumerate(zip(edge_id, pred_score)):
fout.write(f'{id}\t')
fout.write(f'{score}\n')
pass
pass
pass | 4,096 | 29.125 | 122 | py |
STEP | STEP-master/src/train_gsn.py | import pytorch_lightning as pyl
import torch
import datasets as dataset
import torch.utils.data
from option import args
from model.tgat import TGAT
class ModelLightning(pyl.LightningModule):
def __init__(self, config, backbone):
super().__init__()
self.config = config
self.backbone = backbone
pass
def forward(self, batch):
##ToDo
x = self.backbone.forward_gsn(
batch['src_edge_feat'],
batch['src_edge_to_time'],
batch['src_center_node_idx'],
batch['src_neigh_edge'],
batch['src_node_features'],
batch['init_edge_index'],
batch['batch_idx'],
self.global_step
)
return x
def training_step(self, batch, batch_idx):
x = self(batch)
if self.global_step > 500:
lambda1 = 0.01
else:
lambda1 = 0
loss_mi = x['loss']
loss_sparse = x['loss_sparse']
loss_edge_pre = x['loss_edge_pred']
self.log('loss_mi', loss_mi, on_step=True, prog_bar=True, logger=False)
self.log('loss_sparse', loss_sparse, on_step=True, prog_bar=True, logger=False)
self.log('loss_edge_pre', loss_edge_pre, on_step=True, prog_bar=True, logger=False)
self.log('max_probs', x['max_probs'], on_step=True, prog_bar=True, logger=False)
self.log('min_probs', x['min_probs'], on_step=True, prog_bar=True, logger=False)
loss = loss_mi + 0.01 * loss_sparse + lambda1 * loss_edge_pre
return loss
def validation_step(self, batch, batch_idx):
output = self(batch)
loss_mi = output['loss']
loss_sparse = output['loss_sparse']
loss_edge_pre = output['loss_edge_pred']
return {'loss_mi': loss_mi, 'loss_sparse': loss_sparse, 'loss_edge_pre':loss_edge_pre}
def validation_epoch_end(self, outputs):
loss_mi = torch.cat([output['loss_mi'].reshape([1]) for output in outputs])
loss_sparse = torch.cat([output['loss_sparse'].reshape([1]) for output in outputs])
loss_edge_pre = torch.cat([output['loss_edge_pre'].reshape([1]) for output in outputs])
loss_mi = torch.mean(loss_mi)
loss_sparse = torch.mean(loss_sparse)
loss_edge_pre = torch.mean(loss_edge_pre)
self.log('loss_mi', loss_mi, sync_dist=True)
self.log('loss_sparse', loss_sparse, sync_dist=True)
self.log('loss_edge_pre', loss_edge_pre, sync_dist=True)
self.log('learning rate', self.optimizers(0).param_groups[0]['lr'])
def configure_optimizers(self):
optimizer = torch.optim.AdamW(
self.parameters(),
lr=self.config.learning_rate)
scheduler = torch.optim.lr_scheduler.StepLR(
optimizer, step_size=10, gamma=0.7)
return [optimizer], [scheduler]
def backward(
self, loss, *args, **kargs):
super().backward(loss, *args, **kargs)
for p in self.parameters():
if (p.grad is not None and torch.any(torch.isnan(p.grad))) or \
torch.any(torch.isnan(p)):
raise RuntimeError('nan happend')
pass
pass
    def predict_step(self, batch, batch_idx: int, dataloader_idx: int = None):
        # forward_gsn returns a dict; expose the sampled edge index and the
        # learned edge keep-probabilities for downstream pruning
        output = self(batch)
        return output['edge_index'].cpu().numpy(), output['edge_probs'].cpu().numpy().flatten()
if __name__ == '__main__':
config = args
dataset_train = dataset.DygDataset(config, 'train')
dataset_valid = dataset.DygDataset(config, 'valid')
gpus = None if config.gpus == 0 else config.gpus
collate_fn = dataset.Collate(config)
backbone = TGAT(config)
model = ModelLightning(
config, backbone=backbone)
loader_train = torch.utils.data.DataLoader(
dataset=dataset_train,
batch_size=config.batch_size,
shuffle=False,
num_workers=config.num_data_workers,
pin_memory=True,
# sampler=dataset.RandomDropSampler(dataset_train, 0),
collate_fn=collate_fn.dyg_collate_fn
)
loader_valid = torch.utils.data.DataLoader(
dataset=dataset_valid,
batch_size=config.batch_size,
shuffle=False,
num_workers=config.num_data_workers,
collate_fn=collate_fn.dyg_collate_fn
)
checkpoint_callback = pyl.callbacks.ModelCheckpoint(
monitor = None,
save_top_k = -1,
save_last=True
)
trainer = pyl.Trainer(
logger=pyl.loggers.CSVLogger('../lightning_logs_gsn'),
gradient_clip_val=0.1,
replace_sampler_ddp=False,
max_epochs=10,
gpus=gpus,
callbacks=[checkpoint_callback]
)
trainer.fit(
model, train_dataloaders=loader_train,
val_dataloaders=loader_valid
)
| 4,863 | 31.426667 | 95 | py |
STEP | STEP-master/src/modules/time_encoding.py | import torch
import numpy as np
class TimeEncode(torch.nn.Module):
# Time Encoding proposed by TGAT
def __init__(self, dimension):
super(TimeEncode, self).__init__()
self.dimension = dimension
self.w = torch.nn.Linear(1, dimension)
self.w.weight = torch.nn.Parameter((torch.from_numpy(1 / 10 ** np.linspace(0, 1.5, dimension)))
.float().reshape(dimension, -1))
self.w.bias = torch.nn.Parameter(torch.zeros(dimension).float())
def forward(self, t):
        # t has shape [batch_size] (one time delta per entry)
        # log-scale, then add a trailing dim for the linear layer -> [batch_size, 1]
        t = torch.log(t + 1)
        t = t.unsqueeze(dim=1)
        # output has shape [batch_size, dimension]
        output = torch.cos(self.w(t))
return output
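# --- illustrative sketch, not part of the original file ---
# Shape check for the encoder above; `_demo_time_encode` is our name.
def _demo_time_encode():
    enc = TimeEncode(dimension=4)
    t = torch.tensor([0.0, 1.0, 10.0])  # one time delta per entry, shape [3]
    out = enc(t)                        # cosine features, shape [3, 4], values in [-1, 1]
    assert out.shape == (3, 4)
    return out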
| 802 | 28.740741 | 99 | py |
STEP | STEP-master/src/modules/utils.py | import numpy as np
import torch
class MergeLayer(torch.nn.Module):
def __init__(self, dim1, dim2, dim3, dim4):
super().__init__()
self.layer_norm = torch.nn.LayerNorm(dim1 + dim2)
self.fc1 = torch.nn.Linear(dim1 + dim2, dim3)
self.fc2 = torch.nn.Linear(dim3, dim4)
self.act = torch.nn.ReLU()
torch.nn.init.xavier_normal_(self.fc1.weight)
torch.nn.init.xavier_normal_(self.fc2.weight)
def forward(self, x1, x2):
x = torch.cat([x1, x2], dim=1)
#x = self.layer_norm(x)
h = self.act(self.fc1(x))
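        # residual connection: the fused representation is added back onto x2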
return self.fc2(h) + x2
class MergeLayer_output(torch.nn.Module):
def __init__(self, dim1, dim2, dim3= 1024, dim4=1, drop_out=0.2):
super().__init__()
self.fc1 = torch.nn.Linear(dim1 + dim2, dim3)
self.fc2 = torch.nn.Linear(dim3, dim3)
self.fc3 = torch.nn.Linear(dim3, dim2)
self.fc4 = torch.nn.Linear(dim2 , dim4 )
self.act = torch.nn.ReLU()
self.dropout = torch.nn.Dropout(p=drop_out)
torch.nn.init.xavier_normal_(self.fc1.weight)
torch.nn.init.xavier_normal_(self.fc2.weight)
def forward(self, x1, x2):
x = torch.cat([x1, x2], dim=1)
h = self.act(self.fc1(x))
h = self.act(self.fc2(h))
h = self.dropout(self.act(self.fc3(h)))
h = self.fc4(h)
return h
class Feat_Process_Layer(torch.nn.Module):
def __init__(self, dim1, dim2):
super().__init__()
self.fc1 = torch.nn.Linear(dim1, dim2)
self.fc2 = torch.nn.Linear(dim2, dim2)
self.act = torch.nn.ReLU()
torch.nn.init.xavier_normal_(self.fc1.weight)
torch.nn.init.xavier_normal_(self.fc2.weight)
def forward(self, x):
h = self.act(self.fc1(x))
return self.fc2(h)
| 1,731 | 26.935484 | 67 | py |
STEP | STEP-master/src/modules/temporal_attention.py | import torch
import torch_scatter as scatter
from torch import nn
from modules.utils import MergeLayer
class TemporalAttentionLayer2(torch.nn.Module):
"""
Temporal attention layer. Return the temporal embedding of a node given the node itself,
its neighbors and the edge timestamps.
"""
def __init__(self, n_node_features, n_neighbors_features, n_edge_features, time_dim,
output_dimension, n_head=2,
dropout=0.1):
super(TemporalAttentionLayer2, self).__init__()
self.time_dim = time_dim
self.num_heads = n_head
self.reverse_flag = True
self.selfloop_flag = True
self.query_dim = n_node_features + time_dim
self.key_dim = n_node_features + time_dim + n_edge_features
self.out_dim = output_dimension
self.d_k = self.out_dim // self.num_heads
self.scale = self.d_k ** (-0.5)
self.q_linears = torch.nn.Sequential( torch.nn.Linear(self.query_dim, self.out_dim), torch.nn.ReLU())
self.k_linears = torch.nn.Sequential(torch.nn.Linear(self.key_dim, self.out_dim), torch.nn.ReLU())
self.v_linears = torch.nn.Linear(self.key_dim, self.out_dim)
self.dropout = torch.nn.Dropout(dropout)
self.merger = MergeLayer(n_node_features, n_node_features, n_node_features, output_dimension)
def forward(self, node_feature, edge_index, edge_feature, src_time_features, edge_time, mask=None, sample_ratio=None):
        '''
        :param node_feature: [N, D] node embeddings
        :param edge_index: [E, 2] (target node i, source node j) pairs
        :param edge_feature: [E, D] edge features
        :param src_time_features: [E, D_t] time encoding on the target side
        :param edge_time: [E, D_t] time encoding of the edge's time delta
        :param mask: optional [E] binary mask; masked-out edges are dropped
        :param sample_ratio: optional [E] soft edge weights from the GSN
        :return: [N, output_dimension] updated node embeddings
        '''
if mask is not None and sample_ratio is None:
edge_index, edge_feature, src_time_features, edge_time = self.mask_edge(edge_index,
edge_feature,
src_time_features,
edge_time,
mask)
if self.reverse_flag:
edge_index, edge_feature, src_time_features, edge_time, sample_ratio = self.reverse_edge(edge_index,
edge_feature,
src_time_features,
edge_time, sample_ratio)
if self.selfloop_flag:
edge_index, edge_feature, src_time_features, edge_time, sample_ratio = self.add_selfloop(node_feature,
edge_index,
edge_feature,
src_time_features,
edge_time, sample_ratio)
node_i = edge_index[:, 0]
node_j = edge_index[:, 1]
node_feat_i = node_feature[node_i, :]
node_feat_j = node_feature[node_j, :]
source_node_vec = torch.cat([node_feat_i, src_time_features], dim=1)
target_node_vec = torch.cat([node_feat_j, edge_feature, edge_time], dim=1)
q_mat = torch.reshape(self.q_linears(source_node_vec), [-1, self.num_heads, self.d_k]) # [T, N , D]
k_mat = torch.reshape(self.k_linears(target_node_vec) , [-1, self.num_heads, self.d_k]) # [T, N , D]
v_mat = torch.reshape(self.v_linears(target_node_vec) , [-1, self.num_heads, self.d_k]) # [T, N , D]
res_att_sub = torch.sum(torch.multiply(q_mat, k_mat), dim=-1 )* self.scale #[T, N]
'''
Softmax based on target node's id (edge_index_i). Store attention value in self.att.
'''
if sample_ratio is not None:
res_att_sub = torch.multiply(res_att_sub, sample_ratio.reshape([-1,1]).repeat(1, self.num_heads))
scores = self.scatter_softmax(res_att_sub, node_i)
#if self.dropout is not None:
# scores = self.dropout(scores)
v = torch.multiply(torch.unsqueeze(scores, dim=2), v_mat)
v = torch.reshape(v, [-1, self.out_dim])
out_emb = scatter.scatter_add(v, node_i, dim=0)
out_emb = self.agg_out(node_feature, out_emb)
return out_emb
def scatter_softmax(self, res_att, node_i):
n_head = self.num_heads
scores = torch.zeros_like(res_att)
for i in range(n_head):
scores[:, i] = scatter.composite.scatter_softmax(res_att[:, i], node_i)
return scores
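    # e.g. with node_i = [0, 0, 1], the two rows attending to node 0 are
    # softmax-normalized against each other per head, while the single row
    # for node 1 receives weight 1.0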
def reverse_edge(self, edge_index, edge_feature, src_time_features, edge_time, sample_ratio):
reverse_edge_index = torch.cat((edge_index[:, 1].unsqueeze(1), edge_index[:, 0].unsqueeze(1)), dim=1)
two_edge_index = torch.cat((edge_index, reverse_edge_index), dim=0)
src_time_features = src_time_features.repeat(2, 1)
edge_feature = edge_feature.repeat(2, 1)
edge_time = edge_time.repeat(2, 1)
if sample_ratio is not None:
sample_ratio = sample_ratio.repeat(2)
return two_edge_index, edge_feature, src_time_features, edge_time, sample_ratio
def add_selfloop(self, node_feature, edge_index, edge_feature, src_time_features, edge_time, sample_ratio):
time_emb_unit = src_time_features[0, :].reshape(1, -1)
node_id = torch.arange(0, node_feature.shape[0], device=edge_index.device).reshape(-1,1)
edge_index = torch.cat([edge_index, node_id.repeat(1,2)], dim=0)
edge_feature = torch.cat([edge_feature, torch.zeros([node_id.shape[0], edge_feature.shape[1]], dtype=edge_feature.dtype, device=edge_feature.device)], dim=0)
src_time_features = torch.cat([src_time_features, time_emb_unit.repeat(node_id.shape[0], 1)], dim=0)
edge_time = torch.cat([edge_time, time_emb_unit.repeat(node_id.shape[0], 1)], dim=0)
if sample_ratio is not None:
sample_ratio =torch.cat([sample_ratio, torch.ones([node_id.shape[0]], dtype=sample_ratio.dtype, device=sample_ratio.device)])
return edge_index, edge_feature, src_time_features, edge_time, sample_ratio
def mask_edge(self, edge_index, edge_feature, src_time_features, edge_time, mask):
retain_index = torch.nonzero(mask).reshape([-1])
edge_index = edge_index[retain_index]
edge_feature = edge_feature[retain_index]
src_time_features = src_time_features[retain_index]
edge_time = edge_time[retain_index]
return edge_index, edge_feature, src_time_features, edge_time
def agg_out(self, node_feat_pre, node_rep):
out_embedding = self.merger(node_rep, node_feat_pre)
return out_embedding | 6,626 | 43.47651 | 161 | py |
STEP | STEP-master/src/modules/embedding_module.py | import torch
from torch import nn
import numpy as np
import math
from modules.temporal_attention import TemporalAttentionLayer2
class EmbeddingModule(nn.Module):
def __init__(self, time_encoder, n_layers,
node_features_dims, edge_features_dims, time_features_dim, hidden_dim, dropout):
super(EmbeddingModule, self).__init__()
self.time_encoder = time_encoder
self.n_layers = n_layers
self.n_node_features = node_features_dims
self.n_edge_features = edge_features_dims
self.n_time_features = time_features_dim
self.dropout = dropout
self.embedding_dimension = hidden_dim
def compute_embedding(self, neigh_edge, edge_to_time, edge_feat, node_feat):
pass
class GraphEmbedding(EmbeddingModule):
def __init__(self, time_encoder, n_layers,
node_features_dims, edge_features_dims, time_features_dim, hidden_dim, n_heads=2, dropout=0.1):
super(GraphEmbedding, self).__init__(time_encoder, n_layers,
node_features_dims, edge_features_dims, time_features_dim,
hidden_dim, dropout)
def compute_embedding(self, neigh_edge, edge_to_time, edge_feat, node_feat, edge_mask=None, sample_ratio=None):
'''
:param neigh_edge: [E, 2]
:param edge_to_time: [E]
:param edge_feat: [E, D]
:param node_feat: [N, D]
:return:
'''
n_layers = self.n_layers
assert (n_layers >= 0)
temp_node_feat = node_feat
src_time_embeddings = self.time_encoder(torch.zeros_like(edge_to_time))
edge_time_embeddings = self.time_encoder(edge_to_time)
mask = edge_mask
for layer in range(n_layers):
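            # note: n_layers (not layer) is passed down, so the same (last)
            # attention block is reused at every propagation hop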
temp_node_feat = self.aggregate(n_layers, temp_node_feat,
neigh_edge,
edge_feat,
src_time_embeddings,
edge_time_embeddings,
mask, sample_ratio)
out = temp_node_feat
return out
def aggregate(self, n_layers, node_features, edge_index,
edge_feature,
src_time_features, edge_time_embeddings, mask, sample_ratio):
return None
# class GraphSumEmbedding(GraphEmbedding):
# def __init__(self, time_encoder, n_layers, node_features_dims, edge_features_dims,
# time_features_dim, hidden_dim, n_heads=2, dropout=0.1):
# super(GraphSumEmbedding, self).__init__( time_encoder=time_encoder, n_layers=n_layers,
# node_features_dims=node_features_dims,
# edge_features_dims=edge_features_dims,
# time_features_dim=time_features_dim,
# hidden_dim=hidden_dim,
# n_heads=n_heads, dropout=dropout)
#
# self.linear_1 = torch.nn.ModuleList([torch.nn.Linear(hidden_dim + time_features_dim +
# edge_features_dims, hidden_dim)
# for _ in range(n_layers)])
# self.linear_2 = torch.nn.ModuleList(
# [torch.nn.Linear(hidden_dim + node_features_dims + time_features_dim,
# hidden_dim) for _ in range(n_layers)])
#
# def aggregate(self, n_layer, source_node_features, source_nodes_time_embedding,
# neighbor_embeddings,
# edge_time_embeddings, edge_features, mask):
# neighbors_features = torch.cat([neighbor_embeddings, edge_time_embeddings, edge_features],
# dim=2)
# neighbor_embeddings = self.linear_1[n_layer - 1](neighbors_features)
# neighbors_sum = torch.nn.functional.relu(torch.sum(neighbor_embeddings, dim=1))
#
# source_features = torch.cat([source_node_features,
# source_nodes_time_embedding.squeeze()], dim=1)
# source_embedding = torch.cat([neighbors_sum, source_features], dim=1)
# source_embedding = self.linear_2[n_layer - 1](source_embedding)
#
# return source_embedding
class GraphAttentionEmbedding(GraphEmbedding):
def __init__(self, time_encoder, n_layers, node_features_dims, edge_features_dims,
time_features_dim, hidden_dim, n_heads=2, dropout=0.1):
super(GraphAttentionEmbedding, self).__init__(time_encoder, n_layers,
node_features_dims, edge_features_dims,
time_features_dim,
hidden_dim,
n_heads, dropout)
self.attention_models = torch.nn.ModuleList([TemporalAttentionLayer2(
n_node_features=node_features_dims,
n_neighbors_features=node_features_dims,
n_edge_features=edge_features_dims,
time_dim=time_features_dim,
n_head=n_heads,
dropout=dropout,
output_dimension=hidden_dim)
for _ in range(n_layers)])
def aggregate(self, n_layer, node_features, edge_index,
edge_feature,
src_time_features, edge_time_embeddings, mask, sample_ratio):
attention_model = self.attention_models[n_layer - 1]
source_embedding = attention_model(node_features,
edge_index,
edge_feature,
src_time_features,
edge_time_embeddings,
mask, sample_ratio)
return source_embedding
def get_embedding_module(module_type, time_encoder, n_layers,
node_features_dims, edge_features_dims, time_features_dim,
hidden_dim, n_heads=2, dropout=0.1):
if module_type == "graph_attention":
return GraphAttentionEmbedding( time_encoder=time_encoder,
n_layers=n_layers,
node_features_dims=node_features_dims,
edge_features_dims=edge_features_dims,
time_features_dim=time_features_dim,
hidden_dim=hidden_dim,
n_heads=n_heads, dropout=dropout)
# elif module_type == "graph_sum":
# return GraphSumEmbedding(time_encoder=time_encoder,
# n_layers=n_layers,
# node_features_dims=node_features_dims,
# edge_features_dims=edge_features_dims,
# time_features_dim=time_features_dim,
# hidden_dim=hidden_dim,
# n_heads=n_heads, dropout=dropout)
else:
raise ValueError("Embedding Module {} not supported".format(module_type))
| 7,015 | 42.57764 | 113 | py |
STEP | STEP-master/src/model/tgat.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import torch_scatter as scatter
from modules.utils import MergeLayer_output, Feat_Process_Layer
from modules.embedding_module import get_embedding_module
from modules.time_encoding import TimeEncode
from model.gsn import Graph_sampling_network
from model.gpn import Graph_pruning_network
class TGAT(torch.nn.Module):
def __init__(self, config, embedding_module_type="graph_attention"):
super().__init__()
self.cfg = config
self.nodes_dim = self.cfg.input_dim
self.edge_dim = self.cfg.input_dim
self.dims = self.cfg.hidden_dim
self.n_heads = self.cfg.n_heads
self.dropout = self.cfg.drop_out
self.n_layers = self.cfg.n_layer
self.mode = self.cfg.mode
self.time_encoder = TimeEncode(dimension=self.dims)
self.embedding_module_type = embedding_module_type
self.embedding_module = get_embedding_module(module_type=embedding_module_type,
time_encoder=self.time_encoder,
n_layers=self.n_layers,
node_features_dims=self.dims,
edge_features_dims=self.dims,
time_features_dim=self.dims,
hidden_dim=self.dims,
n_heads=self.n_heads, dropout=self.dropout)
self.node_preocess_fn = Feat_Process_Layer(self.nodes_dim, self.dims)
self.edge_preocess_fn = Feat_Process_Layer(self.edge_dim, self.dims)
self.affinity_score = MergeLayer_output(self.dims, self.dims, drop_out=0.2)
self.predictor = nn.Sequential(nn.Linear(self.dims, self.dims)) # output layer
self.gsn = Graph_sampling_network(self.dims, self.cfg.batch_size, mask_ratio=self.cfg.prior_ratio)
self.edge_precom = Graph_pruning_network(self.edge_dim, self.dims, self.dropout)
def forward(self, src_org_edge_feat, src_edge_to_time, src_center_node_idx, src_neigh_edge, src_node_features):
# apply tgat
source_node_embedding, src_edge_feat = self.compute_temporal_embeddings(src_neigh_edge, src_edge_to_time,
src_org_edge_feat, src_node_features)
loclsrc_node_embedding = source_node_embedding[src_center_node_idx, :]
score = self.affinity_score(loclsrc_node_embedding, loclsrc_node_embedding)
return score
def forward_gsn(self, src_org_edge_feat, src_edge_to_time, src_center_node_idx, src_neigh_edge, src_node_features,
init_edge_index, batch_idx, step=0):
# apply tgat
source_node_embedding, src_edge_feat = self.compute_temporal_embeddings(src_neigh_edge, src_edge_to_time,
src_org_edge_feat, src_node_features)
loclsrc_node_embedding = source_node_embedding[src_center_node_idx,:]
source_node_embedding_clone = source_node_embedding
src_edge_feat_clone = src_edge_feat
time_encodding = self.time_encoder(src_edge_to_time)
src_edge_probs, src_edge_mask = self.gsn.forward(source_node_embedding_clone, src_neigh_edge, time_encodding,
src_edge_feat_clone, batch_idx, src_center_node_idx)
gsn_node_embedding, _ = self.compute_temporal_embeddings(src_neigh_edge, src_edge_to_time,
src_org_edge_feat, src_node_features,
None, src_edge_probs)
gsnsrc_node_embedding = gsn_node_embedding[src_center_node_idx, :]
unique_edge_label = self.Merge_same_edge(init_edge_index, src_edge_mask)
temp_edge_label = unique_edge_label.long()
edge_logit = self.edge_precom(src_org_edge_feat)
loss_edge_pred = self.edge_precom.loss(edge_logit.reshape([-1, 2]), temp_edge_label)
loss_sparse = self.gsn.sparse_loss(src_edge_probs)
loss_mi = self.ddgcl(loclsrc_node_embedding, gsnsrc_node_embedding)
max_probs = torch.max(src_edge_probs)
min_probs = torch.min(src_edge_probs)
return {'loss': loss_mi, 'loss_sparse': loss_sparse, 'loss_edge_pred':loss_edge_pred,
'edge_index': src_neigh_edge, 'edge_probs': src_edge_probs,
'max_probs':max_probs, 'min_probs':min_probs}
def compute_temporal_embeddings(self, neigh_edge, edge_to_time, edge_feat, node_feat, edge_mask=None, sample_ratio=None):
node_feat = self.node_preocess_fn(node_feat)
edge_feat = self.edge_preocess_fn(edge_feat)
node_embedding = self.embedding_module.compute_embedding(neigh_edge, edge_to_time,
edge_feat, node_feat, edge_mask, sample_ratio)
return node_embedding, edge_feat
def ddgcl(self, x1, x2):
x1 = self.predictor(x1)
l_pos = torch.sigmoid(torch.sum(x1 * x2, dim=-1)).reshape([-1, 1])
l_neg = torch.sigmoid(torch.sum(torch.einsum('nc,kc->nkc', x1, x2), dim=-1))
matrix = torch.diag_embed(torch.diag(l_neg))
l_neg = l_neg - matrix
label1 = torch.ones_like(l_pos)
label2 = torch.zeros_like(l_neg)
logits = torch.cat([l_pos, l_neg], dim=1).reshape([-1])
labels = torch.cat([label1, label2], dim=1).reshape([-1])
loss_bce = torch.nn.BCELoss()
loss = loss_bce(logits, labels)
return loss
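    # ddgcl: binary contrastive objective. Diagonal (x1_i, x2_i) pairs are the
    # positives (label 1); all cross-pairs in the batch, with the diagonal
    # zeroed out, serve as negatives (label 0).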
def Merge_same_edge(self, init_edge_index, src_edge_mask):
output, _ = scatter.scatter_max(src_edge_mask, init_edge_index, dim=0)
output = output[init_edge_index]
return output | 5,947 | 48.983193 | 125 | py |
STEP | STEP-master/src/model/gsn.py | import torch
import torch.nn.functional as F
import torch_scatter as scatter
class Graph_sampling_network(torch.nn.Module):
def __init__(self, dim, batch_size, mask_ratio=0.5):
super(Graph_sampling_network, self).__init__()
self.mask_act = 'sigmoid'
self.mask_ratio = mask_ratio
self.dim = dim
self.batch_size = batch_size
self.elayers1 = torch.nn.Sequential(
torch.nn.Linear(self.dim * 4, self.dim),
torch.nn.ReLU()
)
self.elayers3 = torch.nn.Sequential(
torch.nn.Linear(2 + self.dim, 1)
#torch.nn.Linear(2, 1)
)
# torch.nn.init.xavier_normal_(self.elayers2.weight)
def concrete_sample(self, log_alpha, beta=1.0):
if self.training:
bias = 0.1
random_noise = torch.empty(log_alpha.shape, dtype=log_alpha.dtype, device=log_alpha.device).uniform_(bias, 1-bias)
gate_inputs = torch.log(random_noise) - torch.log(1-random_noise)
gate_inputs = (gate_inputs + log_alpha) / beta
gate_inputs = torch.sigmoid(gate_inputs)
else:
gate_inputs = torch.sigmoid(log_alpha)
return gate_inputs
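    # concrete_sample implements a binary-concrete (Gumbel-sigmoid) relaxation:
    # u ~ U(0.1, 0.9), then sigmoid((logit(u) + log_alpha) / beta), giving a
    # differentiable, approximately Bernoulli(sigmoid(log_alpha)) edge gate.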
def forward(self, node_embeddings, edge_index, time_encodding, edge_feat, batch_idx, src_center_node_idx):
node_i = edge_index[:, 0]
node_j = edge_index[:, 1]
node_feat_i = node_embeddings[node_i, :]
node_feat_j = node_embeddings[node_j, :]
center_node_feat = node_embeddings[src_center_node_idx, :]
h = torch.cat([node_feat_i, node_feat_j, edge_feat, time_encodding], dim=1)
h1 = self.elayers1(h)
redundancy_score = self.redundancy_attention(h1) #[n, 1]
relevance_score = self.relevance_attention(h1, batch_idx.long(), center_node_feat) #[n, 1]
attn_score = torch.cat([redundancy_score, relevance_score, h1], dim=-1)
log_alpha = self.elayers3(attn_score)
edge_sample_probs = self.concrete_sample(log_alpha)
edge_sample_probs = edge_sample_probs.reshape([-1])
_, rank_idx = edge_sample_probs.sort(dim=0)
cut_off_nums = round(edge_sample_probs.shape[0] * self.mask_ratio)
low_idx = rank_idx[:cut_off_nums]
high_idx = rank_idx[cut_off_nums:]
edge_mask = edge_sample_probs.clone().detach()
edge_mask[low_idx] = 0
edge_mask[high_idx] = 1
return edge_sample_probs, edge_mask.byte()
def redundancy_attention(self, x):
x = F.normalize(x, p=2, dim=1)
dots = x @ x.transpose(-1, -2) #[m, m]
attn = torch.softmax(dots, dim=-1)
out = attn - torch.diag_embed(torch.diag(attn))
out = torch.sum(out, dim=-1)
return out.reshape([-1, 1])
def relevance_attention(self, x, batch_id, center_node_feat):
all_node_feat = center_node_feat[batch_id-1, :]
dots = torch.sum(torch.multiply(x, all_node_feat), dim=-1 )
attn = scatter.composite.scatter_softmax(dots, batch_id)
return attn.reshape([-1, 1])
def drop_edge(self, edge_index, batch_idx):
edge_sample_probs = torch.rand(edge_index.shape[0])
# y = torch.unique(batch_idx)
# mask_idx = [ self.get_mask_by_batch_fn(edge_sample_probs, batch_idx, x) for x in y]
# low_idx = torch.cat([x[0] for x in mask_idx], dim=0)
# high_idx= torch.cat([x[1] for x in mask_idx], dim=0)
_, rank_idx = edge_sample_probs.sort(dim=0)
cut_off_nums = round(edge_sample_probs.shape[0] * self.mask_ratio)
low_idx = rank_idx[:cut_off_nums]
high_idx = rank_idx[cut_off_nums:]
edge_mask = edge_sample_probs.clone()
edge_mask[low_idx] = 0
edge_mask[high_idx] = 1
return edge_mask.byte()
def get_mask_by_batch_fn(self, edge_sample_probs, batch_idx, x):
index = torch.nonzero(torch.where(batch_idx == x, batch_idx.clone().detach(), torch.tensor(0.0, device=x.device))).reshape([-1])
edge_sample_probs = edge_sample_probs[index]
_, rank_idx = edge_sample_probs.sort(dim=0)
cut_off_nums = round(edge_sample_probs.shape[0] * self.mask_ratio)
low_idx = rank_idx[:cut_off_nums]
true_low_idx = index[low_idx]
high_idx = rank_idx[cut_off_nums:]
true_high_idx = index[high_idx]
return true_low_idx, true_high_idx
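    # sparse_loss pushes the gate distribution toward Bernoulli(1 - mask_ratio):
    # loss_2 matches the mean to (1 - mask_ratio) and loss_1 matches the
    # variance to mask_ratio * (1 - mask_ratio).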
def sparse_loss(self, log_alpha):
var_x = torch.mean(log_alpha * log_alpha) - torch.mean(log_alpha) * torch.mean(log_alpha)
loss_1 = torch.abs(var_x - self.mask_ratio * (1 - self.mask_ratio))
loss_2 = torch.abs(torch.mean(log_alpha) - (1 - self.mask_ratio))
loss = 1 * loss_1 + 1 * loss_2
return loss | 4,736 | 37.201613 | 136 | py |
STEP | STEP-master/src/model/gpn.py | import torch
from modules.utils import MergeLayer_output, Feat_Process_Layer
class Graph_pruning_network(torch.nn.Module):
def __init__(self, input_dim, hidden_dim, drop_out):
super(Graph_pruning_network, self).__init__()
self.edge_dim = input_dim
self.dims = hidden_dim
self.dropout = drop_out
self.affinity_score = Precomput_output(self.edge_dim, self.dims, 2, drop_out=0.2)
self.loss = torch.nn.CrossEntropyLoss()
def forward(self, edge_feat):
edge_logit = self.affinity_score(edge_feat)
return edge_logit
class Precomput_output(torch.nn.Module):
def __init__(self, dim1, dim2, dim3=2, drop_out=0.2):
super().__init__()
self.fc1 = torch.nn.Linear(dim1, dim2)
self.fc2 = torch.nn.Linear(dim2, dim2)
self.fc3 = torch.nn.Linear(dim2, dim3)
self.act = torch.nn.ReLU()
self.dropout = torch.nn.Dropout(p=drop_out)
torch.nn.init.xavier_normal_(self.fc1.weight)
torch.nn.init.xavier_normal_(self.fc2.weight)
torch.nn.init.xavier_normal_(self.fc3.weight)
def forward(self, x):
h = self.act(self.fc1(x))
h = self.dropout(self.act(self.fc2(h)))
h = self.fc3(h)
h = self.concrete_sample(h)
return h
def concrete_sample(self, log_alpha, beta=1.0):
if self.training:
log_alpha = log_alpha.reshape([-1])
bias = 0.1
            random_noise = torch.empty(log_alpha.shape, dtype=log_alpha.dtype, device=log_alpha.device).uniform_(bias, 1-bias)
gate_inputs = torch.log(random_noise) - torch.log(1-random_noise)
gate_inputs = (gate_inputs + log_alpha) / beta
gate_inputs = gate_inputs.reshape([-1, 2])
else:
gate_inputs = log_alpha
return gate_inputs
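    # Note: unlike the edge-gating variant of concrete_sample used by the
    # explainer module, this version leaves the perturbed logits un-squashed
    # and reshapes them to [-1, 2]; the two columns act as class logits,
    # matching the CrossEntropyLoss held by Graph_pruning_network above.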
| 1,839 | 34.384615 | 128 | py |
SIT | SIT-master/tree_util.py | import numpy as np
import math
import matplotlib.pyplot as plt
import ipdb
import torch
def rotation_matrix(thea):
return np.array([
[np.cos(thea), -1 * np.sin(thea)],
[np.sin(thea), np.cos(thea)]
])
def generating_tree(seq, dir_list, split_interval=4, degree=3):
# seq [N n seq_len 2]
# dir_list left, right, straight
# return ----> [N n*degree seq_len+interval 2]
tree = np.zeros((seq.shape[0], seq.shape[1] * degree, seq.shape[2] + split_interval, seq.shape[-1]))
for i in range(degree):
curr_seq = seq
curr_dir = np.expand_dims(dir_list[i], 2) # N 1 1 2
for j in range(split_interval):
next_point = curr_seq[:, :, -1:] + curr_dir
curr_seq = np.concatenate((curr_seq, next_point), axis=-2)
tree[:, seq.shape[1] * i:seq.shape[1] * (i + 1)] = curr_seq
return tree
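# Illustrative shape walk-through for generating_tree (a sketch, not part of
# the pipeline): starting from seq of shape [N, n, L, 2] and `degree` candidate
# directions, each branch is extended by `split_interval` constant-velocity
# steps, giving [N, n * degree, L + split_interval, 2]:
#
#   seq = np.zeros((5, 1, 8, 2))  # N=5 trajectories, 1 branch, 8 observed steps
#   dirs = get_dir(seq, thea=6, degree=3)  # straight + two rotated copies
#   out = generating_tree(seq, dirs, split_interval=4, degree=3)
#   assert out.shape == (5, 3, 12, 2)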
def get_dir(seq, thea=12, degree=3, dir_interval=1):
straight_dir = seq[:, :, -1] - seq[:, :, -dir_interval-1] # N n 2
straight_dir = straight_dir / dir_interval
dir_list = [straight_dir]
num_thea = int((degree - 1) / 2)
for i in range(num_thea):
th = (i + 1) * math.pi / thea
left_dir = np.matmul(np.expand_dims(rotation_matrix(th), 0), np.transpose(straight_dir, (0, 2, 1)))
right_dir = np.matmul(np.expand_dims(rotation_matrix(-th), 0), np.transpose(straight_dir, (0, 2, 1)))
left_dir = np.transpose(left_dir, (0, 2, 1))
right_dir = np.transpose(right_dir, (0, 2, 1))
dir_list.append(left_dir)
dir_list.append(right_dir)
return dir_list
def tree_v3(traj_seq, degree, split_interval, pred_len=12, thea=12):
# traj_seq [N obs_len 2]
basic_tree = traj_seq # N obs_len 2
basic_tree = np.expand_dims(basic_tree, 1) # N 1 obs_len 2
dir_list = get_dir(basic_tree, thea=thea, degree=degree) # split directions with the angle=pi/thea
tree = generating_tree(basic_tree, dir_list, split_interval, degree)
# angle= [4, 4]
for i in range(1, int(np.ceil(pred_len / split_interval))):
tree = generating_tree(tree, dir_list, split_interval, degree)
dir_list = get_dir(tree, 12 // (i + 1), degree=degree)
# dir_list = get_dir(tree, angle[i-1], degree=degree)
# dir_list = get_dir(tree, thea, degree=degree)
return tree
def tree_build(traj_batches, split_interval=4, degree=3, pred_len=12, obs_len=8, thea=6):
assert 1 <= split_interval <= pred_len
tree_batches = []
for b in traj_batches:
obs_traj = b[:, :obs_len]
tree = tree_v3(obs_traj, degree, split_interval, pred_len=pred_len, thea=thea)
tree_batches.append(tree[:, :, obs_traj.shape[1]:b.shape[1]]) # truncating if over-length
return tree_batches
def coarse_gt(full_trajs):
# full_traj N pred_len+1 2
obs_end_fut_traj = full_trajs[:, 7:]
obs_traj = full_trajs[:, :8]
selected_point = [0, 4, 8, 12]
selected_seq = obs_end_fut_traj[:, selected_point]
high_vel = selected_seq[:, 1:] - selected_seq[:, :-1]
high_vel = high_vel / 4
for i in range(12):
if i < 4:
next_point = obs_traj[:, -1:] + high_vel[:, 0:1]
obs_traj = np.concatenate((obs_traj, next_point), axis=1)
if 4 <= i < 8:
next_point = obs_traj[:, -1:] + high_vel[:, 1:2]
obs_traj = np.concatenate((obs_traj, next_point), axis=1)
if 8 <= i < 12:
next_point = obs_traj[:, -1:] + high_vel[:, 2:3]
obs_traj = np.concatenate((obs_traj, next_point), axis=1)
gt_ = obs_traj[:, 8:]
return gt_
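# Note on coarse_gt: frames 7, 11, 15 and 19 of the 20-step trajectory
# (indices 0, 4, 8, 12 of obs_end_fut_traj) serve as waypoints; each 4-frame
# segment is then re-rolled at its segment's constant per-step velocity,
# yielding a piecewise-linear 12-step target for the coarse prediction head.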
def tree_label(tree, traj_seq):
closet_branch_index_batches = []
coarse_gt_list = []
for i in range(len(tree)):
gt = coarse_gt(traj_seq[i])
coarse_gt_list.append(gt)
gt = np.expand_dims(gt, 1) # N 1 pred_len 2
tr = tree[i]
distance_branch = np.linalg.norm(tr - gt, axis=-1) # N n T
# ade = np.mean(distance_branch, axis=-1)
fde = distance_branch[:, :, -1]
# distance = ade + fde
# distance_branch = np.max(distance_branch, axis=-1) # N n
# one-hot label
closet_branch_index = np.argmin(fde, axis=-1)
closet_branch_index_batches.append(closet_branch_index)
return closet_branch_index_batches, coarse_gt_list
def tree_build_iter(traj, split_interval=4, degree=3, pred_len=12, thea=12):
traj = traj.permute(0, 2, 1)
traj = traj[:, :, 2:]
traj = traj.numpy()
assert 1 <= split_interval <= pred_len
obs_traj = traj
tree = tree_v3(obs_traj, degree, split_interval, pred_len=pred_len, thea=thea)
tree = tree - tree[:, :, 7:8]
tree = tree[:, :, obs_traj.shape[1]:20]
return torch.from_numpy(tree).float()
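# Note: the `tree_label` below re-defines (and therefore shadows) the version
# defined earlier in this file, so Python keeps this later definition; the two
# differ only in commented-out dead code and unused locals (`interval`, `ade`).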
def tree_label(tree, traj_seq):
# label_batches = []
closest_branch_index_batches = []
# closest_dir_index_batches = []
coarse_gt_list = []
interval = 4
for i in range(len(tree)):
# gt = traj_seq[i][:, 8:]
gt = coarse_gt(traj_seq[i])
coarse_gt_list.append(gt)
gt = np.expand_dims(gt, 1) # N 1 pred_len 2
tr = tree[i]
# dir = snowflake_[i] # N n interval 2
# distance_dir = np.linalg.norm(dir - gt[:, :, :interval], axis=-1) # N n T
# distance_dir = np.max(distance_dir, axis=-1) # N n
# one-hot label
# closet_dir_index = np.argmin(distance_dir, axis=-1) # N
# closet_dir_index_batches.append(closet_dir_index)
#
# ade = np.linalg.norm(tr - gt, axis=-1).mean(axis=-1) # N n
# distance_ = np.exp(-ade)
# dis_sum = np.sum(distance_, axis=1, keepdims=True)
# soft_label = distance_ / dis_sum
# min_fde_index = np.argmin(ade, axis=-1)
# label_batches.append(min_fde_index)
distance_branch = np.linalg.norm(tr - gt, axis=-1) # N n T
ade = np.mean(distance_branch, axis=1)
fde = distance_branch[:, :, -1]
# distance_branch = np.max(distance_branch, axis=-1) # N n
# one-hot label
closet_branch_index = np.argmin(fde, axis=-1)
# sec_fde_index = np.argsort(fde, axis=-1)[:, 1]
closest_branch_index_batches.append(closet_branch_index)
return closest_branch_index_batches, coarse_gt_list
def vis2(seq1):
for i in range(seq1.shape[0]):
plt.clf()
for j in range(seq1.shape[1]):
x1 = seq1[i, j, :, 0]
y1 = seq1[i, j, :, 1]
# x2 = seq2[i, :, 0]
# y2 = seq2[i, :, 1]
plt.plot(x1, y1, linestyle="-.", marker='.', color='red')
# plt.plot(x2, y2, linestyle="-.", marker='.', color='green')
plt.savefig('test_tree.png')
ipdb.set_trace()
| 6,747 | 33.080808 | 109 | py |
SIT | SIT-master/dataset.py | import pickle
import numpy as np
from torch.utils import data
from util import get_train_test_data, data_augmentation
from tree_util import tree_build, tree_label
class DatasetETHUCY(data.Dataset):
def __init__(self, data_path, dataset_name, batch_size, is_test, end_centered=True,
data_flip=False, data_scaling=None, obs_len=8, pred_len=12,
split_interval=4, degree=3, thea=6):
        """Preprocessing for the ETH/UCY dataset."""
data_file = get_train_test_data(data_path, dataset_name, batch_size, is_test)
with open(data_file, 'rb') as f:
data = pickle.load(f)
trajs, masks = data
trajs_new = []
for traj in trajs:
t = np.array(traj)
t = t[:, :, 2:4]
t = data_augmentation(t, end_centered, data_flip, data_scaling)
trajs_new.append(t)
masks_new = []
for mask in masks:
masks_new.append(mask)
traj_new = np.array(trajs_new)
masks_new = np.array(masks_new)
self.trajectory_batches = traj_new.copy()
self.mask_batches = masks_new.copy()
traj_tree = tree_build(traj_new.copy(), split_interval=split_interval, degree=degree, pred_len=pred_len, obs_len=obs_len, thea=thea)
traj_tree = np.array(traj_tree) # N n T 2
self.traj_tree_batches = traj_tree.copy()
# coarse ground truth
        if not is_test:
closest_branch_index_batches, coarse_gt_list = \
tree_label(traj_tree.copy(), traj_new.copy())
closest_branch_index_batches = np.array(closest_branch_index_batches)
coarse_gt_ = np.array(coarse_gt_list)
self.closest_branch_index_batches = closest_branch_index_batches.copy()
self.coarse_gt_batches = coarse_gt_.copy()
print("Initialized dataloader for ucy-eth!") | 1,893 | 34.074074 | 140 | py |
SIT | SIT-master/run.py | import argparse
from dataset import DatasetETHUCY
import util
import logging
import torch
from model.trajectory_model import TrajectoryModel
from torch.optim import Adam, lr_scheduler
import os
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO)
logger = logging.getLogger(__name__)
def run(args: util.Args, device):
logger.info('**** data loading ******')
train_dataset = DatasetETHUCY(args.data_dir, args.dataset, args.train_batch_size, False, args.end_centered,
args.data_flip, args.data_scaling, args.obs_len, args.pred_len,
args.split_temporal_interval, args.tree_degree, args.split_thea)
test_dataset = DatasetETHUCY(args.data_dir, args.dataset, args.train_batch_size, True, args.end_centered,
False, None, args.obs_len, args.pred_len,
args.split_temporal_interval, args.tree_degree, args.split_thea)
logger.info('**** model loading ******')
model_args = util.ModelArgs # You can change the arguments of model directly in the ModelArgs class
model = TrajectoryModel(model_args).to(device)
optimizer = Adam(model.parameters(), lr=args.lr)
reg_criterion = torch.nn.SmoothL1Loss().to(device)
clf_criterion = torch.nn.CrossEntropyLoss().to(device)
if args.lr_scheduler == 0:
scheduler = lr_scheduler.MultiStepLR(optimizer, milestones=args.lr_milestones, gamma=args.lr_gamma)
if args.lr_scheduler == 1:
scheduler = lr_scheduler.CosineAnnealingLR(optimizer, T_max=args.lr_milestones[0])
min_ade = 99
min_fde = 99
best_epoch = 0
logger.info('**** model training ******')
for epoch in range(args.epoch):
total_loss, coarse_reg_loss, fine_reg_loss, clf_loss = train(args, model, optimizer, train_dataset,
reg_criterion, clf_criterion, device)
util.logging(
f'dataset:{args.dataset} '
f'epoch:{epoch} ',
f'total_loss:{sum(total_loss) / len(total_loss)} ',
f'coarse_reg_loss:{sum(coarse_reg_loss) / len(coarse_reg_loss)} ',
f'fine_reg_loss:{sum(fine_reg_loss) / len(fine_reg_loss)} ',
f'clf_loss:{sum(clf_loss) / len(clf_loss)} ',
verbose=True,
file_type='train_loss',
append_log=True
)
ade, fde = test(args, model, test_dataset, device)
util.logging(
f'dataset:{args.dataset} '
f'epoch:{epoch} ',
f'ade:{ade} ',
f'fde:{fde} ',
verbose=True,
file_type='ade_fde',
append_log=True
)
if args.lr_scheduler == 1 or args.lr_scheduler == 0:
scheduler.step()
if min_fde + min_ade > ade + fde:
min_fde = fde
min_ade = ade
best_epoch = epoch
torch.save(model.state_dict(), args.checkpoint + args.dataset + '/model.pth')
logger.info(f'dataset:{args.dataset}, curr_best_epoch:{best_epoch}, curr_min_ade:{min_ade},'
f' curr_min_fde:{min_fde}')
logger.info(f'dataset:{args.dataset}, best_epoch:{best_epoch}, min_ade:{min_ade}, min_fde:{min_fde}')
return
def get_train_loss(fine_trajs, gt_trajs, coarse_trajs, coarse_gt, path_score, closest_label, reg_criterion,
clf_criterion):
fine_trajs = fine_trajs.reshape(gt_trajs.shape)
coarse_trajs = coarse_trajs.reshape(coarse_gt.shape)
coarse_reg_loss = reg_criterion(coarse_trajs, coarse_gt)
fine_reg_loss = reg_criterion(fine_trajs, gt_trajs)
clf_loss = clf_criterion(path_score, closest_label)
loss = coarse_reg_loss + fine_reg_loss + clf_loss
return loss, coarse_reg_loss, fine_reg_loss, clf_loss
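# Note: the training objective is an unweighted sum of the coarse and fine
# Smooth-L1 regression losses plus a cross-entropy over tree branches, where
# `closest_label` indexes the branch whose endpoint is nearest the ground
# truth (see tree_label in tree_util.py).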
def train(args: util.Args, model, optimizer, dataloader, reg_criterion, clf_criterion, device):
model.train()
train_loss_list = []
coarse_reg_loss_list = []
fine_reg_loss_list = []
clf_loss_list = []
for i, (trajs, masks, trees, coarse_gt, closest_label) in enumerate(
zip(dataloader.trajectory_batches, dataloader.mask_batches, dataloader.traj_tree_batches,
dataloader.coarse_gt_batches, dataloader.closest_branch_index_batches)):
trajs = torch.FloatTensor(trajs).to(device)
masks = torch.FloatTensor(masks).to(device)
trees = torch.FloatTensor(trees).to(device)
coarse_gt = torch.FloatTensor(coarse_gt).to(device)
closest_label = torch.LongTensor(closest_label).to(device)
obs_trajs = trajs[:, :args.obs_len, :]
gt_trajs = trajs[:, args.obs_len:, :]
optimizer.zero_grad()
path_score, coarse_trajs, fine_trajs = model(obs_trajs, trees, coarse_gt, closest_label, masks, device)
loss, coarse_reg_loss, fine_reg_loss, clf_loss = \
get_train_loss(fine_trajs, gt_trajs, coarse_trajs, coarse_gt, path_score, closest_label, reg_criterion,
clf_criterion)
loss.backward()
optimizer.step()
train_loss_list.append(loss.item())
coarse_reg_loss_list.append(coarse_reg_loss.item())
fine_reg_loss_list.append(fine_reg_loss.item())
clf_loss_list.append(clf_loss.item())
return train_loss_list, coarse_reg_loss_list, fine_reg_loss_list, clf_loss_list
def test(args: util.Args, model, dataloader, device):
model.eval()
ade = 0
fde = 0
num_ped = 0
num_trajs = 0
for i, (trajs, masks, trees) in enumerate(zip(dataloader.trajectory_batches, dataloader.mask_batches,
dataloader.traj_tree_batches)):
trajs = torch.FloatTensor(trajs).to(device)
masks = torch.FloatTensor(masks).to(device)
trees = torch.FloatTensor(trees).to(device)
with torch.no_grad():
obs_trajs = trajs[:, :args.obs_len, :]
gt_trajs = trajs[:, args.obs_len:, :]
num_trajs += obs_trajs.shape[0]
pred_trajs, _ = model.predict(obs_trajs, trees, masks, args.num_k, device)
min_ade, min_fde = util.get_ade_fde(pred_trajs, gt_trajs, args.num_k)
ade += min_ade.item()
fde += min_fde.item()
num_ped += trajs.shape[0]
ade = ade / num_ped
fde = fde / num_ped
return ade, fde
def main():
logger.info('**** project args ******')
parser = argparse.ArgumentParser()
util.add_argument(parser)
args: util.Args = parser.parse_args()
util.init(args, logger)
device = torch.device('cuda:' + str(args.gpu_num) if torch.cuda.is_available() and args.cuda else 'cpu')
logger.info("device: {}".format(device))
run(args, device)
logger.info(f'Finished!')
if __name__ == '__main__':
main()
| 6,964 | 36.446237 | 115 | py |
SIT | SIT-master/util.py | from typing import Dict
import os
import subprocess
import random
import pickle
import torch
import numpy as np
import argparse
class Args:
dataset = None
epoch = None
lr = None
lr_scheduler = None
lr_milestones = None
lr_gamma = None
obs_len = None
pred_len = None
train_batch_size = None
test_batch_size = None
seed = None
gpu_num = None
checkpoint = None
data_dir = None
log_dir = None
cuda = None
end_centered = None
data_flip = None
data_scaling = None
# Arguments for the building of tree
split_thea = None
split_temporal_interval = None
tree_degree = None
num_k = None
class ModelArgs:
# Arguments for model
in_dim = 2
obs_len = 8
pred_len = 12
hidden1 = 1024
hidden2 = 256
enc_dim = 64
att_layer = 3
tf = True # teacher forcing
out_dim = 2
num_k = 20
def add_argument(parser):
assert isinstance(parser, argparse.ArgumentParser)
parser.add_argument('--dataset', type=str, default='eth', help='eth,hotel,univ,zara1,zara2,sdd')
parser.add_argument('--data_dir', type=str,
default='./dataset/')
parser.add_argument('--log_dir', type=str)
parser.add_argument('--epoch', type=int, default=350)
parser.add_argument('--lr', type=float, default=0.001)
parser.add_argument('--lr_scheduler', type=int, default=0, help='0:MultiStepLR, 1:CosineAnnealingLR, other numbers:None')
parser.add_argument('--lr_milestones', type=int, nargs='+', default=[50, 150, 250])
parser.add_argument('--lr_gamma', type=float, default=0.5)
parser.add_argument('--obs_len', type=int, default=8)
parser.add_argument('--pred_len', type=int, default=12)
parser.add_argument('--train_batch_size', type=int, default=512,
help='256 or 512 for eth-ucy, 512 for sdd')
parser.add_argument('--test_batch_size', type=int, default=512,
help='256, 512 or 4096 for eth-ucy, 4096 for sdd')
parser.add_argument('--seed', type=int, default=1)
parser.add_argument('--cuda', action='store_true')
parser.add_argument('--gpu_num', type=str, default='6')
parser.add_argument('--checkpoint', type=str, default='./checkpoints/')
parser.add_argument('--end_centered', action='store_true')
parser.add_argument('--data_flip', action='store_true')
parser.add_argument('--data_scaling', type=float, nargs='+', default=None)
parser.add_argument('--split_thea', type=int, default=4)
parser.add_argument('--split_temporal_interval', type=int, default=4)
parser.add_argument('--tree_degree', type=int, default=3)
parser.add_argument('--num_k', type=int, default=20)
def get_input_data(data_dict: Dict, key=None):
    try:
        return data_dict[key]
    except KeyError:
        print('KeyError: no input named {!r}'.format(key))
args: Args = None
logger = None
def init(args_: Args, logger_):
global args, logger
args = args_
logger = logger_
# assert os.path.exists(args.checkpoint + args.dataset)
assert os.path.exists(args.data_dir + 'test')
assert os.path.exists(args.data_dir + 'train')
if args.log_dir is None:
args.log_dir = args.checkpoint + args.dataset
# os.makedirs(args.checkpoint + args.dataset, exist_ok=True)
# os.makedirs(args.log_dir, exist_ok=True)
if os.path.exists(args.checkpoint + args.dataset):
subprocess.check_output('rm -r {}'.format(args.checkpoint + args.dataset), shell=True, encoding='utf-8')
os.makedirs(args.checkpoint + args.dataset, exist_ok=False)
logger.info("*******" + ' args ' + "******")
# args_dict = vars(args)
# for key in args_dict:
# print("\033[32m" + key + "\033[0m", args_dict[key], end='\t')
# print('')
logging(vars(args_), verbose=True, sep=' ', save_as_pickle=True, file_type=args.dataset + '.args')
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
def logging(*inputs, verbose=False, sep=' ', save_as_pickle=False, file_type='args', append_log=False):
    """Print `inputs` and/or persist them under the log directory.

    Depending on the flags, the inputs are echoed to stdout, pickled to
    log_dir/<file_type>, and/or appended to that file as plain text.
    """
if verbose:
print(*inputs, sep=sep)
if not hasattr(args, 'log_dir'):
return
file = os.path.join(args.log_dir, file_type)
if save_as_pickle:
with open(file, 'wb') as pickle_file:
pickle.dump(*inputs, pickle_file)
if append_log:
with open(file, "a", encoding='utf-8') as fout:
print(*tuple(inputs), file=fout, sep=sep)
print(file=fout)
def get_train_test_data(data_path, dataset_name, batch_size, is_test):
if is_test:
if dataset_name == 'sdd':
return data_path + '/test' + "/social_" + dataset_name + "_test" + "_" + str(
4096) + "_" + str(0) + "_" + str(100) + ".pickle"
else:
return data_path + '/test' + "/social_" + dataset_name + "_test" + "_" + str(
batch_size) + "_" + str(0) + "_" + str(50) + ".pickle"
else:
if dataset_name == 'sdd':
return data_path + '/train' + "/social_" + dataset_name + "_train" + "_" + str(
512) + "_" + str(0) + "_" + str(100) + ".pickle"
else:
return data_path + '/train' + "/social_" + dataset_name + "_train" + "_" + str(
batch_size) + "_" + str(0) + "_" + str(50) + ".pickle"
def data_augmentation(data_, end_centered, is_flip, data_scaling):
if end_centered:
data_ = data_ - data_[:, 7:8]
if is_flip:
data_ = np.flip(data_, axis=-1).copy()
if data_scaling is not None:
data_[:, :, 0] = data_[:, :, 0] * data_scaling[0]
data_[:, :, 1] = data_[:, :, 1] * data_scaling[1]
return data_
def get_ade_fde(pred_trajs, gt_trajs, num_k):
pred_trajs = pred_trajs.reshape(gt_trajs.shape[0], num_k, gt_trajs.shape[1], -1)
gt_trajs = gt_trajs.unsqueeze(1)
norm_ = torch.norm(pred_trajs - gt_trajs, p=2, dim=-1)
ade_ = torch.mean(norm_, dim=-1)
fde_ = norm_[:, :, -1]
min_ade, _ = torch.min(ade_, dim=-1)
min_fde, _ = torch.min(fde_, dim=-1)
min_ade = torch.sum(min_ade)
min_fde = torch.sum(min_fde)
return min_ade, min_fde
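# Note on get_ade_fde: this is the standard best-of-K protocol. The L2 error is
# computed against all `num_k` candidates per pedestrian; ADE averages it over
# time steps and FDE takes the final step. The minimum over candidates is then
# summed over pedestrians, so callers (see test() in run.py) divide by the
# pedestrian count to obtain means.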
| 6,257 | 29.231884 | 125 | py |
SIT | SIT-master/model/component.py | import math
import torch
import torch.nn as nn
import torch.nn.functional as F
class Activation_Fun(nn.Module):
def __init__(self, act_name):
super(Activation_Fun, self).__init__()
if act_name == 'relu':
self.act = nn.ReLU()
if act_name == 'prelu':
self.act = nn.PReLU()
if act_name == 'sigmoid':
            self.act = nn.Sigmoid()  # fixed: '==' here was a no-op comparison that left self.act unset
def forward(self, x):
return self.act(x)
class MLP(nn.Module):
def __init__(self, in_size, out_size=None, normalization=False, act_name='prelu'):
super(MLP, self).__init__()
if out_size is None:
out_size = in_size
self.linear = nn.Linear(in_size, out_size)
self.ln = LayerNorm(out_size) if normalization else nn.Sequential()
self.activation = Activation_Fun(act_name)
def forward(self, x):
x = self.linear(x)
x = self.ln(x)
x = self.activation(x)
return x
class LayerNorm(nn.Module):
r"""
Layer normalization.
"""
def __init__(self, hidden_size, eps=1e-5):
super(LayerNorm, self).__init__()
self.weight = nn.Parameter(torch.ones(hidden_size))
self.bias = nn.Parameter(torch.zeros(hidden_size))
self.variance_epsilon = eps
def forward(self, x):
u = x.mean(-1, keepdim=True)
s = (x - u).pow(2).mean(-1, keepdim=True)
x = (x - u) / torch.sqrt(s + self.variance_epsilon)
return self.weight * x + self.bias
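# A worked restatement of LayerNorm.forward (shapes assumed, features on the
# last axis):
#     mu = mean(x); var = mean((x - mu)^2)
#     y = weight * (x - mu) / sqrt(var + eps) + bias
# i.e. per-sample normalization with a learnable scale and shift, as in
# https://arxiv.org/abs/1607.06450.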
class SelfAttention(nn.Module):
def __init__(self, in_size, hidden_size=256, out_size=64, non_linear=True):
super(SelfAttention, self).__init__()
self.query = nn.Sequential(
nn.Linear(in_size, hidden_size),
nn.PReLU(),
nn.Linear(hidden_size, out_size)
) if non_linear else nn.Linear(in_size, out_size)
self.key = nn.Sequential(
nn.Linear(in_size, hidden_size),
nn.PReLU(),
nn.Linear(hidden_size, out_size)
) if non_linear else nn.Linear(in_size, out_size)
self.softmax = nn.Softmax(dim=-1)
def forward(self, query, key, mask=None, interaction=True):
assert len(query.shape) == 3
query = self.query(query) # batch_size seq_len d_model
query = query / float(math.sqrt(query.shape[-1]))
key = self.key(key) # batch_size seq_len d_model
attention = torch.matmul(query, key.permute(0, 2, 1)) # (batch_size, seq_len, seq_len)
if mask is None and interaction is True:
return attention # for path scoring
if mask is not None and interaction is True:
attention = F.softmax(attention, dim=-1)
attention = attention * mask # setting the attention score of pedestrian who are not in the scene to zero
attention = F.normalize(attention, p=1, dim=-1) # normalizing the non-zero value
return attention | 2,946 | 31.384615 | 118 | py |
SIT | SIT-master/model/trajectory_model.py |
import torch
import torch.nn as nn
from model.component import MLP
from model.component import SelfAttention
from util import ModelArgs
class TrajectoryModel(nn.Module):
def __init__(self, args: ModelArgs):
super(TrajectoryModel, self).__init__()
in_dim = args.in_dim
obs_len = args.obs_len
pred_len = args.pred_len
hidden1 = args.hidden1
hidden2 = args.hidden2
enc_dim = args.enc_dim
att_layer = args.att_layer
out_dim = args.out_dim
self.obs_enc = nn.Sequential(
MLP(in_dim*obs_len, hidden1),
MLP(hidden1, hidden1),
MLP(hidden1, hidden2),
nn.Linear(hidden2, enc_dim)
)
# self attention for interaction
self.int_att = nn.ModuleList(
[SelfAttention(in_size=enc_dim, hidden_size=hidden2, out_size=enc_dim) for _ in range(att_layer)]
)
self.tree_enc = nn.Sequential(
MLP(in_dim*pred_len, hidden1),
MLP(hidden1, hidden1),
MLP(hidden1, hidden2),
nn.Linear(hidden2, enc_dim)
)
self.coarse_prediction = nn.Sequential(
MLP(enc_dim*2, hidden1),
MLP(hidden1, hidden1),
MLP(hidden1, hidden2),
nn.Linear(hidden2, out_dim*pred_len)
)
self.refining_enc = nn.Sequential(
MLP(in_dim*pred_len, hidden1),
MLP(hidden1, hidden1),
MLP(hidden1, hidden2),
nn.Linear(hidden2, enc_dim)
)
self.scoring_att = SelfAttention(in_size=enc_dim, hidden_size=hidden2, out_size=enc_dim)
self.refining = nn.Sequential(
MLP(enc_dim*2, hidden1),
MLP(hidden1, hidden1),
MLP(hidden1, hidden2),
nn.Linear(hidden2, out_dim*pred_len)
)
self.output = nn.Linear(out_dim*pred_len, out_dim*pred_len)
self.tf = args.tf
def forward(self, obs_trajs, tree, coarse_gt, closest_label, mask, device):
obs_trajs_ = obs_trajs.reshape(obs_trajs.shape[0], 1, -1) # N 1 16
tree = tree.reshape(tree.shape[0], tree.shape[1], -1) # N n 24
obs_enc = self.obs_enc(obs_trajs_) # N 1 enc_dim
obs_enc = obs_enc.permute(1, 0, 2) # 1 N enc_dim
for i in range(len(self.int_att)):
int_mat = self.int_att[i](obs_enc, obs_enc, mask)
obs_enc = obs_enc + torch.matmul(int_mat, obs_enc)
obs_enc = obs_enc.permute(1, 0, 2) # N 1 enc_dim
tree_enc = self.tree_enc(tree) # N n enc_dim
path_score = self.scoring_att(obs_enc, tree_enc).squeeze() # N n # cross attention for classification
ped_index = torch.arange(0, obs_trajs.shape[0]).to(device)
closet_branch_enc = tree_enc[ped_index, closest_label] # N enc_dim
con_enc = torch.cat((obs_enc.squeeze(), closet_branch_enc), dim=-1) # N enc_dim*2
coarse_pred_traj = self.coarse_prediction(con_enc) # N 24
if self.tf:
            coarse_traj_ = coarse_gt  # teacher forcing (the reshape to its own shape was a no-op)
else:
coarse_traj_ = coarse_pred_traj # without teacher forcing
coarse_traj_ = coarse_traj_.reshape(coarse_traj_.shape[0], -1)
coarse_enc = self.refining_enc(coarse_traj_)
con_coarse_enc = torch.cat((obs_enc.squeeze(), coarse_enc), dim=-1) # [N 128]
refining_traj = self.refining(con_coarse_enc)
predicted_traj = self.output(refining_traj)
return path_score, coarse_pred_traj, predicted_traj
def predict(self, obs_trajs, tree, mask, num_k, device):
obs_trajs_ = obs_trajs.reshape(obs_trajs.shape[0], 1, -1) # N 1 16
tree = tree.reshape(tree.shape[0], tree.shape[1], -1) # N n 24
obs_enc = self.obs_enc(obs_trajs_) # N 1 enc_dim
tree_enc = self.tree_enc(tree) # N n enc_dim
obs_enc = obs_enc.permute(1, 0, 2) # 1 N enc_dim
for i in range(len(self.int_att)):
int_mat = self.int_att[i](obs_enc, obs_enc, mask)
obs_enc = obs_enc + torch.matmul(int_mat, obs_enc)
obs_enc = obs_enc.permute(1, 0, 2) # N 1 enc_dim
path_score = self.scoring_att(obs_enc, tree_enc).squeeze() # N n # cross attention for classification
top_k_indices = torch.topk(path_score, k=num_k, dim=-1).indices # N num_k
top_k_indices = top_k_indices.flatten() # N*num_k
ped_indices = torch.arange(0, obs_trajs.shape[0]).unsqueeze(1).to(device) # N 1
ped_indices = ped_indices.repeat(1, num_k).flatten() # N*num_k
selected_paths_enc = tree_enc[ped_indices, top_k_indices] # N*num_k enc_dim
selected_paths_enc = selected_paths_enc.reshape(tree_enc.shape[0], num_k, -1)
obs_enc = obs_enc.repeat(1, selected_paths_enc.shape[1], 1) # N num_k enc_dim
con_enc = torch.cat((obs_enc, selected_paths_enc), dim=-1) # N num_k enc_dim*2
coarse_traj = self.coarse_prediction(con_enc) # N num_k 24
coarse_enc = self.refining_enc(coarse_traj)
con_coarse_enc = torch.cat((obs_enc, coarse_enc), dim=-1)
refining_traj = self.refining(con_coarse_enc) # N num_k enc_dim
predicted_traj = self.output(refining_traj) # N num_k 24
return predicted_traj, path_score
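# Pipeline recap (informal): forward/predict score every tree branch against
# the interaction-encoded history via scoring_att, select the labeled closest
# branch (train) or the top-k branches (predict), regress a coarse 12-step
# trajectory from the concatenated encodings, then refine it with a second
# regression head; self.output maps the refinement to the final trajectories.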
# sdd thea: 4 12 6
# 9.71 17.26
# 9.48 16.70
# 9.44 16.62
# 9.61 16.50
# 9.62 16.19
# 9.38 15.97
# 9.25 15.57
# 9.11 15.74
# 9.12 15.63
# 9.23 15.47
# 9.09 15.54
# eth
# 0.41 0.62
# 0.41 0.59 lr:0.001 thea:4 6 4
# hotel
# 0.15 0.29 lr:0.001 thea:4 6 4
# 0.17 0.29 lr:0.001 thea:12 6 4
# 0.18 0.28 lr:0.001 thea:12 12 12
# 0.15 0.26 flip
# 0.15 0.22 thea:12 6 4
# 0.15 0.25 thea: 4 6 4
# 0.14 0.25 thea: 4 6 4 [250]
# 0.14 0.22 thea: 6 6 4
# univ
# 0.65 1.18 bs:512 thea:4 6 4
# 0.27 0.47 bs:256 thea:4 6 4
# zara1
# 0.23 0.37 lr:0.003 thea:4 6 4
# 0.21 0.36 lr:0.001 thea:4 6 4
# 0.21 0.36
# 0.20 0.34 thea:12 6 4
# 0.19 0.33
# zara2
# 0.17 0.29 lr:0.003 thea:4 6 4
# 0.16 0.29 lr:0.001
# 0.16 0.30 12 6 4
| 6,157 | 33.022099 | 111 | py |
adanet | adanet-master/research/improve_nas/trainer/cifar100.py | # Lint as: python3
"""CIFAR-100 data and convenience functions.
Copyright 2019 The AdaNet Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import numpy as np
import tensorflow.compat.v1 as tf
from tensorflow.keras.datasets import cifar100
# pylint: disable=g-import-not-at-top
try:
from adanet.research.improve_nas.trainer import image_processing
except ImportError as e:
from trainer import image_processing
# pylint: enable=g-import-not-at-top
FEATURES = 'x'
PreprocessingType = image_processing.PreprocessingType
class Provider(object):
"""A CIFAR-100 data provider."""
def __init__(self,
params_string='',
seed=None):
"""Returns a CIFAR-100 `Provider`."""
# For testing
self._seed = seed
default_params = tf.contrib.training.HParams(
cutout=True, augmentation=PreprocessingType.BASIC)
self._params = default_params.parse(params_string)
def _preprocess_data(self, image, label, training, preprocess):
"""Apply Inception data augmentation and preprocessing."""
# Unpack `Element` tuple.
# image, label = element
if preprocess:
image_height, image_width = self._shape()[:2]
if self._params.augmentation == PreprocessingType.BASIC:
image = image_processing.resize_and_normalize(image, image_height,
image_width)
if training:
image = image_processing.basic_augmentation(image, image_height,
image_width, self._seed)
else:
raise ValueError('Unsupported data augmentation type: `%s`' %
self._params.augmentation)
if training and self._params.cutout:
# According to https://arxiv.org/abs/1708.04552, cutting out 16x16
# works best.
image = image_processing.cutout(image, pad_size=8, seed=self._seed)
# Set shapes so that they are defined.
image.set_shape(self._shape())
if label is not None:
label.set_shape([1])
return {FEATURES: image}, label
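  # Note: pad_size=8 presumably corresponds to the 16x16 cutout square of
  # https://arxiv.org/abs/1708.04552 (a window of side 2 * pad_size); the
  # exact convention is defined in image_processing.cutout.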
def _cifar100_dataset(self, partition):
"""Returns a partition of the CIFAR-100 `Dataset`."""
cifar100_data = None
try:
cifar100_data = cifar100.load_data()
tf.logging.info('Loaded cifar100.')
except: # pylint: disable=bare-except
tf.logging.info(
'Can not load cifar100 from internet. Creating dummy data for '
'testing.')
data = np.zeros((3, 32, 32, 3))
labels = np.array([[47], [52], [5]])
data[:, 0, 0] = [220, 25, 47]
data[:, -1, 0, 0] = 128
cifar100_data = ((data, labels), (data, labels))
(x_train, y_train), (x_test, y_test) = cifar100_data
x = None
y = None
if partition == 'train':
x, y = x_train, y_train
else:
x, y = x_test, y_test
dataset = tf.data.Dataset.from_tensor_slices((x, y.astype(np.int32)))
return dataset.cache()
def _shape(self):
"""Returns a 3-dimensional list with the shape of the image."""
return [32, 32, 3]
def get_input_fn(self,
partition,
mode,
batch_size,
preprocess=True,
use_tpu=False):
"""See `data.Provider` get_input_fn."""
def input_fn(params=None):
"""Provides batches of CIFAR images.
Args:
params: A dict containing the batch_size on TPU, otherwise None.
Returns:
images: A `Tensor` of size [batch_size, 32, 32, 3]
labels: A `Tensor` of size [batch_size, 1],
"""
batch_size_ = batch_size
if use_tpu:
batch_size_ = params.get('batch_size', batch_size)
training = mode == tf.estimator.ModeKeys.TRAIN
dataset = self._cifar100_dataset(partition)
dataset = dataset.map(
functools.partial(
self._preprocess_data, training=training, preprocess=preprocess))
if training:
dataset = dataset.apply(
tf.contrib.data.shuffle_and_repeat(
buffer_size=500, seed=self._seed))
return dataset.batch(
batch_size_,
drop_remainder=use_tpu).prefetch(tf.data.experimental.AUTOTUNE
).make_one_shot_iterator().get_next()
return input_fn
def get_head(self, name=None):
"""Returns a `Head` instance for CIFAR-100 with the given name."""
return tf.contrib.estimator.multi_class_head(
100, name=name, loss_reduction=tf.losses.Reduction.SUM_OVER_BATCH_SIZE)
def get_feature_columns(self):
"""Returns feature columns."""
feature_columns = [
tf.feature_column.numeric_column(key=FEATURES, shape=self._shape())
]
return feature_columns
| 5,382 | 31.823171 | 79 | py |
adanet | adanet-master/research/improve_nas/trainer/cifar10.py | # Lint as: python3
"""CIFAR-10 data and convenience functions.
Copyright 2019 The AdaNet Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import numpy as np
import tensorflow.compat.v1 as tf
from tensorflow.keras.datasets import cifar10
# pylint: disable=g-import-not-at-top
try:
from adanet.research.improve_nas.trainer import image_processing
except ImportError as e:
from trainer import image_processing
# pylint: enable=g-import-not-at-top
FEATURES = 'x'
PreprocessingType = image_processing.PreprocessingType
class Provider(object):
"""A CIFAR-10 data provider."""
def __init__(self,
params_string='',
seed=None):
"""Returns a CIFAR-10 `Provider`."""
# For testing
self._seed = seed
default_params = tf.contrib.training.HParams(
cutout=True, augmentation=PreprocessingType.BASIC)
self._params = default_params.parse(params_string)
def _preprocess_data(self, image, label, training, preprocess):
"""Apply Inception data augmentation and preprocessing."""
# Unpack `Element` tuple.
# image, label = element
if preprocess:
image_height, image_width = self._shape()[:2]
if self._params.augmentation == PreprocessingType.BASIC:
image = image_processing.resize_and_normalize(image, image_height,
image_width)
if training:
image = image_processing.basic_augmentation(image, image_height,
image_width, self._seed)
else:
raise ValueError('Unsupported data augmentation type: `%s`' %
self._params.augmentation)
if training and self._params.cutout:
# According to https://arxiv.org/abs/1708.04552, cutting out 16x16
# works best.
image = image_processing.cutout(image, pad_size=8, seed=self._seed)
# Set shapes so that they are defined.
image.set_shape(self._shape())
if label is not None:
label.set_shape([1])
return {FEATURES: image}, label
def _cifar10_dataset(self, partition):
"""Returns a partition of the CIFAR-10 `Dataset`."""
cifar10_data = None
try:
cifar10_data = cifar10.load_data()
tf.logging.info('Loaded cifar10.')
except: # pylint: disable=bare-except
tf.logging.info(
'Can not load cifar10 from internet. Creating dummy data for '
'testing.')
data = np.zeros((3, 32, 32, 3))
labels = np.array([[5], [3], [9]])
data[:, 0, 0] = [148, 141, 174]
data[:, -1, 0, 0] = 128
cifar10_data = ((data, labels), (data, labels))
(x_train, y_train), (x_test, y_test) = cifar10_data
x = None
y = None
if partition == 'train':
x, y = x_train, y_train
else:
x, y = x_test, y_test
dataset = tf.data.Dataset.from_tensor_slices((x, y.astype(np.int32)))
return dataset.cache()
def _shape(self):
"""Returns a 3-dimensional list with the shape of the image."""
return [32, 32, 3]
def get_input_fn(self,
partition,
mode,
batch_size,
preprocess=True,
use_tpu=False):
"""See `data.Provider` get_input_fn."""
def input_fn(params=None):
"""Provides batches of CIFAR images.
Args:
params: A dict containing the batch_size on TPU, otherwise None.
Returns:
images: A `Tensor` of size [batch_size, 32, 32, 3]
labels: A `Tensor` of size [batch_size, 1],
"""
batch_size_ = batch_size
if use_tpu:
batch_size_ = params.get('batch_size', batch_size)
training = mode == tf.estimator.ModeKeys.TRAIN
dataset = self._cifar10_dataset(partition)
dataset = dataset.map(
functools.partial(
self._preprocess_data, training=training, preprocess=preprocess))
if training:
dataset = dataset.apply(
tf.contrib.data.shuffle_and_repeat(
buffer_size=500, seed=self._seed))
return dataset.batch(
batch_size_,
drop_remainder=use_tpu).prefetch(tf.data.experimental.AUTOTUNE
).make_one_shot_iterator().get_next()
return input_fn
def get_head(self, name=None):
"""Returns a `Head` instance for multiclass CIFAR-10 with the given name."""
return tf.contrib.estimator.multi_class_head(
10, name=name, loss_reduction=tf.losses.Reduction.SUM_OVER_BATCH_SIZE)
def get_feature_columns(self):
"""Returns feature columns."""
feature_columns = [
tf.feature_column.numeric_column(key=FEATURES, shape=self._shape())
]
return feature_columns
| 5,373 | 32.5875 | 80 | py |
adanet | adanet-master/docs/source/conf.py | # -*- coding: utf-8 -*-
# Copyright 2018 The AdaNet Authors. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# https://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Configuration file for the Sphinx documentation builder.
This file does only contain a selection of the most common options. For a
full list see the documentation:
http://www.sphinx-doc.org/en/master/usage/configuration.html
"""
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
import os
import sys
sys.path.insert(0, os.path.abspath('../..'))
from adanet import version as ver # pylint: disable=g-import-not-at-top
# -- Project information -----------------------------------------------------
project = u'adanet'
copyright = u'2018, AdaNet Authors' # pylint: disable=redefined-builtin
author = u'AdaNet Authors'
# The short X.Y version
version = ver.__version__
# The full version, including alpha/beta/rc tags
release = version
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'recommonmark',
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.mathjax',
'sphinx.ext.napoleon',
'sphinx.ext.viewcode',
]
# Add any paths that contain templates here, relative to this directory.
# templates_path = ['_templates']
# The suffix(es) of source filenames.
source_suffix = {
'.rst': 'restructuredtext',
'.txt': 'markdown',
'.md': 'markdown',
}
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = [u'_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = None
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
html_logo = './assets/adanet_tangram_logo.png'
html_context = {
'css_files': ['_static/custom.css'],
}
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'adanetdoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'adanet.tex', u'adanet Documentation', u'AdaNet Authors',
'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [(master_doc, 'adanet', u'adanet Documentation', [author], 1)]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'adanet', u'adanet Documentation', author, 'adanet',
'One line description of project.', 'Miscellaneous'),
]
# -- Options for Epub output -------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# -- Extension configuration -------------------------------------------------
# Napoleon settings
napoleon_google_docstring = True
napoleon_numpy_docstring = True
napoleon_include_init_with_doc = True
napoleon_include_private_with_doc = False
napoleon_include_special_with_doc = True
napoleon_use_admonition_for_examples = False
napoleon_use_admonition_for_notes = False
napoleon_use_admonition_for_references = False
napoleon_use_ivar = False
napoleon_use_param = True
napoleon_use_rtype = True
| 6,636 | 31.218447 | 79 | py |
adanet | adanet-master/adanet/modelflow_test.py | # Lint as: python3
# Copyright 2020 The AdaNet Authors. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# https://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test ModelFlow imports."""
import adanet.experimental as adanet
import tensorflow.compat.v2 as tf
class ModelFlowTest(tf.test.TestCase):
def test_public(self):
self.assertIsNotNone(adanet.controllers.SequentialController)
self.assertIsNotNone(adanet.keras.EnsembleModel)
self.assertIsNotNone(adanet.keras.MeanEnsemble)
self.assertIsNotNone(adanet.keras.WeightedEnsemble)
self.assertIsNotNone(adanet.keras.ModelSearch)
self.assertIsNotNone(adanet.phases.AutoEnsemblePhase)
self.assertIsNotNone(adanet.phases.InputPhase)
self.assertIsNotNone(adanet.phases.KerasTrainerPhase)
self.assertIsNotNone(adanet.phases.KerasTunerPhase)
self.assertIsNotNone(adanet.phases.RepeatPhase)
self.assertIsNotNone(adanet.schedulers.InProcessScheduler)
self.assertIsNotNone(adanet.storages.InMemoryStorage)
self.assertIsNotNone(adanet.work_units.KerasTrainerWorkUnit)
self.assertIsNotNone(adanet.work_units.KerasTunerWorkUnit)
if __name__ == "__main__":
tf.test.main()
| 1,637 | 38.95122 | 74 | py |
adanet | adanet-master/adanet/core/ensemble_builder_test.py | """Test AdaNet ensemble single graph implementation.
Copyright 2018 The AdaNet Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
from absl.testing import parameterized
from adanet import tf_compat
from adanet.core.ensemble_builder import _EnsembleBuilder
from adanet.core.ensemble_builder import _SubnetworkManager
from adanet.core.summary import Summary
import adanet.core.testing_utils as tu
from adanet.ensemble import Candidate as EnsembleCandidate
from adanet.ensemble import ComplexityRegularizedEnsembler
from adanet.ensemble import MeanEnsemble
from adanet.ensemble import MeanEnsembler
from adanet.ensemble import MixtureWeightType
from adanet.subnetwork import Builder
from adanet.subnetwork import Subnetwork
import tensorflow.compat.v1 as tf_v1
import tensorflow.compat.v2 as tf
# pylint: disable=g-direct-tensorflow-import
from tensorflow.python.eager import context
from tensorflow.python.framework import test_util
from tensorflow.python.training import training as train
from tensorflow.python.training import training_util
# pylint: enable=g-direct-tensorflow-import
from tensorflow_estimator.python.estimator.head import binary_class_head
from tensorflow_estimator.python.estimator.head import multi_head as multi_head_lib
class _Builder(Builder):
def __init__(self,
subnetwork_train_op_fn,
mixture_weights_train_op_fn,
use_logits_last_layer,
seed=42,
multi_head=False):
self._subnetwork_train_op_fn = subnetwork_train_op_fn
self._mixture_weights_train_op_fn = mixture_weights_train_op_fn
self._use_logits_last_layer = use_logits_last_layer
self._seed = seed
self._multi_head = multi_head
@property
def name(self):
return "test"
def build_subnetwork(self,
features,
logits_dimension,
training,
iteration_step,
summary,
previous_ensemble=None):
assert features is not None
assert training is not None
assert iteration_step is not None
assert summary is not None
# Trainable variables collection should always be empty when
# build_subnetwork is called.
assert not tf_compat.v1.get_collection(
tf_compat.v1.GraphKeys.TRAINABLE_VARIABLES)
# Subnetworks get iteration steps instead of global steps.
step_name = "subnetwork_test/iteration_step"
assert step_name == tf_compat.tensor_name(
tf_compat.v1.train.get_global_step())
assert step_name == tf_compat.tensor_name(train.get_global_step())
assert step_name == tf_compat.tensor_name(training_util.get_global_step())
assert step_name == tf_compat.tensor_name(tf_v1.train.get_global_step())
assert step_name == tf_compat.tensor_name(
tf_compat.v1.train.get_or_create_global_step())
assert step_name == tf_compat.tensor_name(train.get_or_create_global_step())
assert step_name == tf_compat.tensor_name(
training_util.get_or_create_global_step())
assert step_name == tf_compat.tensor_name(
tf_v1.train.get_or_create_global_step())
# Subnetworks get scoped summaries.
assert "fake_scalar" == tf_compat.v1.summary.scalar("scalar", 1.)
assert "fake_image" == tf_compat.v1.summary.image("image", 1.)
assert "fake_histogram" == tf_compat.v1.summary.histogram("histogram", 1.)
assert "fake_audio" == tf_compat.v1.summary.audio("audio", 1., 1.)
last_layer = tu.dummy_tensor(shape=(2, 3))
def logits_fn(logits_dim):
return tf_compat.v1.layers.dense(
last_layer,
units=logits_dim,
kernel_initializer=tf_compat.v1.glorot_uniform_initializer(
seed=self._seed))
if self._multi_head:
logits = {
"head1": logits_fn(logits_dimension / 2),
"head2": logits_fn(logits_dimension / 2)
}
last_layer = {"head1": last_layer, "head2": last_layer}
else:
logits = logits_fn(logits_dimension)
return Subnetwork(
last_layer=logits if self._use_logits_last_layer else last_layer,
logits=logits,
complexity=2,
persisted_tensors={})
def build_subnetwork_train_op(self, subnetwork, loss, var_list, labels,
iteration_step, summary, previous_ensemble):
assert iteration_step is not None
assert summary is not None
return self._subnetwork_train_op_fn(loss, var_list)
def build_mixture_weights_train_op(self, loss, var_list, logits, labels,
iteration_step, summary):
assert iteration_step is not None
assert summary is not None
return self._mixture_weights_train_op_fn(loss, var_list)
class _BuilderPrunerAll(_Builder):
"""Removed previous ensemble completely."""
def prune_previous_ensemble(self, previous_ensemble):
return []
class _BuilderPrunerLeaveOne(_Builder):
"""Removed previous ensemble completely."""
def prune_previous_ensemble(self, previous_ensemble):
if previous_ensemble:
return [0]
return []
class _FakeSummary(Summary):
"""A fake adanet.Summary."""
def scalar(self, name, tensor, family=None):
return "fake_scalar"
def image(self, name, tensor, max_outputs=3, family=None):
return "fake_image"
def histogram(self, name, values, family=None):
return "fake_histogram"
def audio(self, name, tensor, sample_rate, max_outputs=3, family=None):
return "fake_audio"
@contextlib.contextmanager
def current_scope(self):
yield
class EnsembleBuilderTest(tu.AdanetTestCase):
@parameterized.named_parameters(
{
"testcase_name": "no_previous_ensemble",
"want_logits": [[.016], [.117]],
"want_loss": 1.338,
"want_adanet_loss": 1.338,
"want_ensemble_trainable_vars": 1,
}, {
"testcase_name": "mean_ensembler",
"want_logits": [[.621], [.979]],
"want_loss": 1.3702,
"want_adanet_loss": 1.3702,
"want_ensemble_trainable_vars": 0,
"ensembler_class": MeanEnsembler,
"want_predictions": {
MeanEnsemble.MEAN_LAST_LAYER: [[-0.2807, -0.1377, -0.6763],
[0.0245, -0.8935, -0.8284]],
}
}, {
"testcase_name": "no_previous_ensemble_prune_all",
"want_logits": [[.016], [.117]],
"want_loss": 1.338,
"want_adanet_loss": 1.338,
"want_ensemble_trainable_vars": 1,
"subnetwork_builder_class": _BuilderPrunerAll
}, {
"testcase_name": "no_previous_ensemble_prune_leave_one",
"want_logits": [[.016], [.117]],
"want_loss": 1.338,
"want_adanet_loss": 1.338,
"want_ensemble_trainable_vars": 1,
"subnetwork_builder_class": _BuilderPrunerLeaveOne
}, {
"testcase_name": "default_mixture_weight_initializer_scalar",
"mixture_weight_initializer": None,
"mixture_weight_type": MixtureWeightType.SCALAR,
"use_logits_last_layer": True,
"want_logits": [[.580], [.914]],
"want_loss": 1.362,
"want_adanet_loss": 1.362,
"want_ensemble_trainable_vars": 1,
}, {
"testcase_name": "default_mixture_weight_initializer_vector",
"mixture_weight_initializer": None,
"mixture_weight_type": MixtureWeightType.VECTOR,
"use_logits_last_layer": True,
"want_logits": [[.580], [.914]],
"want_loss": 1.362,
"want_adanet_loss": 1.362,
"want_ensemble_trainable_vars": 1,
}, {
"testcase_name": "default_mixture_weight_initializer_matrix",
"mixture_weight_initializer": None,
"mixture_weight_type": MixtureWeightType.MATRIX,
"want_logits": [[.016], [.117]],
"want_loss": 1.338,
"want_adanet_loss": 1.338,
"want_ensemble_trainable_vars": 1,
}, {
"testcase_name":
"default_mixture_weight_initializer_matrix_on_logits",
"mixture_weight_initializer":
None,
"mixture_weight_type":
MixtureWeightType.MATRIX,
"use_logits_last_layer":
True,
"want_logits": [[.030], [.047]],
"want_loss":
1.378,
"want_adanet_loss":
1.378,
"want_ensemble_trainable_vars":
1,
}, {
"testcase_name": "no_previous_ensemble_use_bias",
"use_bias": True,
"want_logits": [[0.013], [0.113]],
"want_loss": 1.338,
"want_adanet_loss": 1.338,
"want_ensemble_trainable_vars": 2,
}, {
"testcase_name": "no_previous_ensemble_predict_mode",
"mode": tf.estimator.ModeKeys.PREDICT,
"want_logits": [[0.], [0.]],
"want_ensemble_trainable_vars": 1,
}, {
"testcase_name": "no_previous_ensemble_lambda",
"adanet_lambda": .01,
"want_logits": [[.014], [.110]],
"want_loss": 1.340,
"want_adanet_loss": 1.343,
"want_ensemble_trainable_vars": 1,
}, {
"testcase_name": "no_previous_ensemble_beta",
"adanet_beta": .1,
"want_logits": [[.006], [.082]],
"want_loss": 1.349,
"want_adanet_loss": 1.360,
"want_ensemble_trainable_vars": 1,
}, {
"testcase_name": "no_previous_ensemble_lambda_and_beta",
"adanet_lambda": .01,
"adanet_beta": .1,
"want_logits": [[.004], [.076]],
"want_loss": 1.351,
"want_adanet_loss": 1.364,
"want_ensemble_trainable_vars": 1,
}, {
"testcase_name": "multi_head",
"want_logits": {
"head1": [[.016], [.117]],
"head2": [[.016], [.117]],
},
"want_loss": 2.675,
"want_adanet_loss": 2.675,
"multi_head": True,
"want_ensemble_trainable_vars": 2,
"want_subnetwork_trainable_vars": 4,
}, {
"testcase_name": "expect_subnetwork_exports",
"mode": tf.estimator.ModeKeys.PREDICT,
"want_logits": [[0.], [0.]],
"want_ensemble_trainable_vars": 1,
"export_subnetworks": True,
}, {
"testcase_name": "multi_head_expect_subnetwork_exports",
"mode": tf.estimator.ModeKeys.PREDICT,
"multi_head": True,
"want_logits": {
"head1": [[0.], [0.]],
"head2": [[0.], [0.]],
},
"want_ensemble_trainable_vars": 2,
"want_subnetwork_trainable_vars": 4,
"export_subnetworks": True,
}, {
"testcase_name": "replay_no_prev",
"adanet_beta": .1,
"want_logits": [[.006], [.082]],
"want_loss": 1.349,
"want_adanet_loss": 1.360,
"want_ensemble_trainable_vars": 1,
"my_ensemble_index": 2,
"want_replay_indices": [2],
})
@test_util.run_in_graph_and_eager_modes
def test_build_ensemble_spec(
self,
want_logits,
want_loss=None,
want_adanet_loss=None,
want_ensemble_trainable_vars=None,
adanet_lambda=0.,
adanet_beta=0.,
ensemble_spec_fn=lambda: None,
use_bias=False,
use_logits_last_layer=False,
mixture_weight_type=MixtureWeightType.MATRIX,
mixture_weight_initializer=tf_compat.v1.zeros_initializer(),
warm_start_mixture_weights=True,
subnetwork_builder_class=_Builder,
mode=tf.estimator.ModeKeys.TRAIN,
multi_head=False,
want_subnetwork_trainable_vars=2,
ensembler_class=ComplexityRegularizedEnsembler,
my_ensemble_index=None,
want_replay_indices=None,
want_predictions=None,
export_subnetworks=False,
previous_ensemble_spec=None,
previous_iteration_checkpoint=None):
seed = 64
if multi_head:
head = multi_head_lib.MultiHead(heads=[
binary_class_head.BinaryClassHead(
name="head1", loss_reduction=tf_compat.SUM),
binary_class_head.BinaryClassHead(
name="head2", loss_reduction=tf_compat.SUM)
])
else:
head = binary_class_head.BinaryClassHead(loss_reduction=tf_compat.SUM)
builder = _EnsembleBuilder(
head=head,
export_subnetwork_logits=export_subnetworks,
export_subnetwork_last_layer=export_subnetworks)
def _subnetwork_train_op_fn(loss, var_list):
self.assertLen(var_list, want_subnetwork_trainable_vars)
self.assertEqual(
var_list,
tf_compat.v1.get_collection(
tf_compat.v1.GraphKeys.TRAINABLE_VARIABLES))
# Subnetworks get iteration steps instead of global steps.
self.assertEqual("subnetwork_test/iteration_step",
tf_compat.v1.train.get_global_step().op.name)
# Subnetworks get scoped summaries.
self.assertEqual("fake_scalar", tf_compat.v1.summary.scalar("scalar", 1.))
self.assertEqual("fake_image", tf_compat.v1.summary.image("image", 1.))
self.assertEqual("fake_histogram",
tf_compat.v1.summary.histogram("histogram", 1.))
self.assertEqual("fake_audio",
tf_compat.v1.summary.audio("audio", 1., 1.))
optimizer = tf_compat.v1.train.GradientDescentOptimizer(learning_rate=.1)
return optimizer.minimize(loss, var_list=var_list)
def _mixture_weights_train_op_fn(loss, var_list):
self.assertLen(var_list, want_ensemble_trainable_vars)
self.assertEqual(
var_list,
tf_compat.v1.get_collection(
tf_compat.v1.GraphKeys.TRAINABLE_VARIABLES))
# Subnetworks get iteration steps instead of global steps.
self.assertEqual("ensemble_test/iteration_step",
tf_compat.v1.train.get_global_step().op.name)
      # Mixture weights also get scoped summaries.
self.assertEqual("fake_scalar", tf_compat.v1.summary.scalar("scalar", 1.))
self.assertEqual("fake_image", tf_compat.v1.summary.image("image", 1.))
self.assertEqual("fake_histogram",
tf_compat.v1.summary.histogram("histogram", 1.))
self.assertEqual("fake_audio",
tf_compat.v1.summary.audio("audio", 1., 1.))
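      # An ensembler may create no trainable mixture weights (e.g. the
      # MeanEnsembler used below), leaving var_list empty; in that case there
      # is nothing to minimize.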
if not var_list:
return tf.no_op()
optimizer = tf_compat.v1.train.GradientDescentOptimizer(learning_rate=.1)
return optimizer.minimize(loss, var_list=var_list)
previous_ensemble = None
previous_ensemble_spec = ensemble_spec_fn()
if previous_ensemble_spec:
previous_ensemble = previous_ensemble_spec.ensemble
subnetwork_manager = _SubnetworkManager(head)
subnetwork_builder = subnetwork_builder_class(
_subnetwork_train_op_fn,
_mixture_weights_train_op_fn,
use_logits_last_layer,
seed,
multi_head=multi_head)
with tf.Graph().as_default() as g:
tf_compat.v1.train.get_or_create_global_step()
# A trainable variable to later verify that creating models does not
# affect the global variables collection.
_ = tf_compat.v1.get_variable("some_var", shape=0, trainable=True)
features = {"x": tf.constant([[1.], [2.]])}
if multi_head:
labels = {"head1": tf.constant([0, 1]), "head2": tf.constant([0, 1])}
else:
labels = tf.constant([0, 1])
session_config = tf.compat.v1.ConfigProto(
gpu_options=tf.compat.v1.GPUOptions(allow_growth=True))
subnetwork_spec = subnetwork_manager.build_subnetwork_spec(
name="test",
subnetwork_builder=subnetwork_builder,
summary=_FakeSummary(),
features=features,
mode=mode,
labels=labels,
previous_ensemble=previous_ensemble)
ensembler_kwargs = {}
if ensembler_class is ComplexityRegularizedEnsembler:
ensembler_kwargs.update({
"mixture_weight_type": mixture_weight_type,
"mixture_weight_initializer": mixture_weight_initializer,
"warm_start_mixture_weights": warm_start_mixture_weights,
"model_dir": self.test_subdirectory,
"adanet_lambda": adanet_lambda,
"adanet_beta": adanet_beta,
"use_bias": use_bias
})
if ensembler_class is MeanEnsembler:
ensembler_kwargs.update({"add_mean_last_layer_predictions": True})
ensemble_spec = builder.build_ensemble_spec(
          # Note: when previous_ensemble_spec is not None and
          # warm_start_mixture_weights is True, we need to make sure that the
          # bias and mixture weights are already saved to the checkpoint_dir.
name="test",
previous_ensemble_spec=previous_ensemble_spec,
candidate=EnsembleCandidate("foo", [subnetwork_builder], None),
ensembler=ensembler_class(**ensembler_kwargs),
subnetwork_specs=[subnetwork_spec],
summary=_FakeSummary(),
features=features,
iteration_number=1,
labels=labels,
my_ensemble_index=my_ensemble_index,
mode=mode,
previous_iteration_checkpoint=previous_iteration_checkpoint)
if want_replay_indices:
self.assertAllEqual(want_replay_indices,
ensemble_spec.architecture.replay_indices)
with tf_compat.v1.Session(
graph=g, config=session_config).as_default() as sess:
sess.run(tf_compat.v1.global_variables_initializer())
# Equals the number of subnetwork and ensemble trainable variables,
# plus the one 'some_var' created earlier.
self.assertLen(
tf_compat.v1.trainable_variables(),
want_subnetwork_trainable_vars + want_ensemble_trainable_vars + 1)
# Get the real global step outside a subnetwork's context.
self.assertEqual("global_step",
tf_compat.v1.train.get_global_step().op.name)
self.assertEqual("global_step", train.get_global_step().op.name)
self.assertEqual("global_step", tf_v1.train.get_global_step().op.name)
self.assertEqual("global_step", training_util.get_global_step().op.name)
self.assertEqual("global_step",
tf_compat.v1.train.get_or_create_global_step().op.name)
self.assertEqual("global_step",
train.get_or_create_global_step().op.name)
self.assertEqual("global_step",
tf_v1.train.get_or_create_global_step().op.name)
self.assertEqual("global_step",
training_util.get_or_create_global_step().op.name)
# Get global tf.summary outside a subnetwork's context.
self.assertNotEqual("fake_scalar",
tf_compat.v1.summary.scalar("scalar", 1.))
self.assertNotEqual("fake_image",
tf_compat.v1.summary.image("image", 1.))
self.assertNotEqual("fake_histogram",
tf_compat.v1.summary.histogram("histogram", 1.))
self.assertNotEqual("fake_audio",
tf_compat.v1.summary.audio("audio", 1., 1.))
if mode == tf.estimator.ModeKeys.PREDICT:
self.assertAllClose(
want_logits, sess.run(ensemble_spec.ensemble.logits), atol=1e-3)
self.assertIsNone(ensemble_spec.loss)
self.assertIsNone(ensemble_spec.adanet_loss)
self.assertIsNone(ensemble_spec.train_op)
self.assertIsNotNone(ensemble_spec.export_outputs)
if not export_subnetworks:
return
if not multi_head:
subnetwork_logits = sess.run(ensemble_spec.export_outputs[
_EnsembleBuilder._SUBNETWORK_LOGITS_EXPORT_SIGNATURE].outputs)
self.assertAllClose(subnetwork_logits["test"],
sess.run(subnetwork_spec.subnetwork.logits))
subnetwork_last_layer = sess.run(ensemble_spec.export_outputs[
_EnsembleBuilder._SUBNETWORK_LAST_LAYER_EXPORT_SIGNATURE]
.outputs)
self.assertAllClose(subnetwork_last_layer["test"],
sess.run(subnetwork_spec.subnetwork.last_layer))
else:
self.assertIn("subnetwork_logits_head2",
ensemble_spec.export_outputs)
subnetwork_logits_head1 = sess.run(
ensemble_spec.export_outputs["subnetwork_logits_head1"].outputs)
self.assertAllClose(
subnetwork_logits_head1["test"],
sess.run(subnetwork_spec.subnetwork.logits["head1"]))
self.assertIn("subnetwork_logits_head2",
ensemble_spec.export_outputs)
subnetwork_last_layer_head1 = sess.run(
ensemble_spec.export_outputs["subnetwork_last_layer_head1"]
.outputs)
self.assertAllClose(
subnetwork_last_layer_head1["test"],
sess.run(subnetwork_spec.subnetwork.last_layer["head1"]))
return
        # Verify that train_op works: the loss before training should be
        # greater than the loss after a few train ops.
loss = sess.run(ensemble_spec.loss)
train_op = tf.group(subnetwork_spec.train_op.train_op,
ensemble_spec.train_op.train_op)
for _ in range(3):
sess.run(train_op)
self.assertGreater(loss, sess.run(ensemble_spec.loss))
self.assertAllClose(
want_logits, sess.run(ensemble_spec.ensemble.logits), atol=1e-3)
if ensembler_class is ComplexityRegularizedEnsembler:
# Bias should learn a non-zero value when used.
bias = sess.run(ensemble_spec.ensemble.bias)
if isinstance(bias, dict):
bias = sum(abs(b) for b in bias.values())
if use_bias:
self.assertNotEqual(0., bias)
else:
self.assertAlmostEqual(0., bias)
self.assertAlmostEqual(
want_loss, sess.run(ensemble_spec.loss), places=3)
self.assertAlmostEqual(
want_adanet_loss, sess.run(ensemble_spec.adanet_loss), places=3)
if want_predictions:
self.assertAllClose(
want_predictions,
sess.run(ensemble_spec.ensemble.predictions),
atol=1e-3)
class EnsembleBuilderMetricFnTest(parameterized.TestCase, tf.test.TestCase):
def _make_metrics(self,
metric_fn,
mode=tf.estimator.ModeKeys.EVAL,
multi_head=False,
sess=None):
with context.graph_mode():
if multi_head:
head = multi_head_lib.MultiHead(heads=[
binary_class_head.BinaryClassHead(
name="head1", loss_reduction=tf_compat.SUM),
binary_class_head.BinaryClassHead(
name="head2", loss_reduction=tf_compat.SUM)
])
labels = {"head1": tf.constant([0, 1]), "head2": tf.constant([0, 1])}
else:
head = binary_class_head.BinaryClassHead(loss_reduction=tf_compat.SUM)
labels = tf.constant([0, 1])
features = {"x": tf.constant([[1.], [2.]])}
builder = _EnsembleBuilder(head, metric_fn=metric_fn)
subnetwork_manager = _SubnetworkManager(head, metric_fn=metric_fn)
subnetwork_builder = _Builder(
lambda unused0, unused1: tf.no_op(),
lambda unused0, unused1: tf.no_op(),
use_logits_last_layer=True)
subnetwork_spec = subnetwork_manager.build_subnetwork_spec(
name="test",
subnetwork_builder=subnetwork_builder,
summary=_FakeSummary(),
features=features,
mode=mode,
labels=labels)
ensemble_spec = builder.build_ensemble_spec(
name="test",
candidate=EnsembleCandidate("foo", [subnetwork_builder], None),
ensembler=ComplexityRegularizedEnsembler(
mixture_weight_type=MixtureWeightType.SCALAR),
subnetwork_specs=[subnetwork_spec],
summary=_FakeSummary(),
features=features,
iteration_number=0,
labels=labels,
mode=mode,
my_ensemble_index=0,
previous_ensemble_spec=None,
previous_iteration_checkpoint=None)
subnetwork_metric_ops = subnetwork_spec.eval_metrics.eval_metrics_ops()
ensemble_metric_ops = ensemble_spec.eval_metrics.eval_metrics_ops()
evaluate = self.evaluate
if sess is not None:
evaluate = sess.run
evaluate((tf_compat.v1.global_variables_initializer(),
tf_compat.v1.local_variables_initializer()))
evaluate((subnetwork_metric_ops, ensemble_metric_ops))
# Return the idempotent tensor part of the (tensor, op) metrics tuple.
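      # Each value is a (value_tensor, update_op) pair in the tf.metrics
      # convention: the update op accumulates state, while the value tensor is
      # side-effect free and may be evaluated repeatedly.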
return {
k: evaluate(subnetwork_metric_ops[k][0])
for k in subnetwork_metric_ops
}, {k: evaluate(ensemble_metric_ops[k][0]) for k in ensemble_metric_ops}
def setUp(self):
super(EnsembleBuilderMetricFnTest, self).setUp()
tf_compat.v1.train.create_global_step()
@parameterized.named_parameters(
{
"testcase_name": "mode_train",
"mode": tf.estimator.ModeKeys.TRAIN,
}, {
"testcase_name": "mode_predict",
"mode": tf.estimator.ModeKeys.PREDICT,
})
@test_util.run_in_graph_and_eager_modes
def test_only_adds_metrics_when_evaluating(self, mode):
"""Ensures that metrics are only added during evaluation.
Adding metrics during training will break when running on TPU.
Args:
mode: The mode with which to run the test.
"""
def metric_fn(features):
return {"mean_x": tf_compat.v1.metrics.mean(features["x"])}
subnetwork_metrics, ensemble_metrics = self._make_metrics(metric_fn, mode)
self.assertEmpty(subnetwork_metrics)
self.assertEmpty(ensemble_metrics)
@test_util.run_in_graph_and_eager_modes
def test_should_add_metrics(self):
def _test_metric_fn(metric_fn):
subnetwork_metrics, ensemble_metrics = self._make_metrics(metric_fn)
self.assertIn("mean_x", subnetwork_metrics)
self.assertIn("mean_x", ensemble_metrics)
self.assertEqual(1.5, subnetwork_metrics["mean_x"])
self.assertEqual(1.5, ensemble_metrics["mean_x"])
# assert that it keeps original head metrics
self.assertIn("average_loss", subnetwork_metrics)
self.assertIn("average_loss", ensemble_metrics)
def metric_fn_1(features):
return {"mean_x": tf_compat.v1.metrics.mean(features["x"])}
# TODO: Add support for tf.keras.metrics.Mean like `add_metrics`.
_test_metric_fn(metric_fn_1)
@test_util.run_in_graph_and_eager_modes
def test_should_error_out_for_not_recognized_args(self):
head = binary_class_head.BinaryClassHead(loss_reduction=tf_compat.SUM)
def metric_fn(features, not_recognized):
_, _ = features, not_recognized
return {}
with self.assertRaisesRegexp(ValueError, "not_recognized"):
_EnsembleBuilder(head, metric_fn=metric_fn)
@test_util.run_in_graph_and_eager_modes
def test_all_supported_args(self):
def metric_fn(features, predictions, labels):
self.assertIn("x", features)
self.assertIsNotNone(labels)
self.assertIn("logistic", predictions)
return {}
self._make_metrics(metric_fn)
@test_util.run_in_graph_and_eager_modes
def test_all_supported_args_in_different_order(self):
def metric_fn(labels, features, predictions):
self.assertIn("x", features)
self.assertIsNotNone(labels)
self.assertIn("logistic", predictions)
return {}
self._make_metrics(metric_fn)
@test_util.run_in_graph_and_eager_modes
def test_all_args_are_optional(self):
def _test_metric_fn(metric_fn):
subnetwork_metrics, ensemble_metrics = self._make_metrics(metric_fn)
self.assertEqual(2., subnetwork_metrics["two"])
self.assertEqual(2., ensemble_metrics["two"])
def metric_fn_1():
return {"two": tf_compat.v1.metrics.mean(tf.constant([2.]))}
# TODO: Add support for tf.keras.metrics.Mean like `add_metrics`.
_test_metric_fn(metric_fn_1)
@test_util.run_in_graph_and_eager_modes
def test_overrides_existing_metrics(self):
def _test_metric_fn(metric_fn):
subnetwork_metrics, ensemble_metrics = self._make_metrics(metric_fn=None)
self.assertNotEqual(2., subnetwork_metrics["average_loss"])
self.assertNotEqual(2., ensemble_metrics["average_loss"])
with tf.Graph().as_default() as g, self.test_session(g) as sess:
subnetwork_metrics, ensemble_metrics = self._make_metrics(
metric_fn=metric_fn, sess=sess)
self.assertEqual(2., subnetwork_metrics["average_loss"])
self.assertEqual(2., ensemble_metrics["average_loss"])
def metric_fn_1():
return {"average_loss": tf_compat.v1.metrics.mean(tf.constant([2.]))}
# TODO: Add support for tf.keras.metrics.Mean like `add_metrics`.
_test_metric_fn(metric_fn_1)
@test_util.run_in_graph_and_eager_modes
def test_multi_head(self):
"""Tests b/123084079."""
def metric_fn(predictions):
self.assertIn(("head1", "logits"), predictions)
self.assertIn(("head2", "logits"), predictions)
return {}
self._make_metrics(metric_fn, multi_head=True)
@test_util.run_in_graph_and_eager_modes
def test_operation_metrics(self):
def metric_fn():
var = tf_compat.v1.get_variable(
"metric_var",
shape=[],
trainable=False,
initializer=tf_compat.v1.zeros_initializer(),
collections=[tf_compat.v1.GraphKeys.LOCAL_VARIABLES])
# A metric with an op that doesn't return a Tensor.
op = tf.group(tf_compat.v1.assign_add(var, 1))
return {"operation_metric": (var, op)}
subnetwork_metrics, ensemble_metrics = self._make_metrics(metric_fn)
self.assertEqual(1., subnetwork_metrics["operation_metric"])
self.assertEqual(1., ensemble_metrics["operation_metric"])
@test_util.run_in_graph_and_eager_modes
def test_eval_metric_different_shape_op(self):
def metric_fn():
var = tf_compat.v1.get_variable(
"metric_var",
shape=[2],
trainable=False,
initializer=tf_compat.v1.zeros_initializer(),
collections=[tf_compat.v1.GraphKeys.LOCAL_VARIABLES])
# Shape of metric different from shape of op
op = tf_compat.v1.assign_add(var, [1, 2])
metric = tf.reshape(var[0] + var[1], [])
return {"different_shape_metric": (metric, op)}
subnetwork_metrics, ensemble_metrics = self._make_metrics(metric_fn)
self.assertEqual(3., subnetwork_metrics["different_shape_metric"])
self.assertEqual(3., ensemble_metrics["different_shape_metric"])
if __name__ == "__main__":
tf.test.main()
| 31,597 | 37.770552 | 83 | py |
adanet | adanet-master/adanet/core/eval_metrics_test.py | """Tests for AdaNet eval metrics.
Copyright 2019 The AdaNet Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
from adanet import tf_compat
from adanet.core.architecture import _Architecture
from adanet.core.eval_metrics import _call_eval_metrics
import adanet.core.testing_utils as tu
import tensorflow.compat.v2 as tf
# pylint: disable=g-direct-tensorflow-import
from tensorflow.python.eager import context
from tensorflow.python.framework import test_util
# pylint: enable=g-direct-tensorflow-import
class MetricsTest(tu.AdanetTestCase):
def setup_graph(self):
# We only test the multi head since this is the general case.
self._features = {"x": tf.constant([[1.], [2.]])}
heads = ("head_1", "head_2")
labels = tf.constant([0, 1])
self._labels = {head: labels for head in heads}
predictions = {(head, "predictions"): labels for head in heads}
loss = tf.constant(2.)
self._estimator_spec = tf_compat.v1.estimator.tpu.TPUEstimatorSpec(
mode=tf.estimator.ModeKeys.EVAL,
loss=loss,
predictions=predictions,
eval_metrics=(self._spec_metric_fn, {
"features": self._features,
"labels": self._labels,
"predictions": predictions,
"loss": loss
}))
def _run_metrics(self, metrics):
metric_ops = metrics
if isinstance(metric_ops, tuple):
metric_ops = _call_eval_metrics(metric_ops)
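    # A tuple here is a TPUEstimatorSpec-style (metric_fn, tensors) pair, as
    # built in setup_graph(); _call_eval_metrics invokes metric_fn on those
    # tensors to recover a {name: (value_tensor, update_op)} dict.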
self.evaluate((tf_compat.v1.global_variables_initializer(),
tf_compat.v1.local_variables_initializer()))
self.evaluate(metric_ops)
return {k: self.evaluate(metric_ops[k][0]) for k in metric_ops}
def _assert_tensors_equal(self, actual, expected):
actual, expected = self.evaluate((actual, expected))
self.assertEqual(actual, expected)
def _spec_metric_fn(self, features, labels, predictions, loss):
actual = [features, labels, predictions, loss]
expected = [
self._features, self._labels, self._estimator_spec.predictions,
self._estimator_spec.loss
]
self._assert_tensors_equal(actual, expected)
return {"metric_1": tf_compat.v1.metrics.mean(tf.constant(1.))}
def _metric_fn(self, features, predictions):
actual = [features, predictions]
expected = [self._features, self._estimator_spec.predictions]
self._assert_tensors_equal(actual, expected)
return {"metric_2": tf_compat.v1.metrics.mean(tf.constant(2.))}
@parameterized.named_parameters(
{
"testcase_name": "use_tpu",
"use_tpu": True,
},
{
          # TODO: Figure out why this raises an error in TF 2.0:
# ValueError: Please call update_state(...) on the "mean_1" metric.
"testcase_name": "not_use_tpu",
"use_tpu": False,
})
@test_util.run_in_graph_and_eager_modes
def test_subnetwork_metrics(self, use_tpu):
with context.graph_mode():
self.setup_graph()
spec = self._estimator_spec
if not use_tpu:
spec = spec.as_estimator_spec()
metrics = tu.create_subnetwork_metrics(
self._metric_fn,
use_tpu=use_tpu,
features=self._features,
labels=self._labels,
estimator_spec=spec)
actual = self._run_metrics(metrics.eval_metrics_tuple())
expected = {"loss": 2., "metric_1": 1., "metric_2": 2.}
self.assertEqual(actual, expected)
@test_util.run_in_graph_and_eager_modes
def test_subnetwork_metrics_user_metric_fn_overrides_metrics(self):
with context.graph_mode():
self.setup_graph()
overridden_value = 100.
def _overriding_metric_fn():
value = tf.constant(overridden_value)
return {"metric_1": tf_compat.v1.metrics.mean(value)}
metrics = tu.create_subnetwork_metrics(
_overriding_metric_fn,
features=self._features,
labels=self._labels,
estimator_spec=self._estimator_spec)
actual = self._run_metrics(metrics.eval_metrics_tuple())
expected = {"loss": 2., "metric_1": overridden_value}
self.assertEqual(actual, expected)
@test_util.run_in_graph_and_eager_modes
def test_ensemble_metrics(self):
with context.graph_mode():
self.setup_graph()
architecture = _Architecture("test_ensemble_candidate", "test_ensembler")
architecture.add_subnetwork(iteration_number=0, builder_name="b_0_0")
architecture.add_subnetwork(iteration_number=0, builder_name="b_0_1")
architecture.add_subnetwork(iteration_number=1, builder_name="b_1_0")
architecture.add_subnetwork(iteration_number=2, builder_name="b_2_0")
metrics = tu.create_ensemble_metrics(
self._metric_fn,
features=self._features,
labels=self._labels,
estimator_spec=self._estimator_spec,
architecture=architecture)
actual = self._run_metrics(metrics.eval_metrics_tuple())
serialized_arch_proto = actual["architecture/adanet/ensembles"]
expected_arch_string = b"| b_0_0 | b_0_1 | b_1_0 | b_2_0 |"
self.assertIn(expected_arch_string, serialized_arch_proto)
@parameterized.named_parameters(
{
"testcase_name": "use_tpu_evaluating",
"use_tpu": True,
"mode": tf.estimator.ModeKeys.EVAL,
}, {
"testcase_name": "use_tpu_not_evaluating",
"use_tpu": True,
"mode": tf.estimator.ModeKeys.TRAIN,
}, {
"testcase_name": "not_use_tpu_evaluating",
"use_tpu": False,
"mode": tf.estimator.ModeKeys.EVAL,
}, {
"testcase_name": "not_use_tpu_not_evaluating",
"use_tpu": False,
"mode": tf.estimator.ModeKeys.TRAIN,
})
@test_util.run_in_graph_and_eager_modes
def test_iteration_metrics(self, use_tpu, mode):
with context.graph_mode():
self.setup_graph()
best_candidate_index = 3
ensemble_metrics = []
for i in range(10):
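        # Bind the loop variable through a default argument so that each
        # metric_fn captures its own value of `i` rather than the final one.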
def metric_fn(val=i):
metric = tf.keras.metrics.Mean()
metric.update_state(tf.constant(val))
return {
"ensemble_v1_metric": tf_compat.v1.metrics.mean(tf.constant(val)),
"ensemble_keras_metric": metric
}
ensemble_metrics.append(tu.create_ensemble_metrics(metric_fn))
metrics = tu.create_iteration_metrics(ensemble_metrics=ensemble_metrics)
metrics_fn = (
metrics.best_eval_metrics_tuple
if use_tpu else metrics.best_eval_metric_ops)
actual = self._run_metrics(
metrics_fn(tf.constant(best_candidate_index), mode) or {})
if mode == tf.estimator.ModeKeys.EVAL:
expected = {
"ensemble_v1_metric": best_candidate_index,
"ensemble_keras_metric": best_candidate_index,
"iteration": 1
}
        # We don't actually provide an architecture, so the default one will
        # be reported; drop it before comparing.
del actual["architecture/adanet/ensembles"]
else:
expected = {}
self.assertEqual(actual, expected)
@test_util.run_in_graph_and_eager_modes
def test_metric_ops_not_duplicated_on_cpu(self):
with context.graph_mode():
self.setup_graph()
metric_fn = lambda: {"metric": (tf.constant(5), tf.constant(5))}
best_candidate_index = 3
mode = tf.estimator.ModeKeys.EVAL
ensemble_metrics = tu.create_ensemble_metrics(metric_fn)
subnetwork_metrics = tu.create_subnetwork_metrics(metric_fn)
iteration_metrics = tu.create_iteration_metrics(
ensemble_metrics=[ensemble_metrics],
subnetwork_metrics=[subnetwork_metrics])
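      # Repeated calls should return the same op objects; constructing fresh
      # ops on every call would duplicate them in the graph.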
ensemble_ops1 = ensemble_metrics.eval_metrics_ops()
ensemble_ops2 = ensemble_metrics.eval_metrics_ops()
subnetwork_ops1 = subnetwork_metrics.eval_metrics_ops()
subnetwork_ops2 = subnetwork_metrics.eval_metrics_ops()
iteration_ops1 = iteration_metrics.best_eval_metric_ops(
best_candidate_index, mode)
iteration_ops2 = iteration_metrics.best_eval_metric_ops(
best_candidate_index, mode)
self.assertEqual(subnetwork_ops1, subnetwork_ops2)
self.assertEqual(ensemble_ops1, ensemble_ops2)
self.assertEqual(iteration_ops1, iteration_ops2)
for ops in [ensemble_ops1, subnetwork_ops1, iteration_ops1]:
self.assertIsNotNone(ops)
if __name__ == "__main__":
tf.test.main()
| 8,994 | 35.714286 | 80 | py |
adanet | adanet-master/adanet/core/estimator_distributed_test_runner.py | # Lint as: python2, python3
# Copyright 2019 The AdaNet Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Used to run estimators for distributed tests.
In distributed tests, we spawn processes to run estimator tasks such as the
chief, workers, and parameter servers. Each task's role is determined by the TF_CONFIG
environment variable.
For more information on how tf.estimator.RunConfig uses TF_CONFIG, see
https://www.tensorflow.org/api_docs/python/tf/estimator/RunConfig.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import json
import os
import sys
# Allow this file to import adanet.
sys.path.insert(
0, os.path.join(os.path.dirname(os.path.realpath(__file__)), "../.."))
# pylint: disable=g-import-not-at-top
from absl import app
from absl import flags
from absl import logging
from adanet import tf_compat
from adanet.autoensemble.estimator import AutoEnsembleEstimator
from adanet.core.estimator import Estimator
from adanet.core.evaluator import Evaluator
from adanet.distributed.placement import RoundRobinStrategy
from adanet.subnetwork import Builder
from adanet.subnetwork import SimpleGenerator
from adanet.subnetwork import Subnetwork
# TODO: Switch back to TF 2.0 once the distribution bug is fixed.
import tensorflow.compat.v1 as tf
# pylint: disable=g-direct-tensorflow-import
# Contrib
try:
from tensorflow.contrib.boosted_trees.python.utils import losses as bt_losses
except ImportError:
# Not much we can do here except skip the test.
bt_losses = None
from tensorflow.python.ops import partitioned_variables
from tensorflow.python.training import session_manager as session_manager_lib
from tensorflow_estimator.python.estimator import training as training_lib
from tensorflow_estimator.python.estimator.canned import head as head_lib
# pylint: enable=g-import-not-at-top
# pylint: enable=g-direct-tensorflow-import
flags.DEFINE_enum("estimator_type", "estimator", [
"estimator", "autoensemble", "autoensemble_trees_multiclass",
"estimator_with_experimental_multiworker_strategy"
], "The estimator type to train.")
flags.DEFINE_enum("placement_strategy", "replication", [
"replication",
"round_robin",
], "The distributed placement strategy.")
flags.DEFINE_string("model_dir", "", "The model directory.")
FLAGS = flags.FLAGS
class SessionManager(session_manager_lib.SessionManager):
"""A session manager with a shorter recovery time."""
def __init__(self, *args, **kwargs):
# Reduced wait time.
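    # (tf.compat.v1.train.SessionManager waits 30 seconds between session
    # recovery attempts by default, which dominates test runtime.)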
kwargs["recovery_wait_secs"] = .5
super(SessionManager, self).__init__(*args, **kwargs)
@contextlib.contextmanager
def _monkey_patch_distributed_training_times():
"""Monkey-patches global attributes with subnetwork-specifics ones."""
old_delay_secs_per_worker = training_lib._DELAY_SECS_PER_WORKER # pylint: disable=protected-access
old_session_manager = session_manager_lib.SessionManager
old_min_max_variable_partitioner = (
partitioned_variables.min_max_variable_partitioner)
# monkey-patch global attributes.
session_manager_lib.SessionManager = SessionManager
# Override default delay per worker to speed up tests.
training_lib._DELAY_SECS_PER_WORKER = .2 # pylint: disable=protected-access
# NOTE: DNNEstimator uses min-max partitioner under the hood which will not
# partition layers unless they are above a certain size. In order to test that
# we handle partitioned variables correctly in distributed training we patch
# the min size to be significantly lower. For more context, see b/133435012
# and b/136958627. For some reason, creating a custom DNN using a fixed
# partitioner does not cause the issues described in the bugs so we must test
# DNNEstimator.
def patched_min_max_variable_partitioner(max_partitions=1,
axis=0,
min_slice_size=64,
bytes_per_string_element=16):
del min_slice_size # Unused, min_slice_size is patched to be constant.
return old_min_max_variable_partitioner(
max_partitions=max_partitions,
axis=axis,
min_slice_size=64,
bytes_per_string_element=bytes_per_string_element)
partitioned_variables.min_max_variable_partitioner = (
patched_min_max_variable_partitioner)
try:
yield
finally:
# Revert monkey-patches.
session_manager_lib.SessionManager = old_session_manager
training_lib._DELAY_SECS_PER_WORKER = old_delay_secs_per_worker # pylint: disable=protected-access
partitioned_variables.min_max_variable_partitioner = (
old_min_max_variable_partitioner)
class _DNNBuilder(Builder):
"""A simple DNN subnetwork builder."""
def __init__(self, name, config, layer_size=3, seed=13):
self._name = name
self._layer_size = layer_size
self._config = config
self._seed = seed
@property
def name(self):
return self._name
def build_subnetwork(self,
features,
logits_dimension,
training,
iteration_step,
summary,
previous_ensemble=None):
seed = self._seed
if previous_ensemble:
# Increment seed so different iterations don't learn the exact same thing.
seed += 1
num_ps_replicas = self._config.num_ps_replicas if self._config else 0
partitioner = tf_compat.v1.min_max_variable_partitioner(
max_partitions=num_ps_replicas)
with tf_compat.v1.variable_scope("dnn", partitioner=partitioner):
shared = {}
with tf_compat.v1.variable_scope("hidden_layer"):
w = tf_compat.v1.get_variable(
shape=[2, self._layer_size],
initializer=tf_compat.v1.glorot_uniform_initializer(seed=seed),
name="weight")
hidden_layer = tf.matmul(features["x"], w)
if previous_ensemble:
other_hidden_layer = previous_ensemble.weighted_subnetworks[
-1].subnetwork.shared["hidden_layer"]
hidden_layer = tf.concat([hidden_layer, other_hidden_layer], axis=1)
# Use a leaky-relu activation so that gradients can flow even when
# outputs are negative. Leaky relu has a non-zero slope when x < 0.
# Otherwise success at learning is completely dependent on random seed.
hidden_layer = tf.nn.leaky_relu(hidden_layer, alpha=.2)
shared["hidden_layer"] = hidden_layer
with tf_compat.v1.variable_scope("logits"):
logits = tf_compat.v1.layers.dense(
hidden_layer,
logits_dimension,
kernel_initializer=tf_compat.v1.glorot_uniform_initializer(
seed=seed))
summary.scalar("scalar", 3)
return Subnetwork(
last_layer=logits, logits=logits, complexity=3, shared=shared)
def build_subnetwork_train_op(self, subnetwork, loss, var_list, labels,
iteration_step, summary, previous_ensemble):
optimizer = tf_compat.v1.train.AdamOptimizer(learning_rate=.001)
return optimizer.minimize(loss, var_list=var_list)
def train_and_evaluate_estimator():
"""Runs Estimator distributed training."""
# The tf.estimator.RunConfig automatically parses the TF_CONFIG environment
# variables during construction.
# For more information on how tf.estimator.RunConfig uses TF_CONFIG, see
# https://www.tensorflow.org/api_docs/python/tf/estimator/RunConfig.
config = tf.estimator.RunConfig(
tf_random_seed=42,
save_checkpoints_steps=10,
save_checkpoints_secs=None,
# Keep all checkpoints to avoid checkpoint GC causing failures during
# evaluation.
# TODO: Prevent checkpoints that are currently being
# evaluated by another process from being garbage collected.
keep_checkpoint_max=None,
model_dir=FLAGS.model_dir,
session_config=tf_compat.v1.ConfigProto(
log_device_placement=False,
# Ignore other workers; only talk to parameter servers.
# Otherwise, when a chief/worker terminates, the others will hang.
device_filters=["/job:ps"]))
def input_fn():
input_features = {"x": tf.constant(features, name="x")}
input_labels = tf.constant(labels, name="y")
return tf.data.Dataset.from_tensors((input_features, input_labels)).repeat()
kwargs = {
"max_iteration_steps": 100,
"force_grow": True,
"delay_secs_per_worker": .2,
"max_worker_delay_secs": 1,
"worker_wait_secs": 1,
# Set low timeout to reduce wait time for failures.
"worker_wait_timeout_secs": 180,
"evaluator": Evaluator(input_fn, steps=10),
"config": config
}
head = head_lib._regression_head( # pylint: disable=protected-access
loss_reduction=tf.losses.Reduction.SUM_OVER_BATCH_SIZE)
features = [[1., 0.], [0., 0], [0., 1.], [1., 1.]]
labels = [[1.], [0.], [1.], [0.]]
estimator_type = FLAGS.estimator_type
if FLAGS.placement_strategy == "round_robin":
kwargs["experimental_placement_strategy"] = RoundRobinStrategy()
if estimator_type == "autoensemble":
feature_columns = [tf.feature_column.numeric_column("x", shape=[2])]
# pylint: disable=g-long-lambda
# TODO: Switch optimizers to tf.keras.optimizers.Adam once the
# distribution bug is fixed.
candidate_pool = {
"linear":
tf.estimator.LinearEstimator(
head=head,
feature_columns=feature_columns,
optimizer=lambda: tf_compat.v1.train.AdamOptimizer(
learning_rate=.001)),
"dnn":
tf.estimator.DNNEstimator(
head=head,
feature_columns=feature_columns,
optimizer=lambda: tf_compat.v1.train.AdamOptimizer(
learning_rate=.001),
hidden_units=[3]),
"dnn2":
tf.estimator.DNNEstimator(
head=head,
feature_columns=feature_columns,
optimizer=lambda: tf_compat.v1.train.AdamOptimizer(
learning_rate=.001),
hidden_units=[10, 10]),
}
# pylint: enable=g-long-lambda
estimator = AutoEnsembleEstimator(
head=head, candidate_pool=candidate_pool, **kwargs)
elif estimator_type == "estimator":
subnetwork_generator = SimpleGenerator([
_DNNBuilder("dnn1", config, layer_size=3),
_DNNBuilder("dnn2", config, layer_size=4),
_DNNBuilder("dnn3", config, layer_size=5),
])
estimator = Estimator(
head=head, subnetwork_generator=subnetwork_generator, **kwargs)
  elif estimator_type == "autoensemble_trees_multiclass":
if not bt_losses:
logging.warning(
"Skipped autoensemble_trees_multiclass test since contrib is missing."
)
return
n_classes = 3
head = head_lib._multi_class_head_with_softmax_cross_entropy_loss( # pylint: disable=protected-access
n_classes=n_classes,
loss_reduction=tf.losses.Reduction.SUM_OVER_BATCH_SIZE)
def tree_loss_fn(labels, logits):
result = bt_losses.per_example_maxent_loss(
labels=labels, logits=logits, num_classes=n_classes, weights=None)
return result[0]
tree_head = head_lib._multi_class_head_with_softmax_cross_entropy_loss( # pylint: disable=protected-access
loss_fn=tree_loss_fn,
n_classes=n_classes,
loss_reduction=tf.losses.Reduction.SUM_OVER_BATCH_SIZE)
labels = [[1], [0], [1], [2]]
feature_columns = [tf.feature_column.numeric_column("x", shape=[2])]
# TODO: Switch optimizers to tf.keras.optimizers.Adam once the
# distribution bug is fixed.
candidate_pool = lambda config: { # pylint: disable=g-long-lambda
"linear":
tf.estimator.LinearEstimator(
head=head,
feature_columns=feature_columns,
optimizer=tf_compat.v1.train.AdamOptimizer(
learning_rate=.001),
config=config),
"gbdt":
tf.estimator.BoostedTreesEstimator(
head=tree_head,
feature_columns=feature_columns,
n_trees=10,
n_batches_per_layer=1,
center_bias=False,
config=config),
}
estimator = AutoEnsembleEstimator(
head=head, candidate_pool=candidate_pool, **kwargs)
elif estimator_type == "estimator_with_experimental_multiworker_strategy":
def _model_fn(features, labels, mode):
"""Test model_fn."""
layer = tf.keras.layers.Dense(1)
logits = layer(features["x"])
if mode == tf.estimator.ModeKeys.PREDICT:
predictions = {"logits": logits}
return tf.estimator.EstimatorSpec(mode, predictions=predictions)
loss = tf.losses.mean_squared_error(
labels=labels,
predictions=logits,
reduction=tf.losses.Reduction.SUM_OVER_BATCH_SIZE)
if mode == tf.estimator.ModeKeys.EVAL:
return tf.estimator.EstimatorSpec(mode, loss=loss)
if mode == tf.estimator.ModeKeys.TRAIN:
optimizer = tf.train.GradientDescentOptimizer(0.2)
train_op = optimizer.minimize(
loss, global_step=tf.train.get_global_step())
return tf.estimator.EstimatorSpec(mode, loss=loss, train_op=train_op)
if json.loads(os.environ["TF_CONFIG"])["task"]["type"] == "evaluator":
# The evaluator job would crash if MultiWorkerMirroredStrategy is called.
distribution = None
else:
distribution = tf.distribute.experimental.MultiWorkerMirroredStrategy()
multiworker_config = tf.estimator.RunConfig(
tf_random_seed=42,
model_dir=FLAGS.model_dir,
train_distribute=distribution,
session_config=tf_compat.v1.ConfigProto(log_device_placement=False))
# TODO: Replace with adanet.Estimator. Currently this just verifies
# that the distributed testing framework supports distribute strategies.
estimator = tf.estimator.Estimator(
model_fn=_model_fn, config=multiworker_config)
train_hooks = [
tf.estimator.ProfilerHook(save_steps=50, output_dir=FLAGS.model_dir)
]
# Train for three iterations.
train_spec = tf.estimator.TrainSpec(
input_fn=input_fn, max_steps=300, hooks=train_hooks)
eval_spec = tf.estimator.EvalSpec(
input_fn=input_fn, steps=1, start_delay_secs=.5, throttle_secs=.05)
# Calling train_and_evaluate is the official way to perform distributed
# training with an Estimator. Calling Estimator#train directly results
# in an error when the TF_CONFIG is setup for a cluster.
tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)
def main(argv):
del argv # Unused.
# Reduce hard-coded waits, delays, and timeouts for quicker tests.
with _monkey_patch_distributed_training_times():
train_and_evaluate_estimator()
if __name__ == "__main__":
app.run(main)
| 15,527 | 37.626866 | 111 | py |
adanet | adanet-master/adanet/core/estimator_test.py | """Tests for the AdaNet estimator's single-graph implementation.
Copyright 2018 The AdaNet Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import os
import time
from absl import logging
from absl.testing import parameterized
from adanet import replay
from adanet import tf_compat
from adanet.core import testing_utils as tu
from adanet.core.estimator import Estimator
from adanet.core.evaluator import Evaluator
from adanet.core.report_materializer import ReportMaterializer
from adanet.distributed.placement import RoundRobinStrategy
from adanet.ensemble import AllStrategy
from adanet.ensemble import ComplexityRegularizedEnsembler
from adanet.ensemble import GrowStrategy
from adanet.ensemble import MixtureWeightType
from adanet.ensemble import SoloStrategy
from adanet.subnetwork import Builder
from adanet.subnetwork import Generator
from adanet.subnetwork import MaterializedReport
from adanet.subnetwork import Report
from adanet.subnetwork import SimpleGenerator
from adanet.subnetwork import Subnetwork
from adanet.subnetwork import TrainOpSpec
import numpy as np
import tensorflow.compat.v2 as tf
# pylint: disable=g-direct-tensorflow-import
from tensorflow.python.eager import context
from tensorflow.python.framework import test_util
from tensorflow.python.tools import saved_model_utils
# pylint: enable=g-direct-tensorflow-import
from tensorflow_estimator.python.estimator.canned.head import _binary_logistic_head_with_sigmoid_cross_entropy_loss as binary_class_head_v1
from tensorflow_estimator.python.estimator.export import export
from tensorflow_estimator.python.estimator.head import binary_class_head
from tensorflow_estimator.python.estimator.head import multi_head as multi_head_lib
from tensorflow_estimator.python.estimator.head import regression_head
logging.set_verbosity(logging.INFO)
XOR_FEATURES = [[1., 0.], [0., 0], [0., 1.], [1., 1.]]
XOR_LABELS = [[1.], [0.], [1.], [0.]]
class _DNNBuilder(Builder):
"""A simple DNN subnetwork builder."""
def __init__(self,
name,
learning_rate=.001,
mixture_weight_learning_rate=.001,
return_penultimate_layer=True,
layer_size=1,
subnetwork_chief_hooks=None,
subnetwork_hooks=None,
mixture_weight_chief_hooks=None,
mixture_weight_hooks=None,
seed=13):
self._name = name
self._learning_rate = learning_rate
self._mixture_weight_learning_rate = mixture_weight_learning_rate
self._return_penultimate_layer = return_penultimate_layer
self._layer_size = layer_size
self._subnetwork_chief_hooks = subnetwork_chief_hooks
self._subnetwork_hooks = subnetwork_hooks
self._mixture_weight_chief_hooks = mixture_weight_chief_hooks
self._mixture_weight_hooks = mixture_weight_hooks
self._seed = seed
@property
def name(self):
return self._name
def build_subnetwork(self,
features,
logits_dimension,
training,
iteration_step,
summary,
previous_ensemble=None):
seed = self._seed
if previous_ensemble:
# Increment seed so different iterations don't learn the exact same thing.
seed += 1
with tf_compat.v1.variable_scope("dnn"):
persisted_tensors = {}
with tf_compat.v1.variable_scope("hidden_layer"):
w = tf_compat.v1.get_variable(
shape=[2, self._layer_size],
initializer=tf_compat.v1.glorot_uniform_initializer(seed=seed),
name="weight")
disjoint_op = tf.constant([1], name="disjoint_op")
with tf_compat.v1.colocate_with(disjoint_op): # tests b/118865235
hidden_layer = tf.matmul(features["x"], w)
if previous_ensemble:
other_hidden_layer = previous_ensemble.weighted_subnetworks[
-1].subnetwork.persisted_tensors["hidden_layer"]
hidden_layer = tf.concat([hidden_layer, other_hidden_layer], axis=1)
# Use a leaky-relu activation so that gradients can flow even when
# outputs are negative. Leaky relu has a non-zero slope when x < 0.
# Otherwise success at learning is completely dependent on random seed.
hidden_layer = tf.nn.leaky_relu(hidden_layer, alpha=.2)
persisted_tensors["hidden_layer"] = hidden_layer
if training:
# This change will only be in the next iteration if
# `freeze_training_graph` is `True`.
persisted_tensors["hidden_layer"] = 2 * hidden_layer
last_layer = hidden_layer
with tf_compat.v1.variable_scope("logits"):
logits = tf_compat.v1.layers.dense(
hidden_layer,
logits_dimension,
kernel_initializer=tf_compat.v1.glorot_uniform_initializer(seed=seed))
summary.scalar("scalar", 3)
batch_size = features["x"].get_shape().as_list()[0]
summary.image("image", tf.ones([batch_size, 3, 3, 1]))
with tf_compat.v1.variable_scope("nested"):
summary.scalar("scalar", 5)
return Subnetwork(
last_layer=last_layer if self._return_penultimate_layer else logits,
logits=logits,
complexity=3,
persisted_tensors=persisted_tensors,
shared=persisted_tensors)
def build_subnetwork_train_op(self, subnetwork, loss, var_list, labels,
iteration_step, summary, previous_ensemble):
optimizer = tf_compat.v1.train.GradientDescentOptimizer(
learning_rate=self._learning_rate)
train_op = optimizer.minimize(loss, var_list=var_list)
if not self._subnetwork_hooks:
return train_op
return TrainOpSpec(train_op, self._subnetwork_chief_hooks,
self._subnetwork_hooks)
def build_mixture_weights_train_op(self, loss, var_list, logits, labels,
iteration_step, summary):
optimizer = tf_compat.v1.train.GradientDescentOptimizer(
learning_rate=self._mixture_weight_learning_rate)
train_op = optimizer.minimize(loss, var_list=var_list)
if not self._mixture_weight_hooks:
return train_op
return TrainOpSpec(train_op, self._mixture_weight_chief_hooks,
self._mixture_weight_hooks)
def build_subnetwork_report(self):
return Report(
hparams={"layer_size": self._layer_size},
attributes={"complexity": tf.constant(3, dtype=tf.int32)},
metrics={
"moo": (tf.constant(3,
dtype=tf.int32), tf.constant(3, dtype=tf.int32))
})
class _SimpleBuilder(Builder):
"""A simple subnetwork builder that takes feature_columns."""
def __init__(self, name, feature_columns, seed=42):
self._name = name
self._feature_columns = feature_columns
self._seed = seed
@property
def name(self):
return self._name
def build_subnetwork(self,
features,
logits_dimension,
training,
iteration_step,
summary,
previous_ensemble=None):
seed = self._seed
if previous_ensemble:
# Increment seed so different iterations don't learn the exact same thing.
seed += 1
with tf_compat.v1.variable_scope("simple"):
input_layer = tf_compat.v1.feature_column.input_layer(
features=features, feature_columns=self._feature_columns)
last_layer = input_layer
with tf_compat.v1.variable_scope("logits"):
logits = tf_compat.v1.layers.dense(
last_layer,
logits_dimension,
kernel_initializer=tf_compat.v1.glorot_uniform_initializer(seed=seed))
return Subnetwork(
last_layer=last_layer,
logits=logits,
complexity=1,
persisted_tensors={},
)
def build_subnetwork_train_op(self, subnetwork, loss, var_list, labels,
iteration_step, summary, previous_ensemble):
optimizer = tf_compat.v1.train.GradientDescentOptimizer(learning_rate=.001)
return optimizer.minimize(loss, var_list=var_list)
def build_mixture_weights_train_op(self, loss, var_list, logits, labels,
iteration_step, summary):
optimizer = tf_compat.v1.train.GradientDescentOptimizer(learning_rate=.001)
return optimizer.minimize(loss, var_list=var_list)
class _NanLossBuilder(Builder):
"""A subnetwork builder always produces a NaN loss."""
@property
def name(self):
return "nan"
def build_subnetwork(self,
features,
logits_dimension,
training,
iteration_step,
summary,
previous_ensemble=None):
logits = tf_compat.v1.layers.dense(
features["x"],
logits_dimension,
kernel_initializer=tf_compat.v1.glorot_uniform_initializer(
seed=42)) * np.nan
return Subnetwork(last_layer=logits, logits=logits, complexity=0)
def build_subnetwork_train_op(self, subnetwork, loss, var_list, labels,
iteration_step, summary, previous_ensemble):
return tf.no_op()
class _FrozenLinearBuilder(Builder):
"""A simple linear subnetwork builder that doesn't train."""
def __init__(self, name, seed=42):
self._name = name
self._seed = seed
@property
def name(self):
return self._name
def build_subnetwork(self,
features,
logits_dimension,
training,
iteration_step,
summary,
previous_ensemble=None):
logits = tf_compat.v1.layers.dense(
features["x"],
logits_dimension,
kernel_initializer=tf_compat.v1.glorot_uniform_initializer(
seed=self._seed))
return Subnetwork(
last_layer=features["x"],
logits=logits,
complexity=1,
persisted_tensors={},
)
def build_subnetwork_train_op(self, subnetwork, loss, var_list, labels,
iteration_step, summary, previous_ensemble):
return tf.no_op()
class _FakeGenerator(Generator):
"""Generator that exposed generate_candidates' arguments."""
def __init__(self, spy_fn, subnetwork_builders):
"""Checks the arguments passed to generate_candidates.
Args:
spy_fn: (iteration_number, previous_ensemble_reports, all_reports) -> ().
Spies on the arguments passed to generate_candidates whenever it is
called.
subnetwork_builders: List of `Builder`s to return in every call to
generate_candidates.
"""
self._spy_fn = spy_fn
self._subnetwork_builders = subnetwork_builders
def generate_candidates(self, previous_ensemble, iteration_number,
previous_ensemble_reports, all_reports):
"""Spys on arguments passed in, then returns a fixed list of candidates."""
del previous_ensemble # unused
self._spy_fn(iteration_number, previous_ensemble_reports, all_reports)
return self._subnetwork_builders
class _WidthLimitingDNNBuilder(_DNNBuilder):
"""Limits the width of the previous_ensemble."""
def __init__(self,
name,
learning_rate=.001,
mixture_weight_learning_rate=.001,
return_penultimate_layer=True,
layer_size=1,
width_limit=None,
seed=13):
    if width_limit == 0:
raise ValueError("width_limit must be at least 1 or None.")
super(_WidthLimitingDNNBuilder,
self).__init__(name, learning_rate, mixture_weight_learning_rate,
return_penultimate_layer, layer_size, seed)
self._width_limit = width_limit
def prune_previous_ensemble(self, previous_ensemble):
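    # Keeps at most `width_limit - 1` of the trailing previous subnetworks so
    # that, together with the newly grown subnetwork, ensemble width stays
    # within the limit. For example, 4 previous subnetworks with width_limit=3
    # yields indices [2, 3].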
indices = list(range(len(previous_ensemble.weighted_subnetworks)))
if self._width_limit is None:
return indices
if self._width_limit == 1:
return []
return indices[-self._width_limit + 1:] # pylint: disable=invalid-unary-operand-type
class _FakeEvaluator(object):
"""Fakes an `adanet.Evaluator`."""
def __init__(self, input_fn):
self._input_fn = input_fn
@property
def input_fn(self):
"""Return the input_fn."""
return self._input_fn
@property
def steps(self):
"""Return the number of evaluation steps."""
return 1
@property
def metric_name(self):
"""Returns the name of the metric being optimized."""
return "adanet_loss"
@property
def objective_fn(self):
"""Always returns the minimize objective."""
return np.nanargmin
def evaluate(self, sess, ensemble_metrics):
"""Abstract method to be overridden in subclasses."""
del sess, ensemble_metrics # Unused.
raise NotImplementedError
class _AlwaysLastEvaluator(_FakeEvaluator):
def evaluate(self, sess, ensemble_metrics):
"""Always makes the last loss the smallest."""
del sess # Unused.
losses = [np.inf] * len(ensemble_metrics)
losses[-1] = 0.
return losses
class _AlwaysSecondToLastEvaluator(_FakeEvaluator):
def evaluate(self, sess, ensemble_metrics):
"""Always makes the second to last loss the smallest."""
del sess # Unused.
losses = [np.inf] * len(ensemble_metrics)
losses[-2] = 0.
return losses
class _EarlyStoppingHook(tf_compat.SessionRunHook):
"""Hook that immediately requests training to stop."""
def after_run(self, run_context, run_values):
run_context.request_stop()
class EstimatorTest(tu.AdanetTestCase):
@parameterized.named_parameters(
{
"testcase_name": "one_step",
"subnetwork_generator": SimpleGenerator([_DNNBuilder("dnn")]),
"max_iteration_steps": 1,
"steps": 1,
"max_steps": None,
"want_loss": 0.49899703,
"want_iteration": 0,
"want_global_step": 1,
},
{
"testcase_name": "enable_v2_checkpoint",
"subnetwork_generator": SimpleGenerator([_DNNBuilder("dnn")]),
"max_iteration_steps": 100,
"steps": 300,
"max_steps": None,
"want_loss": 0.3221922,
"want_iteration": 2,
"want_global_step": 300,
"enable_v2_checkpoint": True,
},
{
"testcase_name": "none_max_iteration_steps",
"subnetwork_generator": SimpleGenerator([_DNNBuilder("dnn")]),
"max_iteration_steps": None,
"steps": 300,
"max_steps": None,
"want_loss": 0.32487726,
"want_iteration": 0,
"want_global_step": 300,
},
{
"testcase_name": "single_builder_max_steps",
"subnetwork_generator": SimpleGenerator([_DNNBuilder("dnn")]),
"max_iteration_steps": 200,
"max_steps": 300,
"want_loss": 0.32420248,
"want_iteration": 1,
"want_global_step": 300,
},
{
"testcase_name": "single_builder_steps",
"subnetwork_generator": SimpleGenerator([_DNNBuilder("dnn")]),
"max_iteration_steps": 200,
"steps": 300,
"max_steps": None,
"want_loss": 0.32420248,
"want_iteration": 1,
"want_global_step": 300,
},
{
"testcase_name": "single_builder_two_max_iteration_fewer_max_steps",
"subnetwork_generator": SimpleGenerator([_DNNBuilder("dnn")]),
"max_iteration_steps": 200,
"max_iterations": 2,
"max_steps": 300,
"want_loss": 0.32420248,
"want_iteration": 1,
"want_global_step": 300,
},
{
"testcase_name": "single_builder_no_bias",
"subnetwork_generator": SimpleGenerator([_DNNBuilder("dnn")]),
"max_iteration_steps": 200,
"use_bias": False,
"want_loss": 0.496736,
"want_iteration": 1,
"want_global_step": 300,
},
{
"testcase_name":
"single_builder_subnetwork_hooks",
"subnetwork_generator":
SimpleGenerator([
_DNNBuilder(
"dnn",
subnetwork_chief_hooks=[
tu.ModifierSessionRunHook("chief_hook_var")
],
subnetwork_hooks=[tu.ModifierSessionRunHook("hook_var")])
]),
"max_iteration_steps":
200,
"use_bias":
False,
"want_loss":
0.496736,
"want_iteration":
1,
"want_global_step":
300,
},
{
"testcase_name":
"single_builder_mixture_weight_hooks",
"subnetwork_generator":
SimpleGenerator([
_DNNBuilder(
"dnn",
mixture_weight_chief_hooks=[
tu.ModifierSessionRunHook("chief_hook_var")
],
mixture_weight_hooks=[
tu.ModifierSessionRunHook("hook_var")
])
]),
"max_iteration_steps":
200,
"use_bias":
False,
"want_loss":
0.496736,
"want_iteration":
1,
"want_global_step":
300,
},
{
"testcase_name":
"single_builder_scalar_mixture_weight",
"subnetwork_generator":
SimpleGenerator(
[_DNNBuilder("dnn", return_penultimate_layer=False)]),
"max_iteration_steps":
200,
"mixture_weight_type":
MixtureWeightType.SCALAR,
"want_loss":
0.32317898,
"want_iteration":
1,
"want_global_step":
300,
},
{
"testcase_name":
"single_builder_vector_mixture_weight",
"subnetwork_generator":
SimpleGenerator(
[_DNNBuilder("dnn", return_penultimate_layer=False)]),
"max_iteration_steps":
200,
"mixture_weight_type":
MixtureWeightType.VECTOR,
"want_loss":
0.32317898,
"want_iteration":
1,
"want_global_step":
300,
},
{
"testcase_name": "single_builder_replicate_ensemble_in_training",
"subnetwork_generator": SimpleGenerator([_DNNBuilder("dnn")]),
"replicate_ensemble_in_training": True,
"max_iteration_steps": 200,
"max_steps": 300,
"want_loss": 0.32420215,
"want_iteration": 1,
"want_global_step": 300,
},
{
"testcase_name": "single_builder_with_hook",
"subnetwork_generator": SimpleGenerator([_DNNBuilder("dnn")]),
"max_iteration_steps": 200,
"hooks": [tu.ModifierSessionRunHook()],
"want_loss": 0.32420248,
"want_iteration": 1,
"want_global_step": 300,
},
{
"testcase_name": "high_max_iteration_steps",
"subnetwork_generator": SimpleGenerator([_DNNBuilder("dnn")]),
"max_iteration_steps": 500,
"want_loss": 0.32487726,
"want_iteration": 0,
"want_global_step": 300,
},
{
"testcase_name":
"two_builders",
"subnetwork_generator":
SimpleGenerator(
[_DNNBuilder("dnn"),
_DNNBuilder("dnn2", seed=99)]),
"max_iteration_steps":
200,
"want_loss":
0.27713922,
"want_iteration":
1,
"want_global_step":
300,
},
{
"testcase_name":
"two_builders_different_layer_sizes",
"subnetwork_generator":
SimpleGenerator(
[_DNNBuilder("dnn"),
_DNNBuilder("dnn2", layer_size=3)]),
"max_iteration_steps":
200,
"want_loss":
0.29696745,
"want_iteration":
1,
"want_global_step":
300,
},
{
"testcase_name":
"two_builders_one_max_iteration_none_steps_and_none_max_steps",
"subnetwork_generator":
SimpleGenerator(
[_DNNBuilder("dnn"),
_DNNBuilder("dnn2", layer_size=3)]),
"max_iteration_steps":
200,
"max_iterations":
1,
"steps":
None,
"max_steps":
None,
"want_loss":
0.35249719,
"want_iteration":
0,
"want_global_step":
200,
},
{
"testcase_name":
"two_builders_one_max_iteration_two_hundred_steps",
"subnetwork_generator":
SimpleGenerator(
[_DNNBuilder("dnn"),
_DNNBuilder("dnn2", layer_size=3)]),
"max_iteration_steps":
200,
"max_iterations":
1,
"steps":
300,
"max_steps":
None,
"want_loss":
0.35249719,
"want_iteration":
0,
"want_global_step":
200,
},
{
"testcase_name":
"two_builders_two_max_iteration_none_steps_and_none_max_steps",
"subnetwork_generator":
SimpleGenerator(
[_DNNBuilder("dnn"),
_DNNBuilder("dnn2", layer_size=3)]),
"max_iteration_steps":
200,
"max_iterations":
2,
"steps":
None,
"max_steps":
None,
"want_loss":
0.26503286,
"want_iteration":
1,
"want_global_step":
400,
},
{
"testcase_name":
"two_builders_different_layer_sizes_three_iterations",
"subnetwork_generator":
SimpleGenerator(
[_DNNBuilder("dnn"),
_DNNBuilder("dnn2", layer_size=3)]),
"max_iteration_steps":
100,
"want_loss":
0.26433355,
"want_iteration":
2,
"want_global_step":
300,
},
{
"testcase_name":
"two_dnn_export_subnetworks",
"subnetwork_generator":
SimpleGenerator(
[_DNNBuilder("dnn"),
_DNNBuilder("dnn2", layer_size=3)]),
"max_iteration_steps":
100,
"want_loss":
0.26433355,
"want_iteration":
2,
"want_global_step":
300,
"export_subnetworks":
True,
},
{
"testcase_name":
"width_limiting_builder_no_pruning",
"subnetwork_generator":
SimpleGenerator([_WidthLimitingDNNBuilder("no_pruning")]),
"max_iteration_steps":
75,
"want_loss":
0.32001898,
"want_iteration":
3,
"want_global_step":
300,
},
{
"testcase_name":
"width_limiting_builder_some_pruning",
"subnetwork_generator":
SimpleGenerator(
[_WidthLimitingDNNBuilder("some_pruning", width_limit=2)]),
"max_iteration_steps":
75,
"want_loss":
0.38592532,
"want_iteration":
3,
"want_global_step":
300,
},
{
"testcase_name":
"width_limiting_builder_prune_all",
"subnetwork_generator":
SimpleGenerator(
[_WidthLimitingDNNBuilder("prune_all", width_limit=1)]),
"max_iteration_steps":
75,
"want_loss":
0.43161362,
"want_iteration":
3,
"want_global_step":
300,
},
{
"testcase_name":
"width_limiting_builder_mixed",
"subnetwork_generator":
SimpleGenerator([
_WidthLimitingDNNBuilder("no_pruning"),
_WidthLimitingDNNBuilder("some_pruning", width_limit=2),
_WidthLimitingDNNBuilder("prune_all", width_limit=1)
]),
"max_iteration_steps":
75,
"want_loss":
0.32001898,
"want_iteration":
3,
"want_global_step":
300,
},
{
"testcase_name":
"evaluator_good_input",
"subnetwork_generator":
SimpleGenerator(
[_DNNBuilder("dnn"),
_DNNBuilder("dnn2", layer_size=3)]),
"evaluator":
Evaluator(
input_fn=tu.dummy_input_fn([[1., 1.]], [[0.]]), steps=3),
"max_iteration_steps":
200,
"want_loss":
0.36189985,
"want_iteration":
1,
"want_global_step":
300,
},
{
"testcase_name":
"evaluator_bad_input",
"subnetwork_generator":
SimpleGenerator(
[_DNNBuilder("dnn"),
_DNNBuilder("dnn2", layer_size=3)]),
"evaluator":
Evaluator(
input_fn=tu.dummy_input_fn([[1., 1.]], [[1.]]), steps=3),
"max_iteration_steps":
200,
"want_loss":
0.29696745,
"want_iteration":
1,
"want_global_step":
300,
},
{
"testcase_name":
"evaluator_always_last",
"subnetwork_generator":
SimpleGenerator([
_DNNBuilder("dnn"),
_DNNBuilder("dnn2", layer_size=3),
]),
"evaluator":
_AlwaysLastEvaluator(
input_fn=tu.dummy_input_fn([[1., 1.]], [[0.]])),
"max_iteration_steps":
None,
"want_loss":
0.31389591,
"want_iteration":
0,
"want_global_step":
300,
},
{
"testcase_name":
"evaluator_always_second_to_last",
"subnetwork_generator":
SimpleGenerator([
_DNNBuilder("dnn"),
_DNNBuilder("dnn2", layer_size=3),
]),
"evaluator":
_AlwaysSecondToLastEvaluator(
input_fn=tu.dummy_input_fn([[1., 1.]], [[0.]])),
"max_iteration_steps":
None,
"want_loss":
0.32487726,
"want_iteration":
0,
"want_global_step":
300,
},
{
"testcase_name":
"report_materializer",
"subnetwork_generator":
SimpleGenerator(
[_DNNBuilder("dnn"),
_DNNBuilder("dnn2", layer_size=3)]),
"report_materializer":
ReportMaterializer(
input_fn=tu.dummy_input_fn([[1., 1.]], [[0.]]), steps=1),
"max_iteration_steps":
200,
"want_loss":
0.29696745,
"want_iteration":
1,
"want_global_step":
300,
},
{
"testcase_name":
"all_strategy",
"subnetwork_generator":
SimpleGenerator(
[_DNNBuilder("dnn"),
_DNNBuilder("dnn2", layer_size=3)]),
"ensemble_strategies": [AllStrategy()],
"max_iteration_steps":
200,
"want_loss":
0.29196805,
"want_iteration":
1,
"want_global_step":
300,
},
{
"testcase_name":
"all_strategy_multiple_ensemblers",
"subnetwork_generator":
SimpleGenerator(
[_DNNBuilder("dnn"),
_DNNBuilder("dnn2", layer_size=3)]),
"ensemble_strategies": [AllStrategy()],
"ensemblers": [
ComplexityRegularizedEnsembler(),
ComplexityRegularizedEnsembler(use_bias=True, name="with_bias")
],
"max_iteration_steps":
200,
"want_loss":
0.23053232,
"want_iteration":
1,
"want_global_step":
300,
},
{
"testcase_name":
"solo_strategy",
"subnetwork_generator":
SimpleGenerator(
[_DNNBuilder("dnn"),
_DNNBuilder("dnn2", layer_size=3)]),
"ensemble_strategies": [SoloStrategy()],
"max_iteration_steps":
200,
"want_loss":
0.35249719,
"want_iteration":
1,
"want_global_step":
300,
},
{
"testcase_name":
"solo_strategy_three_iterations",
"subnetwork_generator":
SimpleGenerator(
[_DNNBuilder("dnn"),
_DNNBuilder("dnn2", layer_size=3)]),
"ensemble_strategies": [SoloStrategy()],
"max_iteration_steps":
100,
"want_loss":
0.36163166,
"want_iteration":
2,
"want_global_step":
300,
},
{
"testcase_name":
"multi_ensemble_strategy",
"subnetwork_generator":
SimpleGenerator(
[_DNNBuilder("dnn"),
_DNNBuilder("dnn2", layer_size=3)]),
"ensemble_strategies":
[AllStrategy(), GrowStrategy(),
SoloStrategy()],
"max_iteration_steps":
100,
"want_loss":
0.24838975,
"want_iteration":
2,
"want_global_step":
300,
},
{
"testcase_name":
"dataset_train_input_fn",
"subnetwork_generator":
SimpleGenerator([_DNNBuilder("dnn")]),
# pylint: disable=g-long-lambda
"train_input_fn":
lambda: tf.data.Dataset.from_tensors(({
"x": XOR_FEATURES
}, XOR_LABELS)).repeat(),
# pylint: enable=g-long-lambda
"max_iteration_steps":
100,
"want_loss":
0.32219219,
"want_iteration":
2,
"want_global_step":
300,
},
{
"testcase_name":
"early_stopping_subnetwork",
"subnetwork_generator":
SimpleGenerator([
_DNNBuilder("dnn"),
_DNNBuilder("dnn2", subnetwork_hooks=[_EarlyStoppingHook()])
]),
"max_iteration_steps":
100,
"max_steps":
200,
"want_loss":
0.2958503,
# Since one subnetwork stops after 1 step and global step is the
# mean of iteration steps, global step will be incremented at half
# the rate.
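          # (Illustrative arithmetic: at half rate, reaching global_step=200
          # takes roughly 400 per-subnetwork iteration steps, i.e. about four
          # iterations of max_iteration_steps=100, hence want_iteration=3.)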
"want_iteration":
3,
"want_global_step":
200,
})
def test_lifecycle(self,
subnetwork_generator,
want_loss,
want_iteration,
want_global_step,
max_iteration_steps,
mixture_weight_type=MixtureWeightType.MATRIX,
evaluator=None,
use_bias=True,
replicate_ensemble_in_training=False,
hooks=None,
ensemblers=None,
ensemble_strategies=None,
max_steps=300,
steps=None,
report_materializer=None,
train_input_fn=None,
max_iterations=None,
export_subnetworks=False,
enable_v2_checkpoint=False):
"""Train entire estimator lifecycle using XOR dataset."""
run_config = tf.estimator.RunConfig(tf_random_seed=42)
def _metric_fn(predictions):
mean = tf.keras.metrics.Mean()
mean.update_state(predictions["predictions"])
return {"keras_mean": mean}
default_ensembler_kwargs = {
"mixture_weight_type": mixture_weight_type,
"mixture_weight_initializer": tf_compat.v1.zeros_initializer(),
"warm_start_mixture_weights": True,
"use_bias": use_bias,
"enable_v2_checkpoint": enable_v2_checkpoint,
}
if ensemblers:
default_ensembler_kwargs = {}
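      # (Presumably the deprecated mixture-weight kwargs above would conflict
      # with user-supplied ensemblers, so they are dropped whenever custom
      # ensemblers are passed in.)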
estimator = Estimator(
head=tu.head(),
subnetwork_generator=subnetwork_generator,
max_iteration_steps=max_iteration_steps,
evaluator=evaluator,
ensemblers=ensemblers,
ensemble_strategies=ensemble_strategies,
report_materializer=report_materializer,
replicate_ensemble_in_training=replicate_ensemble_in_training,
metric_fn=_metric_fn,
model_dir=self.test_subdirectory,
config=run_config,
max_iterations=max_iterations,
export_subnetwork_logits=export_subnetworks,
export_subnetwork_last_layer=export_subnetworks,
**default_ensembler_kwargs)
if not train_input_fn:
train_input_fn = tu.dummy_input_fn(XOR_FEATURES, XOR_LABELS)
# Train.
estimator.train(
input_fn=train_input_fn, steps=steps, max_steps=max_steps, hooks=hooks)
# Evaluate.
eval_results = estimator.evaluate(
input_fn=train_input_fn, steps=10, hooks=hooks)
logging.info("%s", eval_results)
self.assertAlmostEqual(want_loss, eval_results["loss"], places=3)
self.assertEqual(want_global_step, eval_results["global_step"])
self.assertEqual(want_iteration, eval_results["iteration"])
# Predict.
predictions = estimator.predict(
input_fn=tu.dataset_input_fn(features=[0., 0.], labels=None))
for prediction in predictions:
self.assertIsNotNone(prediction["predictions"])
# Export SavedModel.
def serving_input_fn():
"""Input fn for serving export, starting from serialized example."""
serialized_example = tf_compat.v1.placeholder(
dtype=tf.string, shape=(None), name="serialized_example")
return tf.estimator.export.ServingInputReceiver(
features={"x": tf.constant([[0., 0.]], name="serving_x")},
receiver_tensors=serialized_example)
export_saved_model_fn = getattr(estimator, "export_saved_model", None)
if not callable(export_saved_model_fn):
export_saved_model_fn = estimator.export_savedmodel
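    # (`export_saved_model` superseded `export_savedmodel` in newer TF
    # releases; the getattr fallback above keeps this test running on both.)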
export_dir_base = os.path.join(self.test_subdirectory, "export")
export_saved_model_fn(
export_dir_base=export_dir_base,
serving_input_receiver_fn=serving_input_fn)
if export_subnetworks:
saved_model = saved_model_utils.read_saved_model(
os.path.join(export_dir_base,
tf.io.gfile.listdir(export_dir_base)[0]))
export_signature_def = saved_model.meta_graphs[0].signature_def
self.assertIn("subnetwork_logits", export_signature_def.keys())
self.assertIn("subnetwork_last_layer", export_signature_def.keys())
@parameterized.named_parameters(
{
"testcase_name":
"hash_bucket_with_one_hot",
"feature_column": (tf.feature_column.indicator_column(
categorical_column=(
tf.feature_column.categorical_column_with_hash_bucket(
key="human_names", hash_bucket_size=4, dtype=tf.string)))
),
}, {
"testcase_name":
"vocab_list_with_one_hot",
"feature_column": (tf.feature_column.indicator_column(
categorical_column=(
tf.feature_column.categorical_column_with_vocabulary_list(
key="human_names",
vocabulary_list=["alice", "bob"],
dtype=tf.string)))),
}, {
"testcase_name":
"hash_bucket_with_embedding",
"feature_column": (tf.feature_column.embedding_column(
categorical_column=(
tf.feature_column.categorical_column_with_hash_bucket(
key="human_names", hash_bucket_size=4, dtype=tf.string)),
dimension=2)),
}, {
"testcase_name":
"vocab_list_with_embedding",
"feature_column": (tf.feature_column.embedding_column(
categorical_column=(
tf.feature_column.categorical_column_with_vocabulary_list(
key="human_names",
vocabulary_list=["alice", "bob"],
dtype=tf.string)),
dimension=2)),
})
def test_categorical_columns(self, feature_column):
def train_input_fn():
input_features = {
"human_names": tf.constant([["alice"], ["bob"]], name="human_names")
}
input_labels = tf.constant([[1.], [0.]], name="starts_with_a")
return input_features, input_labels
report_materializer = ReportMaterializer(input_fn=train_input_fn, steps=1)
estimator = Estimator(
head=regression_head.RegressionHead(),
subnetwork_generator=SimpleGenerator(
[_SimpleBuilder(name="simple", feature_columns=[feature_column])]),
report_materializer=report_materializer,
mixture_weight_type=MixtureWeightType.MATRIX,
mixture_weight_initializer=tf_compat.v1.zeros_initializer(),
warm_start_mixture_weights=True,
max_iteration_steps=1,
use_bias=True,
model_dir=self.test_subdirectory)
estimator.train(input_fn=train_input_fn, max_steps=3)
@parameterized.named_parameters(
{
"testcase_name": "no_subnetwork_generator",
"subnetwork_generator": None,
"max_iteration_steps": 100,
"want_error": ValueError,
},
{
"testcase_name": "negative_max_iteration_steps",
"subnetwork_generator": SimpleGenerator([_DNNBuilder("dnn")]),
"max_iteration_steps": -1,
"want_error": ValueError,
},
{
"testcase_name": "zero_max_iteration_steps",
"subnetwork_generator": SimpleGenerator([_DNNBuilder("dnn")]),
"max_iteration_steps": 0,
"want_error": ValueError,
},
{
"testcase_name": "negative_max_iterations",
"subnetwork_generator": SimpleGenerator([_DNNBuilder("dnn")]),
"max_iteration_steps": 1,
"max_iterations": -1,
"want_error": ValueError,
},
{
"testcase_name": "zero_max_iterations",
"subnetwork_generator": SimpleGenerator([_DNNBuilder("dnn")]),
"max_iteration_steps": 1,
"max_iterations": 0,
"want_error": ValueError,
},
{
"testcase_name": "steps_and_max_steps",
"subnetwork_generator": SimpleGenerator([_DNNBuilder("dnn")]),
"max_iteration_steps": 1,
"steps": 1,
"max_steps": 1,
"want_error": ValueError,
},
{
"testcase_name": "zero_steps",
"subnetwork_generator": SimpleGenerator([_DNNBuilder("dnn")]),
"max_iteration_steps": 1,
"steps": 0,
"max_steps": None,
"want_error": ValueError,
},
{
"testcase_name": "nan_loss_builder",
"subnetwork_generator": SimpleGenerator([_NanLossBuilder()]),
"max_iteration_steps": 1,
"max_steps": None,
"want_error": tf_compat.v1.estimator.NanLossDuringTrainingError,
},
{
"testcase_name":
"nan_loss_builder_first",
"subnetwork_generator":
SimpleGenerator([
_NanLossBuilder(),
_DNNBuilder("dnn"),
]),
"max_iteration_steps":
1,
"max_steps":
None,
"want_error":
tf_compat.v1.estimator.NanLossDuringTrainingError,
},
{
"testcase_name":
"nan_loss_builder_last",
"subnetwork_generator":
SimpleGenerator([
_DNNBuilder("dnn"),
_NanLossBuilder(),
]),
"max_iteration_steps":
1,
"max_steps":
None,
"want_error":
tf_compat.v1.estimator.NanLossDuringTrainingError,
},
)
def test_train_error(self,
subnetwork_generator,
max_iteration_steps,
want_error,
steps=None,
max_steps=10,
max_iterations=None):
report_materializer = ReportMaterializer(
input_fn=tu.dummy_input_fn([[1., 1.]], [[0.]]), steps=1)
with self.assertRaises(want_error):
estimator = Estimator(
head=tu.head(),
subnetwork_generator=subnetwork_generator,
report_materializer=report_materializer,
mixture_weight_type=MixtureWeightType.MATRIX,
mixture_weight_initializer=tf_compat.v1.zeros_initializer(),
warm_start_mixture_weights=True,
max_iteration_steps=max_iteration_steps,
use_bias=True,
max_iterations=max_iterations,
model_dir=self.test_subdirectory)
train_input_fn = tu.dummy_input_fn([[1., 0.]], [[1.]])
estimator.train(input_fn=train_input_fn, steps=steps, max_steps=max_steps)
def test_binary_head_asserts_are_disabled(self):
"""Tests b/140267630."""
subnetwork_generator = SimpleGenerator([
_DNNBuilder("dnn"),
_NanLossBuilder(),
])
estimator = Estimator(
head=binary_class_head_v1(),
subnetwork_generator=subnetwork_generator,
max_iteration_steps=10,
model_dir=self.test_subdirectory)
eval_input_fn = tu.dummy_input_fn([[1., 0.]], [[1.]])
estimator.evaluate(input_fn=eval_input_fn, steps=1)
class KerasCNNBuilder(Builder):
"""Builds a CNN subnetwork for AdaNet."""
def __init__(self, name, learning_rate, num_dense, units=3, seed=42):
"""Initializes a `SimpleCNNBuilder`.
Args:
name: String name.
learning_rate: The float learning rate to use.
num_dense: Number of layers.
units: Units per layer.
seed: The random seed.
Returns:
      An instance of `KerasCNNBuilder`.
"""
self._name = name
self._learning_rate = learning_rate
self._num_dense = num_dense
self._units = units
self._seed = seed
def build_subnetwork(self,
features,
logits_dimension,
training,
iteration_step,
summary,
previous_ensemble=None):
"""See `adanet.subnetwork.Builder`."""
seed = self._seed
if previous_ensemble:
seed += len(previous_ensemble.weighted_subnetworks)
images = list(features.values())[0]
images = tf.reshape(images, [-1, 2, 2, 1])
kernel_initializer = tf_compat.v1.keras.initializers.he_normal(seed=seed)
x = images
x = tf.keras.layers.Conv2D(
filters=3,
kernel_size=1,
padding="same",
activation="relu",
kernel_initializer=kernel_initializer)(
x)
x = tf.keras.layers.MaxPool2D(pool_size=1, strides=1)(x)
x = tf.keras.layers.Flatten()(x)
for _ in range(self._num_dense):
x = tf_compat.v1.layers.Dense(
units=self._units,
activation="relu",
kernel_initializer=kernel_initializer)(
x)
logits = tf.keras.layers.Dense(
units=1, activation=None, kernel_initializer=kernel_initializer)(
x)
complexity = tf.constant(1)
return Subnetwork(
last_layer=x, logits=logits, complexity=complexity, shared={})
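  # Hedged sketch of where `complexity` feeds in (the exact form is defined
  # by the ensembler, not this builder): the AdaNet objective is roughly
  #   loss(ensemble) + sum_i (lambda * r(h_i) + beta) * |w_i|
  # where r(h_i) is the complexity returned above and w_i the mixture weight.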
def build_subnetwork_train_op(self,
subnetwork,
loss,
var_list,
labels,
iteration_step,
summary,
previous_ensemble=None):
optimizer = tf_compat.v1.train.GradientDescentOptimizer(self._learning_rate)
return optimizer.minimize(loss=loss, var_list=var_list)
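  # (AdaNet passes `var_list` scoped to this subnetwork's variables, so the
  # train op above should update only the new subnetwork and leave previously
  # trained ensemble weights untouched.)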
@property
def name(self):
return self._name
# TODO: Test should be enabled when we support Keras layers.
# class EstimatorKerasLayersTest(tu.AdanetTestCase):
#
# def test_lifecycle(self):
# """Train entire estimator lifecycle using XOR dataset."""
#
# run_config = tf.estimator.RunConfig(tf_random_seed=42)
# estimator = Estimator(
# head=tu.head(),
# subnetwork_generator=SimpleGenerator([
# KerasCNNBuilder("cnn0", learning_rate=.001, num_dense=1, units=3),
# ]),
# max_iteration_steps=100,
# evaluator=Evaluator(
# input_fn=tu.dummy_input_fn([[1., 1., .1, .1]], [[0.]]), steps=3),
# model_dir=self.test_subdirectory,
# force_grow=True,
# config=run_config)
#
# xor_features = [[1., 0., 1., 0.], [0., 0., 0., 0.], [0., 1., 0., 1.],
# [1., 1., 1., 1.]]
# xor_labels = [[1.], [0.], [1.], [0.]]
# train_input_fn = tu.dummy_input_fn(xor_features, xor_labels)
#
# # Train.
# estimator.train(input_fn=train_input_fn, max_steps=300)
#
# # Restore from checkpoint to check that variables match up.
# estimator.train(input_fn=train_input_fn, max_steps=1)
#
# # Evaluate.
# eval_results = estimator.evaluate(input_fn=train_input_fn, steps=3)
# logging.info("%s", eval_results)
# want_loss = 0.164
# self.assertAlmostEqual(want_loss, eval_results["loss"], places=3)
#
# # Predict.
# predictions = estimator.predict(
# input_fn=tu.dataset_input_fn(features=[0., 0., 0., 0.], labels=None))
# for prediction in predictions:
# self.assertIsNotNone(prediction["predictions"])
#
# # Export SavedModel.
# def serving_input_fn():
# """Input fn for serving export, starting from serialized example."""
# serialized_example = tf_compat.v1.placeholder(
# dtype=tf.string, shape=(None), name="serialized_example")
# return tf.estimator.export.ServingInputReceiver(
# features={"x": tf.constant([[0., 0., 0., 0.]], name="serving_x")},
# receiver_tensors=serialized_example)
#
# estimator.export_saved_model(
# export_dir_base=self.test_subdirectory,
# serving_input_receiver_fn=serving_input_fn)
class MultiHeadBuilder(Builder):
"""Builds a subnetwork for AdaNet that uses dict labels."""
def __init__(self, learning_rate=.001, split_logits=False, seed=42):
"""Initializes a `LabelsDictBuilder`.
Args:
learning_rate: The float learning rate to use.
split_logits: Whether to return a dict of logits or a single concatenated
logits `Tensor`.
seed: The random seed.
Returns:
An instance of `MultiHeadBuilder`.
"""
self._learning_rate = learning_rate
self._split_logits = split_logits
self._seed = seed
def build_subnetwork(self,
features,
logits_dimension,
training,
iteration_step,
summary,
previous_ensemble=None):
"""See `adanet.subnetwork.Builder`."""
seed = self._seed
if previous_ensemble:
seed += len(previous_ensemble.weighted_subnetworks)
kernel_initializer = tf_compat.v1.keras.initializers.he_normal(seed=seed)
x = features["x"]
logits = tf_compat.v1.layers.dense(
x,
units=logits_dimension,
activation=None,
kernel_initializer=kernel_initializer)
if self._split_logits:
# Return different logits, one for each head.
logits1, logits2 = tf.split(logits, [1, 1], 1)
logits = {
"head1": logits1,
"head2": logits2,
}
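    # (Concretely, with logits_dimension=2: the concatenated case returns one
    # [batch, 2] tensor, while the split case returns
    # {"head1": [batch, 1], "head2": [batch, 1]}; MultiHead accepts both.)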
complexity = tf.constant(1)
return Subnetwork(
last_layer=logits,
logits=logits,
complexity=complexity,
persisted_tensors={})
def build_subnetwork_train_op(self,
subnetwork,
loss,
var_list,
labels,
iteration_step,
summary,
previous_ensemble=None):
optimizer = tf_compat.v1.train.GradientDescentOptimizer(self._learning_rate)
return optimizer.minimize(loss=loss, var_list=var_list)
def build_mixture_weights_train_op(self, loss, var_list, logits, labels,
iteration_step, summary):
optimizer = tf_compat.v1.train.GradientDescentOptimizer(self._learning_rate)
return optimizer.minimize(loss=loss, var_list=var_list)
@property
def name(self):
return "multi_head"
class EstimatorMultiHeadTest(tu.AdanetTestCase):
@parameterized.named_parameters(
{
"testcase_name": "concatenated_logits",
"builders": [MultiHeadBuilder()],
"want_loss": 3.218,
}, {
"testcase_name": "split_logits_with_export_subnetworks",
"builders": [MultiHeadBuilder(split_logits=True)],
"want_loss": 3.224,
"export_subnetworks": True,
}, {
"testcase_name": "split_logits",
"builders": [MultiHeadBuilder(split_logits=True)],
"want_loss": 3.224,
})
def test_lifecycle(self, builders, want_loss, export_subnetworks=False):
"""Train entire estimator lifecycle using XOR dataset."""
run_config = tf.estimator.RunConfig(tf_random_seed=42)
xor_features = [[1., 0., 1., 0.], [0., 0., 0., 0.], [0., 1., 0., 1.],
[1., 1., 1., 1.]]
xor_labels = [[1.], [0.], [1.], [0.]]
def train_input_fn():
return {
"x": tf.constant(xor_features)
}, {
"head1": tf.constant(xor_labels),
"head2": tf.constant(xor_labels)
}
estimator = Estimator(
head=multi_head_lib.MultiHead(heads=[
regression_head.RegressionHead(
name="head1", loss_reduction=tf_compat.SUM_OVER_BATCH_SIZE),
regression_head.RegressionHead(
name="head2", loss_reduction=tf_compat.SUM_OVER_BATCH_SIZE),
]),
subnetwork_generator=SimpleGenerator(builders),
max_iteration_steps=3,
evaluator=Evaluator(input_fn=train_input_fn, steps=1),
model_dir=self.test_subdirectory,
config=run_config,
export_subnetwork_logits=export_subnetworks,
export_subnetwork_last_layer=export_subnetworks)
# Train.
estimator.train(input_fn=train_input_fn, max_steps=9)
# Evaluate.
eval_results = estimator.evaluate(input_fn=train_input_fn, steps=3)
self.assertAlmostEqual(want_loss, eval_results["loss"], places=3)
# Predict.
predictions = estimator.predict(
input_fn=tu.dataset_input_fn(features=[0., 0., 0., 0.], labels=None))
for prediction in predictions:
self.assertIsNotNone(prediction[("head1", "predictions")])
self.assertIsNotNone(prediction[("head2", "predictions")])
# Export SavedModel.
def serving_input_fn():
"""Input fn for serving export, starting from serialized example."""
serialized_example = tf_compat.v1.placeholder(
dtype=tf.string, shape=(None), name="serialized_example")
return tf.estimator.export.ServingInputReceiver(
features={"x": tf.constant([[0., 0., 0., 0.]], name="serving_x")},
receiver_tensors=serialized_example)
export_saved_model_fn = getattr(estimator, "export_saved_model", None)
if not callable(export_saved_model_fn):
export_saved_model_fn = estimator.export_savedmodel
export_dir_base = os.path.join(self.test_subdirectory, "export")
export_saved_model_fn(
export_dir_base=export_dir_base,
serving_input_receiver_fn=serving_input_fn)
if export_subnetworks:
saved_model = saved_model_utils.read_saved_model(
os.path.join(export_dir_base,
tf.io.gfile.listdir(export_dir_base)[0]))
export_signature_def = saved_model.meta_graphs[0].signature_def
self.assertIn("subnetwork_logits_head1", export_signature_def.keys())
self.assertIn("subnetwork_logits_head2", export_signature_def.keys())
self.assertIn("subnetwork_last_layer_head1", export_signature_def.keys())
self.assertIn("subnetwork_last_layer_head2", export_signature_def.keys())
class EstimatorCallingModelFnDirectlyTest(tu.AdanetTestCase):
"""Tests b/112108745. Warn users not to call model_fn directly."""
def test_calling_model_fn_directly(self):
subnetwork_generator = SimpleGenerator([_DNNBuilder("dnn")])
report_materializer = ReportMaterializer(
input_fn=tu.dummy_input_fn([[1., 1.]], [[0.]]), steps=1)
estimator = Estimator(
head=tu.head(),
subnetwork_generator=subnetwork_generator,
report_materializer=report_materializer,
max_iteration_steps=3,
use_bias=True,
model_dir=self.test_subdirectory)
model_fn = estimator.model_fn
train_input_fn = tu.dummy_input_fn([[1., 0.]], [[1.]])
tf_compat.v1.train.create_global_step()
features, labels = train_input_fn()
with self.assertRaises(UserWarning):
model_fn(
features=features,
mode=tf.estimator.ModeKeys.TRAIN,
labels=labels,
config={})
def test_calling_model_fn_directly_for_predict(self):
with context.graph_mode():
subnetwork_generator = SimpleGenerator([_DNNBuilder("dnn")])
report_materializer = ReportMaterializer(
input_fn=tu.dummy_input_fn([[1., 1.]], [[0.]]), steps=1)
estimator = Estimator(
head=tu.head(),
subnetwork_generator=subnetwork_generator,
report_materializer=report_materializer,
max_iteration_steps=3,
use_bias=True,
model_dir=self.test_subdirectory)
model_fn = estimator.model_fn
train_input_fn = tu.dummy_input_fn([[1., 0.]], [[1.]])
tf_compat.v1.train.create_global_step()
features, labels = train_input_fn()
model_fn(
features=features,
mode=tf.estimator.ModeKeys.PREDICT,
labels=labels,
config=tf.estimator.RunConfig(
save_checkpoints_steps=1,
keep_checkpoint_max=3,
model_dir=self.test_subdirectory,
))
class EstimatorCheckpointTest(tu.AdanetTestCase):
"""Tests estimator checkpoints."""
@parameterized.named_parameters(
{
"testcase_name": "single_iteration",
"max_iteration_steps": 3,
"keep_checkpoint_max": 3,
"want_num_checkpoints": 3,
}, {
"testcase_name": "single_iteration_keep_one",
"max_iteration_steps": 3,
"keep_checkpoint_max": 1,
"want_num_checkpoints": 1,
}, {
"testcase_name": "three_iterations",
"max_iteration_steps": 1,
"keep_checkpoint_max": 3,
"want_num_checkpoints": 3,
}, {
"testcase_name": "three_iterations_keep_one",
"max_iteration_steps": 1,
"keep_checkpoint_max": 1,
"want_num_checkpoints": 1,
})
def test_checkpoints(self,
max_iteration_steps,
keep_checkpoint_max,
want_num_checkpoints,
max_steps=3):
config = tf.estimator.RunConfig(
save_checkpoints_steps=1,
keep_checkpoint_max=keep_checkpoint_max,
)
subnetwork_generator = SimpleGenerator([_DNNBuilder("dnn")])
report_materializer = ReportMaterializer(
input_fn=tu.dummy_input_fn([[1., 1.]], [[0.]]), steps=1)
estimator = Estimator(
head=tu.head(),
subnetwork_generator=subnetwork_generator,
report_materializer=report_materializer,
mixture_weight_type=MixtureWeightType.MATRIX,
mixture_weight_initializer=tf_compat.v1.zeros_initializer(),
warm_start_mixture_weights=True,
max_iteration_steps=max_iteration_steps,
use_bias=True,
config=config,
model_dir=self.test_subdirectory)
train_input_fn = tu.dummy_input_fn([[1., 0.]], [[1.]])
estimator.train(input_fn=train_input_fn, max_steps=max_steps)
checkpoints = tf.io.gfile.glob(
os.path.join(self.test_subdirectory, "*.meta"))
self.assertEqual(want_num_checkpoints, len(checkpoints))
def _check_eventfile_for_keyword(keyword, dir_):
"""Checks event files for the keyword."""
tf_compat.v1.summary.FileWriterCache.clear()
if not tf.io.gfile.exists(dir_):
raise ValueError("Directory '{}' not found.".format(dir_))
  # Scan the most recent event file written for the keyword.
filenames = os.path.join(dir_, "events*")
event_paths = tf.io.gfile.glob(filenames)
if not event_paths:
raise ValueError("Path '{}' not found.".format(filenames))
  for event in tf_compat.v1.train.summary_iterator(event_paths[-1]):
    if event.summary is not None:
      for value in event.summary.value:
if keyword == value.tag:
if value.HasField("simple_value"):
return value.simple_value
if value.HasField("image"):
return (value.image.height, value.image.width,
value.image.colorspace)
if value.HasField("tensor"):
return value.tensor.string_val
raise ValueError("Keyword '{}' not found in path '{}'.".format(
keyword, filenames))
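# Typical use, mirroring the assertions below (`logdir` is hypothetical):
#   loss = _check_eventfile_for_keyword("loss", logdir)              # scalar
#   h, w, c = _check_eventfile_for_keyword("image/image/0", logdir)  # image
#   arch = _check_eventfile_for_keyword(
#       "architecture/adanet/ensembles/0", logdir)                   # tensor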
class _FakeMetric(object):
"""A fake metric."""
def __init__(self, value, dtype):
self._value = value
self._dtype = dtype
def to_metric(self):
tensor = tf.convert_to_tensor(value=self._value, dtype=self._dtype)
return (tensor, tensor)
class _EvalMetricsHead(object):
"""A fake head with the given evaluation metrics."""
def __init__(self, fake_metrics):
self._fake_metrics = fake_metrics
@property
def logits_dimension(self):
return 1
def create_estimator_spec(self,
features,
mode,
logits,
labels=None,
train_op_fn=None):
del features # Unused
metric_ops = None
if self._fake_metrics:
metric_ops = {}
for k, fake_metric in self._fake_metrics.items():
metric_ops[k] = fake_metric.to_metric()
return tf.estimator.EstimatorSpec(
mode=mode,
predictions=logits,
loss=tf.reduce_mean(input_tensor=labels - logits),
eval_metric_ops=metric_ops,
train_op=train_op_fn(1))
def _mean_keras_metric(value):
"""Returns the mean of given value as a Keras metric."""
mean = tf.keras.metrics.Mean()
mean.update_state(value)
return mean
class EstimatorSummaryWriterTest(tu.AdanetTestCase):
"""Test that Tensorboard summaries get written correctly."""
@tf_compat.skip_for_tf2
def test_summaries(self):
"""Tests that summaries are written to candidate directory."""
run_config = tf.estimator.RunConfig(
tf_random_seed=42, log_step_count_steps=2, save_summary_steps=2)
subnetwork_generator = SimpleGenerator(
[_DNNBuilder("dnn", mixture_weight_learning_rate=.001)])
report_materializer = ReportMaterializer(
input_fn=tu.dummy_input_fn([[1., 1.]], [[0.]]), steps=1)
estimator = Estimator(
head=tu.head(),
subnetwork_generator=subnetwork_generator,
report_materializer=report_materializer,
mixture_weight_type=MixtureWeightType.MATRIX,
mixture_weight_initializer=tf_compat.v1.zeros_initializer(),
warm_start_mixture_weights=True,
max_iteration_steps=10,
use_bias=True,
config=run_config,
model_dir=self.test_subdirectory)
train_input_fn = tu.dummy_input_fn([[1., 0.]], [[1.]])
estimator.train(input_fn=train_input_fn, max_steps=3)
ensemble_loss = 1.
self.assertAlmostEqual(
ensemble_loss,
_check_eventfile_for_keyword("loss", self.test_subdirectory),
places=3)
self.assertIsNotNone(
_check_eventfile_for_keyword("global_step/sec", self.test_subdirectory))
self.assertEqual(
0.,
_check_eventfile_for_keyword("iteration/adanet/iteration",
self.test_subdirectory))
subnetwork_subdir = os.path.join(self.test_subdirectory,
"subnetwork/t0_dnn")
self.assertAlmostEqual(
3., _check_eventfile_for_keyword("scalar", subnetwork_subdir), places=3)
self.assertEqual((3, 3, 1),
_check_eventfile_for_keyword("image/image/0",
subnetwork_subdir))
self.assertAlmostEqual(
5.,
_check_eventfile_for_keyword("nested/scalar", subnetwork_subdir),
places=3)
ensemble_subdir = os.path.join(
self.test_subdirectory, "ensemble/t0_dnn_grow_complexity_regularized")
self.assertAlmostEqual(
ensemble_loss,
_check_eventfile_for_keyword(
"adanet_loss/adanet/adanet_weighted_ensemble", ensemble_subdir),
places=3)
self.assertAlmostEqual(
0.,
_check_eventfile_for_keyword(
"complexity_regularization/adanet/adanet_weighted_ensemble",
ensemble_subdir),
places=3)
self.assertAlmostEqual(
0.,
_check_eventfile_for_keyword(
"mixture_weight_norms/adanet/"
"adanet_weighted_ensemble/subnetwork_0", ensemble_subdir),
places=3)
@tf_compat.skip_for_tf2
def test_disable_summaries(self):
"""Tests that summaries can be disabled for ensembles and subnetworks."""
run_config = tf.estimator.RunConfig(
tf_random_seed=42, log_step_count_steps=2, save_summary_steps=2)
subnetwork_generator = SimpleGenerator(
[_DNNBuilder("dnn", mixture_weight_learning_rate=.001)])
report_materializer = ReportMaterializer(
input_fn=tu.dummy_input_fn([[1., 1.]], [[0.]]), steps=1)
estimator = Estimator(
head=tu.head(),
subnetwork_generator=subnetwork_generator,
report_materializer=report_materializer,
mixture_weight_type=MixtureWeightType.MATRIX,
mixture_weight_initializer=tf_compat.v1.zeros_initializer(),
warm_start_mixture_weights=True,
max_iteration_steps=10,
use_bias=True,
config=run_config,
model_dir=self.test_subdirectory,
enable_ensemble_summaries=False,
enable_subnetwork_summaries=False,
)
train_input_fn = tu.dummy_input_fn([[1., 0.]], [[1.]])
estimator.train(input_fn=train_input_fn, max_steps=3)
ensemble_loss = 1.
self.assertAlmostEqual(
ensemble_loss,
_check_eventfile_for_keyword("loss", self.test_subdirectory),
places=3)
self.assertIsNotNone(
_check_eventfile_for_keyword("global_step/sec", self.test_subdirectory))
self.assertEqual(
0.,
_check_eventfile_for_keyword("iteration/adanet/iteration",
self.test_subdirectory))
subnetwork_subdir = os.path.join(self.test_subdirectory,
"subnetwork/t0_dnn")
with self.assertRaises(ValueError):
_check_eventfile_for_keyword("scalar", subnetwork_subdir)
with self.assertRaises(ValueError):
_check_eventfile_for_keyword("image/image/0", subnetwork_subdir)
with self.assertRaises(ValueError):
_check_eventfile_for_keyword("nested/scalar", subnetwork_subdir)
ensemble_subdir = os.path.join(
self.test_subdirectory, "ensemble/t0_dnn_grow_complexity_regularized")
with self.assertRaises(ValueError):
_check_eventfile_for_keyword(
"adanet_loss/adanet/adanet_weighted_ensemble", ensemble_subdir)
with self.assertRaises(ValueError):
_check_eventfile_for_keyword(
"complexity_regularization/adanet/adanet_weighted_ensemble",
ensemble_subdir)
with self.assertRaises(ValueError):
_check_eventfile_for_keyword(
"mixture_weight_norms/adanet/"
"adanet_weighted_ensemble/subnetwork_0", ensemble_subdir)
# pylint: disable=g-long-lambda
@parameterized.named_parameters(
{
"testcase_name": "none_metrics",
"head": _EvalMetricsHead(None),
"want_summaries": [],
"want_loss": -1.791,
}, {
"testcase_name":
"metrics_fn",
"head":
_EvalMetricsHead(None),
"metric_fn":
lambda predictions: {
"avg": tf_compat.v1.metrics.mean(predictions)
},
"want_summaries": ["avg"],
"want_loss":
-1.791,
}, {
"testcase_name":
"keras_metrics_fn",
"head":
_EvalMetricsHead(None),
"metric_fn":
lambda predictions: {
"avg": _mean_keras_metric(predictions)
},
"want_summaries": ["avg"],
"want_loss":
-1.791,
}, {
"testcase_name": "empty_metrics",
"head": _EvalMetricsHead({}),
"want_summaries": [],
"want_loss": -1.791,
}, {
"testcase_name":
"evaluation_name",
"head":
_EvalMetricsHead({}),
"evaluation_name":
"continuous",
"want_summaries": [],
"want_loss":
-1.791,
"global_subdir":
"eval_continuous",
"subnetwork_subdir":
"subnetwork/t0_dnn/eval_continuous",
"ensemble_subdir":
"ensemble/t0_dnn_grow_complexity_regularized/eval_continuous",
}, {
"testcase_name":
"regression_head",
"head":
regression_head.RegressionHead(
loss_reduction=tf_compat.SUM_OVER_BATCH_SIZE),
"want_summaries": ["average_loss"],
"want_loss":
.256,
}, {
"testcase_name":
"binary_classification_head",
"head":
binary_class_head.BinaryClassHead(
loss_reduction=tf_compat.SUM_OVER_BATCH_SIZE),
"learning_rate":
.6,
"want_summaries": ["average_loss", "accuracy", "recall"],
"want_loss":
0.122,
}, {
"testcase_name":
"all_metrics",
"head":
_EvalMetricsHead({
"float32":
_FakeMetric(1., tf.float32),
"float64":
_FakeMetric(1., tf.float64),
"serialized_summary":
_FakeMetric(
tf_compat.v1.Summary(value=[
tf_compat.v1.Summary.Value(
tag="summary_tag", simple_value=1.)
]).SerializeToString(), tf.string),
}),
"want_summaries": [
"float32",
"float64",
"serialized_summary/0",
],
"want_loss":
-1.791,
})
# pylint: enable=g-long-lambda
def test_eval_metrics(
self,
head,
want_loss,
want_summaries,
evaluation_name=None,
metric_fn=None,
learning_rate=.01,
global_subdir="eval",
subnetwork_subdir="subnetwork/t0_dnn/eval",
ensemble_subdir="ensemble/t0_dnn_grow_complexity_regularized/eval"):
"""Test that AdaNet evaluation metrics get persisted correctly."""
seed = 42
run_config = tf.estimator.RunConfig(tf_random_seed=seed)
subnetwork_generator = SimpleGenerator([
_DNNBuilder(
"dnn",
learning_rate=learning_rate,
mixture_weight_learning_rate=0.,
layer_size=8,
seed=seed)
])
estimator = Estimator(
head=head,
subnetwork_generator=subnetwork_generator,
max_iteration_steps=100,
metric_fn=metric_fn,
config=run_config,
model_dir=self.test_subdirectory)
train_input_fn = tu.dummy_input_fn(XOR_FEATURES, XOR_LABELS)
estimator.train(input_fn=train_input_fn, max_steps=100)
metrics = estimator.evaluate(
input_fn=train_input_fn, steps=1, name=evaluation_name)
self.assertAlmostEqual(want_loss, metrics["loss"], places=3)
global_subdir = os.path.join(self.test_subdirectory, global_subdir)
subnetwork_subdir = os.path.join(self.test_subdirectory, subnetwork_subdir)
ensemble_subdir = os.path.join(self.test_subdirectory, ensemble_subdir)
self.assertAlmostEqual(
want_loss,
_check_eventfile_for_keyword("loss", subnetwork_subdir),
places=3)
for metric in want_summaries:
self.assertIsNotNone(
_check_eventfile_for_keyword(metric, subnetwork_subdir),
msg="{} should be under 'eval'.".format(metric))
for dir_ in [global_subdir, ensemble_subdir]:
self.assertAlmostEqual(metrics["loss"],
_check_eventfile_for_keyword("loss", dir_))
self.assertEqual([b"| dnn |"],
_check_eventfile_for_keyword(
"architecture/adanet/ensembles/0", dir_))
for metric in want_summaries:
self.assertTrue(
_check_eventfile_for_keyword(metric, dir_) > 0.,
msg="{} should be under 'eval'.".format(metric))
class EstimatorMembersOverrideTest(tu.AdanetTestCase):
"""Tests b/77494544 fix."""
def test_assert_members_are_not_overridden(self):
"""Assert that AdaNet estimator does not break other estimators."""
config = tf.estimator.RunConfig()
subnetwork_generator = SimpleGenerator([_DNNBuilder("dnn")])
report_materializer = ReportMaterializer(
input_fn=tu.dummy_input_fn([[1., 1.]], [[0.]]), steps=1)
adanet = Estimator(
head=tu.head(),
subnetwork_generator=subnetwork_generator,
report_materializer=report_materializer,
mixture_weight_type=MixtureWeightType.MATRIX,
mixture_weight_initializer=tf_compat.v1.zeros_initializer(),
warm_start_mixture_weights=True,
max_iteration_steps=10,
use_bias=True,
config=config)
self.assertIsNotNone(adanet)
if hasattr(tf.estimator, "LinearEstimator"):
estimator_fn = tf.estimator.LinearEstimator
else:
estimator_fn = tf.contrib.estimator.LinearEstimator
linear = estimator_fn(
head=tu.head(), feature_columns=[tf.feature_column.numeric_column("x")])
self.assertIsNotNone(linear)
def _dummy_feature_dict_input_fn(features, labels):
"""Returns an input_fn that returns feature and labels `Tensors`."""
def _input_fn():
input_features = {}
for key, feature in features.items():
input_features[key] = tf.constant(feature, name=key)
input_labels = tf.constant(labels, name="labels")
return input_features, input_labels
return _input_fn
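# Example, matching the tests below:
#   input_fn = _dummy_feature_dict_input_fn({"x": [[1., 0.]]}, [[1.]])
#   features, labels = input_fn()  # ({"x": <tf.Tensor>}, <tf.Tensor>)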
class EstimatorDifferentFeaturesPerModeTest(tu.AdanetTestCase):
"""Tests b/109751254."""
@parameterized.named_parameters(
{
"testcase_name": "extra_train_features",
"train_features": {
"x": [[1., 0.]],
"extra": [[1., 0.]],
},
"eval_features": {
"x": [[1., 0.]],
},
"predict_features": {
"x": [[1., 0.]],
},
}, {
"testcase_name": "extra_eval_features",
"train_features": {
"x": [[1., 0.]],
},
"eval_features": {
"x": [[1., 0.]],
"extra": [[1., 0.]],
},
"predict_features": {
"x": [[1., 0.]],
},
}, {
"testcase_name": "extra_predict_features",
"train_features": {
"x": [[1., 0.]],
},
"eval_features": {
"x": [[1., 0.]],
},
"predict_features": {
"x": [[1., 0.]],
"extra": [[1., 0.]],
},
})
def test_different_features_per_mode(self, train_features, eval_features,
predict_features):
"""Tests tests different numbers of features per mode."""
run_config = tf.estimator.RunConfig(tf_random_seed=42)
subnetwork_generator = SimpleGenerator([_DNNBuilder("dnn")])
report_materializer = ReportMaterializer(
input_fn=tu.dummy_input_fn([[1., 1.]], [[0.]]), steps=1)
estimator = Estimator(
head=tu.head(),
subnetwork_generator=subnetwork_generator,
report_materializer=report_materializer,
mixture_weight_type=MixtureWeightType.MATRIX,
mixture_weight_initializer=tf_compat.v1.zeros_initializer(),
warm_start_mixture_weights=True,
max_iteration_steps=1,
use_bias=True,
model_dir=self.test_subdirectory,
config=run_config)
labels = [[1.]]
train_input_fn = _dummy_feature_dict_input_fn(train_features, labels)
# Train.
estimator.train(input_fn=train_input_fn, max_steps=2)
# Evaluate.
eval_input_fn = _dummy_feature_dict_input_fn(eval_features, labels)
estimator.evaluate(input_fn=eval_input_fn, steps=1)
# Predict.
predict_input_fn = _dummy_feature_dict_input_fn(predict_features, None)
estimator.predict(input_fn=predict_input_fn)
# Export SavedModel.
def serving_input_fn():
"""Input fn for serving export, starting from serialized example."""
serialized_example = tf_compat.v1.placeholder(
dtype=tf.string, shape=(None), name="serialized_example")
features = {}
for key, value in predict_features.items():
features[key] = tf.constant(value)
return tf.estimator.export.ServingInputReceiver(
features=features, receiver_tensors=serialized_example)
export_saved_model_fn = getattr(estimator, "export_saved_model", None)
if not callable(export_saved_model_fn):
export_saved_model_fn = estimator.export_savedmodel
export_saved_model_fn(
export_dir_base=self.test_subdirectory,
serving_input_receiver_fn=serving_input_fn)
class EstimatorExportSavedModelTest(tu.AdanetTestCase):
def test_export_saved_model_for_predict(self):
"""Tests SavedModel exporting functionality for predict (b/110435640)."""
run_config = tf.estimator.RunConfig(tf_random_seed=42)
subnetwork_generator = SimpleGenerator([_DNNBuilder("dnn")])
report_materializer = ReportMaterializer(
input_fn=tu.dummy_input_fn([[1., 1.]], [[0.]]), steps=1)
estimator = Estimator(
head=tu.head(),
subnetwork_generator=subnetwork_generator,
report_materializer=report_materializer,
mixture_weight_type=MixtureWeightType.MATRIX,
mixture_weight_initializer=tf_compat.v1.zeros_initializer(),
warm_start_mixture_weights=True,
max_iteration_steps=1,
use_bias=True,
model_dir=self.test_subdirectory,
config=run_config)
features = {"x": [[1., 0.]]}
labels = [[1.]]
train_input_fn = _dummy_feature_dict_input_fn(features, labels)
# Train.
estimator.train(input_fn=train_input_fn, max_steps=2)
# Export SavedModel.
def serving_input_fn():
"""Input fn for serving export, starting from serialized example."""
serialized_example = tf_compat.v1.placeholder(
dtype=tf.string, shape=(None), name="serialized_example")
      tensor_features = {}
      for key, value in features.items():
        tensor_features[key] = tf.constant(value)
      return tf.estimator.export.ServingInputReceiver(
          features=tensor_features, receiver_tensors=serialized_example)
estimator.export_saved_model(
export_dir_base=self.test_subdirectory,
serving_input_receiver_fn=serving_input_fn,
experimental_mode=tf.estimator.ModeKeys.PREDICT)
@test_util.run_in_graph_and_eager_modes
def test_export_saved_model_for_eval(self):
"""Tests SavedModel exporting functionality for eval (b/110991908)."""
run_config = tf.estimator.RunConfig(tf_random_seed=42)
subnetwork_generator = SimpleGenerator(
[_DNNBuilder("dnn", layer_size=8, learning_rate=1.)])
estimator = Estimator(
head=binary_class_head.BinaryClassHead(),
subnetwork_generator=subnetwork_generator,
max_iteration_steps=100,
model_dir=self.test_subdirectory,
config=run_config)
train_input_fn = tu.dummy_input_fn(XOR_FEATURES, XOR_LABELS)
# Train.
estimator.train(input_fn=train_input_fn, max_steps=300)
metrics = estimator.evaluate(input_fn=train_input_fn, steps=1)
self.assertAlmostEqual(.067, metrics["average_loss"], places=3)
self.assertAlmostEqual(1., metrics["recall"], places=3)
self.assertAlmostEqual(1., metrics["accuracy"], places=3)
# Export SavedModel.
def serving_input_fn():
"""Input fn for serving export, starting from serialized example."""
serialized_example = tf_compat.v1.placeholder(
dtype=tf.string, shape=(None), name="serialized_example")
return export.SupervisedInputReceiver(
features={"x": tf.constant(XOR_FEATURES)},
labels=tf.constant(XOR_LABELS),
receiver_tensors=serialized_example)
export_dir_base = os.path.join(self.test_subdirectory, "export")
try:
estimator.export_saved_model(
export_dir_base=export_dir_base,
serving_input_receiver_fn=serving_input_fn,
experimental_mode=tf.estimator.ModeKeys.EVAL)
except AttributeError:
pass
try:
tf.contrib.estimator.export_saved_model_for_mode(
estimator,
export_dir_base=export_dir_base,
input_receiver_fn=serving_input_fn,
mode=tf.estimator.ModeKeys.EVAL)
except AttributeError:
pass
subdir = tf.io.gfile.listdir(export_dir_base)[0]
with context.graph_mode(), self.test_session() as sess:
meta_graph_def = tf_compat.v1.saved_model.loader.load(
sess, ["eval"], os.path.join(export_dir_base, subdir))
signature_def = meta_graph_def.signature_def.get("eval")
# Read zero metric.
self.assertAlmostEqual(
0.,
sess.run(
tf_compat.v1.saved_model.utils.get_tensor_from_tensor_info(
signature_def.outputs["metrics/average_loss/value"])),
places=3)
# Run metric update op.
sess.run((tf_compat.v1.saved_model.utils.get_tensor_from_tensor_info(
signature_def.outputs["metrics/average_loss/update_op"]),
tf_compat.v1.saved_model.utils.get_tensor_from_tensor_info(
signature_def.outputs["metrics/accuracy/update_op"]),
tf_compat.v1.saved_model.utils.get_tensor_from_tensor_info(
signature_def.outputs["metrics/recall/update_op"])))
# Read metric again; it should no longer be zero.
self.assertAlmostEqual(
0.067,
sess.run(
tf_compat.v1.saved_model.utils.get_tensor_from_tensor_info(
signature_def.outputs["metrics/average_loss/value"])),
places=3)
self.assertAlmostEqual(
1.,
sess.run(
tf_compat.v1.saved_model.utils.get_tensor_from_tensor_info(
signature_def.outputs["metrics/recall/value"])),
places=3)
self.assertAlmostEqual(
1.,
sess.run(
tf_compat.v1.saved_model.utils.get_tensor_from_tensor_info(
signature_def.outputs["metrics/accuracy/value"])),
places=3)
def test_export_saved_model_always_uses_replication_placement(self):
"""Tests b/137675014."""
run_config = tf.estimator.RunConfig(tf_random_seed=42)
subnetwork_generator = SimpleGenerator(
[_DNNBuilder("dnn1"), _DNNBuilder("dnn2")])
estimator = Estimator(
head=tu.head(),
subnetwork_generator=subnetwork_generator,
max_iteration_steps=1,
model_dir=self.test_subdirectory,
config=run_config,
experimental_placement_strategy=RoundRobinStrategy())
features = {"x": [[1., 0.]]}
labels = [[1.]]
train_input_fn = _dummy_feature_dict_input_fn(features, labels)
# Train.
estimator.train(input_fn=train_input_fn, max_steps=2)
# Export SavedModel.
def serving_input_fn():
"""Input fn for serving export, starting from serialized example."""
serialized_example = tf_compat.v1.placeholder(
dtype=tf.string, shape=(None), name="serialized_example")
tensor_features = {}
for key, value in features.items():
tensor_features[key] = tf.constant(value)
return tf.estimator.export.ServingInputReceiver(
features=tensor_features, receiver_tensors=serialized_example)
# Fake the number of PS replicas so RoundRobinStrategy will be used.
estimator._config._num_ps_replicas = 2
# If we're still using RoundRobinStrategy, this call will fail by trying
# to place ops on non-existent devices.
# Check all three export methods.
estimator.export_saved_model(
export_dir_base=self.test_subdirectory,
serving_input_receiver_fn=serving_input_fn,
experimental_mode=tf.estimator.ModeKeys.PREDICT)
try:
estimator.export_savedmodel(
export_dir_base=self.test_subdirectory,
serving_input_receiver_fn=serving_input_fn)
except AttributeError as error:
# Log deprecation errors.
logging.warning("Testing estimator#export_savedmodel: %s", error)
estimator.experimental_export_all_saved_models(
export_dir_base=self.test_subdirectory,
input_receiver_fn_map={
tf.estimator.ModeKeys.PREDICT: serving_input_fn,
})
class EstimatorReportTest(tu.AdanetTestCase):
"""Tests report generation and usage."""
def compare_report_lists(self, report_list1, report_list2):
    # Essentially assertEqual(report_list1, report_list2), except that for
    # the "metrics" attribute only the keys present in report_list1 are
    # compared.
def make_qualified_name(iteration_number, name):
return "iteration_{}/{}".format(iteration_number, name)
report_dict_1 = {
make_qualified_name(report.iteration_number, report.name): report
for report in report_list1
}
report_dict_2 = {
make_qualified_name(report.iteration_number, report.name): report
for report in report_list2
}
self.assertEqual(len(report_list1), len(report_list2))
for qualified_name in report_dict_1.keys():
report_1 = report_dict_1[qualified_name]
report_2 = report_dict_2[qualified_name]
self.assertEqual(
report_1.hparams,
report_2.hparams,
msg="{} vs. {}".format(report_1, report_2))
self.assertEqual(
report_1.attributes,
report_2.attributes,
msg="{} vs. {}".format(report_1, report_2))
self.assertEqual(
report_1.included_in_final_ensemble,
report_2.included_in_final_ensemble,
msg="{} vs. {}".format(report_1, report_2))
for metric_key, metric_value in report_1.metrics.items():
self.assertEqual(
metric_value,
report_2.metrics[metric_key],
msg="{} vs. {}".format(report_1, report_2))
@parameterized.named_parameters(
{
"testcase_name": "one_iteration_one_subnetwork",
"subnetwork_builders": [_DNNBuilder("dnn", layer_size=1),],
"num_iterations": 1,
"want_materialized_iteration_reports": [[
MaterializedReport(
iteration_number=0,
name="dnn",
hparams={"layer_size": 1},
attributes={
"complexity": 3,
},
metrics={
"moo": 3,
},
included_in_final_ensemble=True,
),
]],
"want_previous_ensemble_reports": [],
"want_all_reports": [],
},
{
"testcase_name": "one_iteration_three_subnetworks",
"subnetwork_builders": [
# learning_rate is set to 0 for all but one Builder
# to make sure that only one of them can learn.
_DNNBuilder(
"dnn_1",
layer_size=1,
learning_rate=0.,
mixture_weight_learning_rate=0.),
_DNNBuilder(
"dnn_2",
layer_size=2,
learning_rate=0.,
mixture_weight_learning_rate=0.),
# fixing the match for dnn_3 to win.
_DNNBuilder("dnn_3", layer_size=3),
],
"num_iterations": 1,
"want_materialized_iteration_reports": [[
MaterializedReport(
iteration_number=0,
name="dnn_1",
hparams={"layer_size": 1},
attributes={
"complexity": 3,
},
metrics={
"moo": 3,
},
included_in_final_ensemble=False,
),
MaterializedReport(
iteration_number=0,
name="dnn_2",
hparams={"layer_size": 2},
attributes={
"complexity": 3,
},
metrics={
"moo": 3,
},
included_in_final_ensemble=False,
),
MaterializedReport(
iteration_number=0,
name="dnn_3",
hparams={"layer_size": 3},
attributes={
"complexity": 3,
},
metrics={
"moo": 3,
},
included_in_final_ensemble=True,
),
]],
"want_previous_ensemble_reports": [],
"want_all_reports": [],
},
{
"testcase_name":
"three_iterations_one_subnetwork",
"subnetwork_builders": [_DNNBuilder("dnn", layer_size=1),],
"num_iterations":
3,
"want_materialized_iteration_reports": [
[
MaterializedReport(
iteration_number=0,
name="dnn",
hparams={"layer_size": 1},
attributes={
"complexity": 3,
},
metrics={
"moo": 3,
},
included_in_final_ensemble=True,
)
],
[
MaterializedReport(
iteration_number=1,
name="previous_ensemble",
hparams={},
attributes={},
metrics={},
included_in_final_ensemble=False,
),
MaterializedReport(
iteration_number=1,
name="dnn",
hparams={"layer_size": 1},
attributes={
"complexity": 3,
},
metrics={
"moo": 3,
},
included_in_final_ensemble=True,
),
],
[
MaterializedReport(
iteration_number=2,
name="previous_ensemble",
hparams={},
attributes={},
metrics={},
included_in_final_ensemble=False,
),
MaterializedReport(
iteration_number=2,
name="dnn",
hparams={"layer_size": 1},
attributes={
"complexity": 3,
},
metrics={
"moo": 3,
},
included_in_final_ensemble=True,
),
],
],
"want_previous_ensemble_reports": [
MaterializedReport(
iteration_number=0,
name="dnn",
hparams={"layer_size": 1},
attributes={
"complexity": 3,
},
metrics={
"moo": 3,
},
included_in_final_ensemble=True,
),
MaterializedReport(
iteration_number=1,
name="dnn",
hparams={"layer_size": 1},
attributes={
"complexity": 3,
},
metrics={
"moo": 3,
},
included_in_final_ensemble=True,
),
],
"want_all_reports": [
MaterializedReport(
iteration_number=0,
name="dnn",
hparams={"layer_size": 1},
attributes={
"complexity": 3,
},
metrics={
"moo": 3,
},
included_in_final_ensemble=True,
),
MaterializedReport(
iteration_number=1,
name="previous_ensemble",
hparams={},
attributes={},
metrics={},
included_in_final_ensemble=False,
),
MaterializedReport(
iteration_number=1,
name="dnn",
hparams={"layer_size": 1},
attributes={
"complexity": 3,
},
metrics={
"moo": 3,
},
included_in_final_ensemble=True,
),
],
},
{
"testcase_name":
"three_iterations_three_subnetworks",
"subnetwork_builders": [
# learning_rate is set to 0 for all but one Builder
# to make sure that only one of them can learn.
_DNNBuilder(
"dnn_1",
layer_size=1,
learning_rate=0.,
mixture_weight_learning_rate=0.),
_DNNBuilder(
"dnn_2",
layer_size=2,
learning_rate=0.,
mixture_weight_learning_rate=0.),
# fixing the match for dnn_3 to win in every iteration.
_DNNBuilder("dnn_3", layer_size=3),
],
"num_iterations":
3,
"want_materialized_iteration_reports": [
[
MaterializedReport(
iteration_number=0,
name="dnn_1",
hparams={"layer_size": 1},
attributes={
"complexity": 3,
},
metrics={
"moo": 3,
},
included_in_final_ensemble=False,
),
MaterializedReport(
iteration_number=0,
name="dnn_2",
hparams={"layer_size": 2},
attributes={
"complexity": 3,
},
metrics={
"moo": 3,
},
included_in_final_ensemble=False,
),
MaterializedReport(
iteration_number=0,
name="dnn_3",
hparams={"layer_size": 3},
attributes={
"complexity": 3,
},
metrics={
"moo": 3,
},
included_in_final_ensemble=True,
),
],
[
MaterializedReport(
iteration_number=1,
name="previous_ensemble",
hparams={},
attributes={},
metrics={},
included_in_final_ensemble=False,
),
MaterializedReport(
iteration_number=1,
name="dnn_1",
hparams={"layer_size": 1},
attributes={
"complexity": 3,
},
metrics={
"moo": 3,
},
included_in_final_ensemble=False,
),
MaterializedReport(
iteration_number=1,
name="dnn_2",
hparams={"layer_size": 2},
attributes={
"complexity": 3,
},
metrics={
"moo": 3,
},
included_in_final_ensemble=False,
),
MaterializedReport(
iteration_number=1,
name="dnn_3",
hparams={"layer_size": 3},
attributes={
"complexity": 3,
},
metrics={
"moo": 3,
},
included_in_final_ensemble=True,
),
],
[
MaterializedReport(
iteration_number=2,
name="previous_ensemble",
hparams={},
attributes={},
metrics={},
included_in_final_ensemble=False,
),
MaterializedReport(
iteration_number=2,
name="dnn_1",
hparams={"layer_size": 1},
attributes={
"complexity": 3,
},
metrics={
"moo": 3,
},
included_in_final_ensemble=False,
),
MaterializedReport(
iteration_number=2,
name="dnn_2",
hparams={"layer_size": 2},
attributes={
"complexity": 3,
},
metrics={
"moo": 3,
},
included_in_final_ensemble=False,
),
MaterializedReport(
iteration_number=2,
name="dnn_3",
hparams={"layer_size": 3},
attributes={
"complexity": 3,
},
metrics={
"moo": 3,
},
included_in_final_ensemble=True,
),
],
],
"want_previous_ensemble_reports": [
MaterializedReport(
iteration_number=0,
name="dnn_3",
hparams={"layer_size": 3},
attributes={
"complexity": 3,
},
metrics={
"moo": 3,
},
included_in_final_ensemble=True,
),
MaterializedReport(
iteration_number=1,
name="dnn_3",
hparams={"layer_size": 3},
attributes={
"complexity": 3,
},
metrics={
"moo": 3,
},
included_in_final_ensemble=True,
),
],
"want_all_reports": [
MaterializedReport(
iteration_number=0,
name="dnn_1",
hparams={"layer_size": 1},
attributes={
"complexity": 3,
},
metrics={
"moo": 3,
},
included_in_final_ensemble=False,
),
MaterializedReport(
iteration_number=0,
name="dnn_2",
hparams={"layer_size": 2},
attributes={
"complexity": 3,
},
metrics={
"moo": 3,
},
included_in_final_ensemble=False,
),
MaterializedReport(
iteration_number=0,
name="dnn_3",
hparams={"layer_size": 3},
attributes={
"complexity": 3,
},
metrics={
"moo": 3,
},
included_in_final_ensemble=True,
),
MaterializedReport(
iteration_number=1,
name="previous_ensemble",
hparams={},
attributes={},
metrics={},
included_in_final_ensemble=False,
),
MaterializedReport(
iteration_number=1,
name="dnn_1",
hparams={"layer_size": 1},
attributes={
"complexity": 3,
},
metrics={
"moo": 3,
},
included_in_final_ensemble=False,
),
MaterializedReport(
iteration_number=1,
name="dnn_2",
hparams={"layer_size": 2},
attributes={
"complexity": 3,
},
metrics={
"moo": 3,
},
included_in_final_ensemble=False,
),
MaterializedReport(
iteration_number=1,
name="dnn_3",
hparams={"layer_size": 3},
attributes={
"complexity": 3,
},
metrics={
"moo": 3,
},
included_in_final_ensemble=True,
),
],
},
)
def test_report_generation_and_usage(self, subnetwork_builders,
num_iterations,
want_materialized_iteration_reports,
want_previous_ensemble_reports,
want_all_reports):
    # Stores the previous_ensemble_reports and all_reports arguments passed
    # to generate_candidates in the spied_iteration_reports dictionary,
    # keyed by iteration_number.
spied_iteration_reports = {}
def _spy_fn(iteration_number, previous_ensemble_reports, all_reports):
spied_iteration_reports[iteration_number] = {
"previous_ensemble_reports": previous_ensemble_reports,
"all_reports": all_reports,
}
subnetwork_generator = _FakeGenerator(
spy_fn=_spy_fn, subnetwork_builders=subnetwork_builders)
max_iteration_steps = 5
max_steps = max_iteration_steps * num_iterations + 1
train_input_fn = tu.dummy_input_fn([[1., 0.]], [[1.]])
estimator = Estimator(
head=tu.head(),
subnetwork_generator=subnetwork_generator,
mixture_weight_type=MixtureWeightType.MATRIX,
mixture_weight_initializer=tf_compat.v1.zeros_initializer(),
warm_start_mixture_weights=True,
max_iteration_steps=max_iteration_steps,
use_bias=True,
report_materializer=ReportMaterializer(
input_fn=train_input_fn, steps=1),
model_dir=self.test_subdirectory)
report_accessor = estimator._report_accessor
estimator.train(input_fn=train_input_fn, max_steps=max_steps)
materialized_iteration_reports = list(
report_accessor.read_iteration_reports())
self.assertEqual(num_iterations, len(materialized_iteration_reports))
for i in range(num_iterations):
want_materialized_reports = (want_materialized_iteration_reports[i])
materialized_reports = materialized_iteration_reports[i]
self.compare_report_lists(want_materialized_reports, materialized_reports)
# Compute argmin adanet loss.
argmin_adanet_loss = 0
smallest_known_adanet_loss = float("inf")
for j, materialized_subnetwork_report in enumerate(materialized_reports):
if (smallest_known_adanet_loss >
materialized_subnetwork_report.metrics["adanet_loss"]):
smallest_known_adanet_loss = (
materialized_subnetwork_report.metrics["adanet_loss"])
argmin_adanet_loss = j
# Check that the subnetwork with the lowest adanet loss is the one
# that is included in the final ensemble.
      for j, materialized_subnetwork_report in enumerate(materialized_reports):
        self.assertEqual(
            j == argmin_adanet_loss,
            materialized_subnetwork_report.included_in_final_ensemble)
# Check the arguments passed into the generate_candidates method of the
# Generator.
iteration_report = spied_iteration_reports[num_iterations - 1]
self.compare_report_lists(want_previous_ensemble_reports,
iteration_report["previous_ensemble_reports"])
self.compare_report_lists(want_all_reports, iteration_report["all_reports"])
class EstimatorForceGrowTest(tu.AdanetTestCase):
"""Tests the force_grow override.
Uses linear subnetworks with the same seed. They will produce identical
outputs, so unless the `force_grow` override is set, none of the new
subnetworks will improve the AdaNet objective, and AdaNet will not add them to
the ensemble.
"""
@parameterized.named_parameters(
{
"testcase_name": "one_builder_no_force_grow",
"builders": [_FrozenLinearBuilder("linear")],
"force_grow": False,
"want_subnetworks": 1,
}, {
"testcase_name": "two_builders_no_force_grow",
"builders": [
_FrozenLinearBuilder("linear"),
_FrozenLinearBuilder("linear2"),
],
"force_grow": False,
"want_subnetworks": 1,
}, {
"testcase_name": "one_builder",
"builders": [_FrozenLinearBuilder("linear")],
"force_grow": True,
"want_subnetworks": 2,
}, {
"testcase_name": "two_builders",
"builders":
[_FrozenLinearBuilder("linear"),
_FrozenLinearBuilder("linear2")],
"force_grow": True,
"want_subnetworks": 2,
}, {
"testcase_name":
"two_builders_with_evaluator",
"builders":
[_FrozenLinearBuilder("linear"),
_FrozenLinearBuilder("linear2")],
"force_grow":
True,
"evaluator":
Evaluator(
input_fn=tu.dummy_input_fn([[1., 1.]], [[0.]]), steps=1),
"want_subnetworks":
3,
})
def test_force_grow(self,
builders,
force_grow,
want_subnetworks,
evaluator=None):
"""Test force grow with identical frozen subnetworks."""
run_config = tf.estimator.RunConfig(tf_random_seed=42)
subnetwork_generator = SimpleGenerator(builders)
estimator = Estimator(
head=tu.head(),
subnetwork_generator=subnetwork_generator,
max_iteration_steps=1,
evaluator=evaluator,
force_grow=force_grow,
model_dir=self.test_subdirectory,
config=run_config)
train_input_fn = tu.dummy_input_fn(XOR_FEATURES, XOR_LABELS)
    # Train for three one-step iterations.
estimator.train(input_fn=train_input_fn, max_steps=3)
# Evaluate.
eval_results = estimator.evaluate(input_fn=train_input_fn, steps=1)
self.assertEqual(
want_subnetworks,
str(eval_results["architecture/adanet/ensembles"]).count(" linear "))
class EstimatorDebugTest(tu.AdanetTestCase):
"""Tests b/125483534. Detect NaNs in input_fns."""
# pylint: disable=g-long-lambda
@parameterized.named_parameters(
{
"testcase_name":
"nan_features",
"head":
regression_head.RegressionHead(
name="y", loss_reduction=tf_compat.SUM_OVER_BATCH_SIZE),
"input_fn":
lambda: ({
"x": tf.math.log([[1., 0.]])
}, tf.zeros([1, 1]))
}, {
"testcase_name":
"nan_label",
"head":
regression_head.RegressionHead(
name="y", loss_reduction=tf_compat.SUM_OVER_BATCH_SIZE),
"input_fn":
lambda: ({
"x": tf.ones([1, 2])
}, tf.math.log([[0.]]))
}, {
"testcase_name":
"nan_labels_dict",
"head":
multi_head_lib.MultiHead(heads=[
regression_head.RegressionHead(
name="y", loss_reduction=tf_compat.SUM_OVER_BATCH_SIZE),
]),
"input_fn":
lambda: ({
"x": tf.ones([1, 2])
}, {
"y": tf.math.log([[0.]])
})
})
# pylint: enable=g-long-lambda
def test_nans_from_input_fn(self, head, input_fn):
subnetwork_generator = SimpleGenerator([_DNNBuilder("dnn")])
estimator = Estimator(
head=head,
subnetwork_generator=subnetwork_generator,
max_iteration_steps=3,
model_dir=self.test_subdirectory,
debug=True)
with self.assertRaises(tf.errors.InvalidArgumentError):
estimator.train(input_fn=input_fn, max_steps=3)
class EstimatorEvaluateDuringTrainHookTest(tu.AdanetTestCase):
"""Tests b/129000842 with a hook that calls estimator.evaluate()."""
def test_train(self):
run_config = tf.estimator.RunConfig(tf_random_seed=42)
subnetwork_generator = SimpleGenerator([_DNNBuilder("dnn")])
estimator = Estimator(
head=tu.head(),
subnetwork_generator=subnetwork_generator,
max_iteration_steps=1,
model_dir=self.test_subdirectory,
config=run_config)
train_input_fn = tu.dummy_input_fn(XOR_FEATURES, XOR_LABELS)
class EvalTrainHook(tf.estimator.SessionRunHook):
def end(self, session):
estimator.evaluate(input_fn=train_input_fn, steps=1)
    # This should not loop indefinitely.
estimator.train(
input_fn=train_input_fn, max_steps=3, hooks=[EvalTrainHook()])
class CheckpointSaverHookDuringTrainingTest(tu.AdanetTestCase):
"""Tests b/139057887."""
def test_checkpoint_saver_hooks_not_decorated_during_training(self):
run_config = tf.estimator.RunConfig(tf_random_seed=42)
subnetwork_generator = SimpleGenerator([_DNNBuilder("dnn")])
estimator = Estimator(
head=tu.head(),
subnetwork_generator=subnetwork_generator,
max_iteration_steps=1,
model_dir=self.test_subdirectory,
config=run_config)
train_input_fn = tu.dummy_input_fn(XOR_FEATURES, XOR_LABELS)
saver_hook = tf_compat.v1.train.CheckpointSaverHook(
checkpoint_dir=self.test_subdirectory, save_steps=10)
listener = tf_compat.v1.train.CheckpointSaverListener()
estimator.train(
input_fn=train_input_fn,
max_steps=3,
hooks=[saver_hook],
saving_listeners=[listener])
# If CheckpointSaverHook was not recognized during training then all
# saving_listeners would be attached to a default CheckpointSaverHook that
# Estimator creates.
self.assertLen(saver_hook._listeners, 1)
self.assertIs(saver_hook._listeners[0], listener)
class EstimatorTFLearnRunConfigTest(tu.AdanetTestCase):
"""Tests b/129483642 for tf.contrib.learn.RunConfig.
Checks that TF_CONFIG is overwritten correctly when no cluster is specified
in the RunConfig and the only task is of type chief.
"""
def test_train(self):
try:
run_config = tf.contrib.learn.RunConfig(tf_random_seed=42)
# Removed in TF 1.15 (nightly). See
# https://travis-ci.org/tensorflow/adanet/jobs/583471908
_ = run_config._session_creation_timeout_secs
except AttributeError:
self.skipTest("There is no tf.contrib in TF 2.0.")
try:
tf_config = {
"task": {
"type": "chief",
"index": 0
},
}
os.environ["TF_CONFIG"] = json.dumps(tf_config)
run_config = tf.contrib.learn.RunConfig(tf_random_seed=42)
run_config._is_chief = True # pylint: disable=protected-access
subnetwork_generator = SimpleGenerator([_DNNBuilder("dnn")])
estimator = Estimator(
head=tu.head(),
subnetwork_generator=subnetwork_generator,
max_iteration_steps=1,
model_dir=self.test_subdirectory,
config=run_config)
train_input_fn = tu.dummy_input_fn(XOR_FEATURES, XOR_LABELS)
# Will fail if TF_CONFIG is not overwritten correctly in
# Estimator#prepare_next_iteration.
estimator.train(input_fn=train_input_fn, max_steps=3)
finally:
      # Remove TF_CONFIG from the environment so other tests are unaffected.
del os.environ["TF_CONFIG"]
class EstimatorReplayTest(tu.AdanetTestCase):
@parameterized.named_parameters(
{
"testcase_name": "no_evaluator",
"evaluator": None,
"replay_evaluator": None,
"want_architecture": " dnn3 | dnn3 | dnn ",
}, {
"testcase_name":
"evaluator",
"evaluator":
Evaluator(
input_fn=tu.dummy_input_fn(XOR_FEATURES, XOR_LABELS),
steps=1),
"replay_evaluator":
Evaluator(
            input_fn=tu.dummy_input_fn([[0., 0.], [0., 0.], [0., 0.],
[0., 0.]], [[0], [0], [0], [0]]),
steps=1),
"want_architecture":
" dnn3 | dnn3 | dnn ",
})
def test_replay(self, evaluator, replay_evaluator, want_architecture):
"""Train entire estimator lifecycle using Replay."""
original_model_dir = os.path.join(self.test_subdirectory, "original")
run_config = tf.estimator.RunConfig(
tf_random_seed=42, model_dir=original_model_dir)
subnetwork_generator = SimpleGenerator([
_DNNBuilder("dnn"),
_DNNBuilder("dnn2", layer_size=3),
_DNNBuilder("dnn3", layer_size=5),
])
estimator = Estimator(
head=tu.head(),
subnetwork_generator=subnetwork_generator,
max_iteration_steps=10,
evaluator=evaluator,
config=run_config)
train_input_fn = tu.dummy_input_fn(XOR_FEATURES, XOR_LABELS)
# Train for three iterations.
estimator.train(input_fn=train_input_fn, max_steps=30)
# Evaluate.
eval_results = estimator.evaluate(input_fn=train_input_fn, steps=1)
self.assertIn(want_architecture,
str(eval_results["architecture/adanet/ensembles"]))
replay_run_config = tf.estimator.RunConfig(
tf_random_seed=42,
model_dir=os.path.join(self.test_subdirectory, "replayed"))
# Use different features and labels to represent a shift in the data
# distribution.
    different_features = [[0., 0.], [0., 0.], [0., 0.], [0., 0.]]
different_labels = [[0], [0], [0], [0]]
replay_estimator = Estimator(
head=tu.head(),
subnetwork_generator=subnetwork_generator,
max_iteration_steps=10,
evaluator=replay_evaluator,
config=replay_run_config,
replay_config=replay.Config(best_ensemble_indices=[2, 3, 1]))
train_input_fn = tu.dummy_input_fn(different_features, different_labels)
# Train for three iterations.
replay_estimator.train(input_fn=train_input_fn, max_steps=30)
# Evaluate.
eval_results = replay_estimator.evaluate(input_fn=train_input_fn, steps=1)
self.assertIn(want_architecture,
str(eval_results["architecture/adanet/ensembles"]))
if __name__ == "__main__":
tf.test.main()
| 115,180 | 33.734922 | 139 | py |
adanet | adanet-master/adanet/distributed/placement_test.py | # Copyright 2019 The AdaNet Authors. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# https://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Distributed placement strategy tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
from absl.testing import parameterized
from adanet.distributed.placement import ReplicationStrategy
from adanet.distributed.placement import RoundRobinStrategy
import tensorflow.compat.v2 as tf
# pylint: disable=g-direct-tensorflow-import
from tensorflow.python.eager import context
from tensorflow.python.framework import test_util
# pylint: enable=g-direct-tensorflow-import
class ReplicationStrategyTest(tf.test.TestCase):
@test_util.run_in_graph_and_eager_modes
def test_strategy(self):
strategy = ReplicationStrategy()
num_subnetworks = 3
subnetwork_index = 1
self.assertTrue(strategy.should_build_ensemble(num_subnetworks))
self.assertTrue(
strategy.should_build_subnetwork(num_subnetworks, subnetwork_index))
self.assertTrue(strategy.should_train_subnetworks(num_subnetworks))
class WorkerConfig(object):
def __init__(self, num_worker_replicas, global_id_in_cluster):
self.num_worker_replicas = num_worker_replicas
self.global_id_in_cluster = global_id_in_cluster
class ParameterServerConfig(object):
def __init__(self, num_ps_replicas):
self.num_ps_replicas = num_ps_replicas
def _testcase_name(name, drop_remainder):
return "{}{}".format(name, "_drop_remainder" if drop_remainder else "")
class RoundRobinStrategyTest(parameterized.TestCase, tf.test.TestCase):
# pylint: disable=g-complex-comprehension
@parameterized.named_parameters(
itertools.chain(*[[
{
"testcase_name":
_testcase_name("one_worker_one_subnetwork", drop_remainder),
"num_workers":
1,
"num_subnetworks":
1,
"drop_remainder":
drop_remainder,
"want_should_build_ensemble": [True],
"want_should_build_subnetwork": [[True]],
"want_should_train_subnetworks": [True],
},
{
"testcase_name":
_testcase_name("three_workers_one_subnetworks", drop_remainder
),
"num_workers":
3,
"num_subnetworks":
1,
"drop_remainder":
drop_remainder,
"want_should_build_ensemble": [True, True, True],
"want_should_build_subnetwork": [[True], [True], [True]],
"want_should_train_subnetworks": [True, True, True],
},
{
"testcase_name":
_testcase_name("two_workers_one_subnetworks", drop_remainder),
"num_workers":
2,
"num_subnetworks":
5,
"drop_remainder":
drop_remainder,
"want_should_build_ensemble": [True, False],
"want_should_build_subnetwork": [[True, True, True, True, True],
[
True,
not drop_remainder,
not drop_remainder,
not drop_remainder,
not drop_remainder,
]],
"want_should_train_subnetworks": [False, True],
},
{
"testcase_name":
_testcase_name("one_worker_three_subnetworks", drop_remainder
),
"num_workers":
1,
"num_subnetworks":
3,
"drop_remainder":
drop_remainder,
"want_should_build_ensemble": [True],
"want_should_build_subnetwork": [[True, True, True]],
"want_should_train_subnetworks": [True],
},
{
"testcase_name":
_testcase_name("two_workers_three_subnetworks", drop_remainder
),
"num_workers":
2,
"num_subnetworks":
3,
"drop_remainder":
drop_remainder,
"want_should_build_ensemble": [True, False],
"want_should_build_subnetwork": [
[True, True, True],
[True, not drop_remainder, not drop_remainder],
],
"want_should_train_subnetworks": [False, True],
},
{
"testcase_name":
_testcase_name("three_workers_three_subnetworks",
drop_remainder),
"num_workers":
3,
"num_subnetworks":
3,
"drop_remainder":
drop_remainder,
"want_should_build_ensemble": [True, False, False],
"want_should_build_subnetwork": [
[True, True, True],
[True, False, not drop_remainder],
[False, True, False],
],
"want_should_train_subnetworks": [False, True, True],
},
{
"testcase_name":
_testcase_name("four_workers_three_subnetworks",
drop_remainder),
"num_workers":
4,
"num_subnetworks":
3,
"drop_remainder":
drop_remainder,
"want_should_build_ensemble": [True, False, False, False],
"want_should_build_subnetwork": [
[True, True, True],
[True, False, False],
[False, True, False],
[False, False, True],
],
"want_should_train_subnetworks": [False, True, True, True],
},
{
"testcase_name":
_testcase_name("five_workers_three_subnetworks",
drop_remainder),
"num_workers":
5,
"num_subnetworks":
3,
"drop_remainder":
drop_remainder,
"want_should_build_ensemble": [True, False, False, False, True],
"want_should_build_subnetwork": [
[True, True, True],
[True, False, False],
[False, True, False],
[False, False, True],
[True, True, True],
],
"want_should_train_subnetworks": [False, True, True, True, False],
},
{
"testcase_name":
_testcase_name("six_workers_three_subnetworks", drop_remainder
),
"num_workers":
6,
"num_subnetworks":
3,
"drop_remainder":
drop_remainder,
"want_should_build_ensemble":
[True, False, False, False, True, False],
"want_should_build_subnetwork": [
[True, True, True],
[True, False, False],
[False, True, False],
[False, False, True],
[True, True, True],
[True, not drop_remainder, not drop_remainder],
],
"want_should_train_subnetworks":
[False, True, True, True, False, True],
},
{
"testcase_name":
_testcase_name("seven_workers_three_subnetworks",
drop_remainder),
"num_workers":
7,
"num_subnetworks":
3,
"drop_remainder":
drop_remainder,
"want_should_build_ensemble":
[True, False, False, False, True, False, False],
"want_should_build_subnetwork": [
[True, True, True],
[True, False, False],
[False, True, False],
[False, False, True],
[True, True, True],
[True, False, not drop_remainder],
[False, True, False],
],
"want_should_train_subnetworks":
[False, True, True, True, False, True, True],
},
{
"testcase_name":
_testcase_name("eight_workers_three_subnetworks",
drop_remainder),
"num_workers":
8,
"num_subnetworks":
3,
"drop_remainder":
drop_remainder,
"want_should_build_ensemble":
[True, False, False, False, True, False, False, False],
"want_should_build_subnetwork": [
[True, True, True],
[True, False, False],
[False, True, False],
[False, False, True],
[True, True, True],
[True, False, False],
[False, True, False],
[False, False, True],
],
"want_should_train_subnetworks":
[False, True, True, True, False, True, True, True],
},
] for drop_remainder in [False, True]]))
# pylint: enable=g-complex-comprehension
@test_util.run_in_graph_and_eager_modes
def test_worker_methods(self, num_workers, num_subnetworks, drop_remainder,
want_should_build_ensemble,
want_should_build_subnetwork,
want_should_train_subnetworks):
should_build_ensemble = []
should_build_subnetwork = []
should_train_subnetworks = []
for worker_index in range(num_workers):
strategy = RoundRobinStrategy(drop_remainder)
strategy.config = WorkerConfig(num_workers, worker_index)
should_build_ensemble.append(
strategy.should_build_ensemble(num_subnetworks))
should_build_subnetwork.append([])
should_train_subnetworks.append(
strategy.should_train_subnetworks(num_subnetworks))
for subnetwork_index in range(num_subnetworks):
should_build_subnetwork[-1].append(
strategy.should_build_subnetwork(num_subnetworks, subnetwork_index))
self.assertEqual(want_should_build_ensemble, should_build_ensemble)
self.assertEqual(want_should_build_subnetwork, should_build_subnetwork)
self.assertEqual(want_should_train_subnetworks, should_train_subnetworks)
@parameterized.named_parameters(
{
"testcase_name":
"one_ps_one_subnetwork",
"num_ps":
1,
"num_subnetworks":
1,
"want_variable_devices": [[
"/job:ps/task:0", "/job:ps/task:0", "/job:ps/task:0",
"/job:ps/task:0"
],],
},
{
"testcase_name":
"three_ps_one_subnetwork",
"num_ps":
3,
"num_subnetworks":
1,
"want_variable_devices": [[
"/job:ps/task:1", "/job:ps/task:0", "/job:ps/task:2",
"/job:ps/task:0"
],],
},
{
"testcase_name":
"two_ps_five_subnetworks",
"num_ps":
2,
"num_subnetworks":
5,
"want_variable_devices": [
[
"/job:ps/task:0", "/job:ps/task:0", "/job:ps/task:0",
"/job:ps/task:0"
],
[
"/job:ps/task:1", "/job:ps/task:1", "/job:ps/task:1",
"/job:ps/task:1"
],
[
"/job:ps/task:0", "/job:ps/task:0", "/job:ps/task:0",
"/job:ps/task:0"
],
[
"/job:ps/task:1", "/job:ps/task:1", "/job:ps/task:1",
"/job:ps/task:1"
],
[
"/job:ps/task:0", "/job:ps/task:0", "/job:ps/task:0",
"/job:ps/task:0"
],
],
},
{
"testcase_name":
"one_ps_three_subnetworks",
"num_ps":
1,
"num_subnetworks":
3,
"want_variable_devices": [
[
"/job:ps/task:0", "/job:ps/task:0", "/job:ps/task:0",
"/job:ps/task:0"
],
[
"/job:ps/task:0", "/job:ps/task:0", "/job:ps/task:0",
"/job:ps/task:0"
],
[
"/job:ps/task:0", "/job:ps/task:0", "/job:ps/task:0",
"/job:ps/task:0"
],
],
},
{
"testcase_name":
"two_ps_three_subnetworks",
"num_ps":
2,
"num_subnetworks":
3,
"want_variable_devices": [
[
"/job:ps/task:0", "/job:ps/task:0", "/job:ps/task:0",
"/job:ps/task:0"
],
[
"/job:ps/task:1", "/job:ps/task:1", "/job:ps/task:1",
"/job:ps/task:1"
],
[
"/job:ps/task:0", "/job:ps/task:0", "/job:ps/task:0",
"/job:ps/task:0"
],
],
},
{
"testcase_name":
"three_ps_three_subnetworks",
"num_ps":
3,
"num_subnetworks":
3,
"want_variable_devices": [
[
"/job:ps/task:0", "/job:ps/task:0", "/job:ps/task:0",
"/job:ps/task:0"
],
[
"/job:ps/task:1", "/job:ps/task:1", "/job:ps/task:1",
"/job:ps/task:1"
],
[
"/job:ps/task:2", "/job:ps/task:2", "/job:ps/task:2",
"/job:ps/task:2"
],
],
},
{
"testcase_name":
"three_ps_three_subnetworks_no_dedicated_parameter_servers",
"num_ps":
3,
"num_subnetworks":
3,
"dedicate_parameter_servers":
False,
"want_variable_devices": [
["", "", "", ""],
["", "", "", ""],
["", "", "", ""],
],
},
{
"testcase_name":
"four_ps_three_subnetworks",
"num_ps":
4,
"num_subnetworks":
3,
"want_variable_devices": [
[
"/job:ps/task:1", "/job:ps/task:0", "/job:ps/task:0",
"/job:ps/task:0"
],
[
"/job:ps/task:2", "/job:ps/task:2", "/job:ps/task:2",
"/job:ps/task:2"
],
[
"/job:ps/task:3", "/job:ps/task:3", "/job:ps/task:3",
"/job:ps/task:3"
],
],
},
{
"testcase_name":
"five_ps_three_subnetworks",
"num_ps":
5,
"num_subnetworks":
3,
"want_variable_devices": [
[
"/job:ps/task:1", "/job:ps/task:0", "/job:ps/task:0",
"/job:ps/task:0"
],
[
"/job:ps/task:2", "/job:ps/task:3", "/job:ps/task:3",
"/job:ps/task:2"
],
[
"/job:ps/task:4", "/job:ps/task:4", "/job:ps/task:4",
"/job:ps/task:4"
],
],
},
{
"testcase_name":
"six_ps_three_subnetworks",
"num_ps":
6,
"num_subnetworks":
3,
"want_variable_devices": [
[
"/job:ps/task:1", "/job:ps/task:0", "/job:ps/task:0",
"/job:ps/task:0"
],
[
"/job:ps/task:2", "/job:ps/task:3", "/job:ps/task:3",
"/job:ps/task:2"
],
[
"/job:ps/task:5", "/job:ps/task:4", "/job:ps/task:4",
"/job:ps/task:5"
],
],
},
{
"testcase_name":
"seven_ps_three_subnetworks",
"num_ps":
7,
"num_subnetworks":
3,
"want_variable_devices": [
[
"/job:ps/task:1", "/job:ps/task:0", "/job:ps/task:2",
"/job:ps/task:0"
],
[
"/job:ps/task:3", "/job:ps/task:4", "/job:ps/task:4",
"/job:ps/task:3"
],
[
"/job:ps/task:6", "/job:ps/task:5", "/job:ps/task:5",
"/job:ps/task:6"
],
],
},
{
"testcase_name":
"eight_ps_three_subnetworks",
"num_ps":
8,
"num_subnetworks":
3,
"want_variable_devices": [
[
"/job:ps/task:1", "/job:ps/task:0", "/job:ps/task:2",
"/job:ps/task:0"
],
[
"/job:ps/task:4", "/job:ps/task:5", "/job:ps/task:5",
"/job:ps/task:4"
],
[
"/job:ps/task:7", "/job:ps/task:6", "/job:ps/task:6",
"/job:ps/task:7"
],
],
},
)
@test_util.run_in_graph_and_eager_modes
def test_device_methods(self,
num_ps,
num_subnetworks,
want_variable_devices,
dedicate_parameter_servers=True):
with context.graph_mode():
x = tf.constant([[1., 0.]])
strategy = RoundRobinStrategy(
dedicate_parameter_servers=dedicate_parameter_servers)
strategy.config = ParameterServerConfig(num_ps)
variable_devices = []
for i in range(num_subnetworks):
with strategy.subnetwork_devices(num_subnetworks, i):
subnetwork = tf.keras.Sequential()
subnetwork.add(tf.keras.layers.Dense(4))
subnetwork.add(tf.keras.layers.Dense(3))
subnetwork(x)
variable_devices.append([w.op.device for w in subnetwork.weights])
self.assertEqual(want_variable_devices, variable_devices)
if __name__ == "__main__":
tf.test.main()
| 19,604 | 33.334501 | 80 | py |
adanet | adanet-master/adanet/autoensemble/estimator_v2_test.py | """Tests for AdaNet AutoEnsembleEstimator in TF 2.
Copyright 2019 The AdaNet Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import shutil
import sys
from absl import flags
from absl.testing import parameterized
from adanet import tf_compat
from adanet.autoensemble.estimator import AutoEnsembleEstimator
import tensorflow.compat.v2 as tf
# pylint: disable=g-direct-tensorflow-import
from tensorflow.python.estimator.export import export
from tensorflow_estimator.python.estimator.head import regression_head
# pylint: enable=g-direct-tensorflow-import
class AutoEnsembleEstimatorV2Test(parameterized.TestCase, tf.test.TestCase):
def setUp(self):
super(AutoEnsembleEstimatorV2Test, self).setUp()
# Setup and cleanup test directory.
# Flags are not automatically parsed at this point.
flags.FLAGS(sys.argv)
self.test_subdirectory = os.path.join(flags.FLAGS.test_tmpdir, self.id())
shutil.rmtree(self.test_subdirectory, ignore_errors=True)
os.makedirs(self.test_subdirectory)
def tearDown(self):
super(AutoEnsembleEstimatorV2Test, self).tearDown()
shutil.rmtree(self.test_subdirectory, ignore_errors=True)
# pylint: disable=g-long-lambda
@parameterized.named_parameters(
{
"testcase_name":
"candidate_pool_lambda",
"candidate_pool":
lambda head, feature_columns, optimizer: lambda config: {
"dnn":
tf.compat.v2.estimator.DNNEstimator(
head=head,
feature_columns=feature_columns,
optimizer=optimizer,
hidden_units=[3],
config=config),
"linear":
tf.compat.v2.estimator.LinearEstimator(
head=head,
feature_columns=feature_columns,
optimizer=optimizer,
config=config),
},
"want_loss":
.209,
},)
# pylint: enable=g-long-lambda
@tf_compat.skip_for_tf1
def test_auto_ensemble_estimator_lifecycle(self,
candidate_pool,
want_loss,
max_train_steps=30):
features = {"input_1": [[1., 0.]]}
labels = [[1.]]
run_config = tf.estimator.RunConfig(tf_random_seed=42)
head = regression_head.RegressionHead()
# Always create optimizers in a lambda to prevent error like:
# `RuntimeError: Cannot set `iterations` to a new Variable after the
# Optimizer weights have been created`
optimizer = lambda: tf.keras.optimizers.SGD(lr=.01)
feature_columns = [tf.feature_column.numeric_column("input_1", shape=[2])]
def train_input_fn():
input_features = {}
for key, feature in features.items():
input_features[key] = tf.constant(feature, name=key)
input_labels = tf.constant(labels, name="labels")
return input_features, input_labels
def test_input_fn():
dataset = tf.data.Dataset.from_tensors([tf.constant(features["input_1"])])
input_features = tf.compat.v1.data.make_one_shot_iterator(
dataset).get_next()
return {"input_1": input_features}, None
estimator = AutoEnsembleEstimator(
head=head,
candidate_pool=candidate_pool(head, feature_columns, optimizer),
max_iteration_steps=10,
force_grow=True,
model_dir=self.test_subdirectory,
config=run_config)
# Train for three iterations.
estimator.train(input_fn=train_input_fn, max_steps=max_train_steps)
# Evaluate.
eval_results = estimator.evaluate(input_fn=train_input_fn, steps=1)
self.assertAllClose(max_train_steps, eval_results["global_step"])
self.assertAllClose(want_loss, eval_results["loss"], atol=.3)
# Predict.
predictions = estimator.predict(input_fn=test_input_fn)
for prediction in predictions:
self.assertIsNotNone(prediction["predictions"])
# Export SavedModel.
def serving_input_fn():
"""Input fn for serving export, starting from serialized example."""
serialized_example = tf.compat.v1.placeholder(
dtype=tf.string, shape=(None), name="serialized_example")
for key, value in features.items():
features[key] = tf.constant(value)
return export.SupervisedInputReceiver(
features=features,
labels=tf.constant(labels),
receiver_tensors=serialized_example)
export_dir_base = os.path.join(self.test_subdirectory, "export")
estimator.export_saved_model(
export_dir_base=export_dir_base,
serving_input_receiver_fn=serving_input_fn)
if __name__ == "__main__":
tf.enable_v2_behavior()
tf.test.main()
| 5,484 | 35.324503 | 80 | py |
adanet | adanet-master/adanet/experimental/__init__.py | # Lint as: python3
# Copyright 2020 The AdaNet Authors. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# https://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""AdaNet experimental directory."""
from adanet.experimental import controllers
from adanet.experimental import keras
from adanet.experimental import phases
from adanet.experimental import schedulers
from adanet.experimental import storages
from adanet.experimental import work_units
__all__ = [
"controllers",
"keras",
"phases",
"schedulers",
"storages",
"work_units",
]
| 1,021 | 29.969697 | 74 | py |
adanet | adanet-master/adanet/experimental/storages/storage.py | # Lint as: python3
# Copyright 2019 The AdaNet Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A storage for persisting results and managing stage."""
import abc
from typing import Iterable, List
import tensorflow.compat.v2 as tf
class ModelContainer:
"""A container for a model and its metadata."""
def __init__(self, score: float, model: tf.keras.Model, metrics: List[float]):
self.score = score
self.model = model
self.metrics = metrics
def __eq__(self, other: 'ModelContainer'):
return self.score == other.score
def __lt__(self, other: 'ModelContainer'):
return self.score < other.score
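  # Containers order by score alone, so a min-heap of containers yields the
  # lowest-score (e.g. lowest-loss) model first. A sketch with hypothetical
  # models `model_a` and `model_b`:
  #
  #   ModelContainer(0.1, model_a, [0.1]) < ModelContainer(0.2, model_b, [0.2])
  #   # -> True: model_a ranks as the better model.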
class Storage(abc.ABC):
"""A storage for persisting results and managing state."""
@abc.abstractmethod
def save_model(self, model_container: ModelContainer):
"""Stores a model and its metadata."""
# TODO: How do we enforce that save_model is called only once per
# model?
pass
@abc.abstractmethod
def get_models(self) -> Iterable[tf.keras.Model]:
"""Returns all stored models."""
pass
@abc.abstractmethod
def get_best_models(self, num_models: int = 1) -> Iterable[tf.keras.Model]:
"""Returns the top `num_models` stored models in descending order."""
pass
@abc.abstractmethod
def get_model_metrics(self) -> Iterable[Iterable[float]]:
"""Returns the metrics for all stored models."""
pass
| 1,910 | 29.822581 | 80 | py |
adanet | adanet-master/adanet/experimental/storages/in_memory_storage.py | # Lint as: python3
# Copyright 2019 The AdaNet Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A storage for persisting results and managing stage."""
import heapq
from typing import List
from adanet.experimental.storages.storage import ModelContainer
from adanet.experimental.storages.storage import Storage
import tensorflow.compat.v2 as tf
class InMemoryStorage(Storage):
"""In memory storage for testing-only.
Uses a priority queue under the hood to sort the models according to their
score.
Currently the only supported score is 'loss'.
"""
def __init__(self):
self._model_containers = []
def save_model(self, model_container: ModelContainer):
"""Stores a model.
Args:
model_container: A `ModelContainer` instance.
"""
    # `ModelContainer` defines `__lt__` on its score, so heapq can order the
    # containers directly; models that happen to have the same loss end up in
    # arbitrary relative order.
heapq.heappush(self._model_containers, model_container)
def get_models(self) -> List[tf.keras.Model]:
"""Returns all stored models."""
return [c.model for c in self._model_containers]
def get_best_models(self, num_models: int = 1) -> List[tf.keras.Model]:
"""Returns the top `num_models` stored models in descending order."""
return [c.model
for c in heapq.nsmallest(num_models, self._model_containers)]
def get_model_metrics(self) -> List[List[float]]:
"""Returns the metrics for all stored models."""
return [c.metrics for c in self._model_containers]
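# A minimal usage sketch, assuming `model_a` and `model_b` are trained Keras
# models; scores are losses, so the best model has the smallest score:
#
#   storage = InMemoryStorage()
#   storage.save_model(ModelContainer(0.3, model_a, [0.3, 0.90]))
#   storage.save_model(ModelContainer(0.1, model_b, [0.1, 0.95]))
#   storage.get_best_models(num_models=1)  # -> [model_b]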
| 2,113 | 34.233333 | 78 | py |
adanet | adanet-master/adanet/experimental/work_units/keras_tuner_work_unit.py | # Lint as: python3
# Copyright 2019 The AdaNet Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A work unit for training, evaluating, and saving a Keras model."""
import os
import time
from adanet.experimental.work_units import work_unit
from kerastuner.engine.tuner import Tuner
import tensorflow.compat.v2 as tf
class KerasTunerWorkUnit(work_unit.WorkUnit):
"""Trains, evaluates and saves a tuned Keras model."""
def __init__(self, tuner: Tuner, *search_args, **search_kwargs):
self._tuner = tuner
self._search_args = search_args
self._search_kwargs = search_kwargs
# TODO: Allow better customization of TensorBoard log_dir.
def execute(self):
log_dir = os.path.join('/tmp', str(int(time.time())))
tensorboard = tf.keras.callbacks.TensorBoard(log_dir=log_dir,
update_freq='batch')
# We don't need to eval and store, because the Tuner does it for us.
self._tuner.search(callbacks=[tensorboard], *self._search_args,
**self._search_kwargs)
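# A minimal usage sketch; the RandomSearch arguments below are illustrative,
# not prescribed by this module:
#
#   tuner = kerastuner.tuners.RandomSearch(
#       build_model, objective='val_loss', max_trials=3, directory='/tmp/kt')
#   KerasTunerWorkUnit(tuner, x=train_ds,
#                      validation_data=eval_ds).execute()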
| 1,584 | 37.658537 | 74 | py |
adanet | adanet-master/adanet/experimental/work_units/keras_trainer_work_unit.py | # Lint as: python3
# Copyright 2019 The AdaNet Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A work unit for training, evaluating, and saving a Keras model."""
import os
import time
from adanet.experimental.storages.storage import ModelContainer
from adanet.experimental.storages.storage import Storage
from adanet.experimental.work_units import work_unit
import tensorflow.compat.v2 as tf
class KerasTrainerWorkUnit(work_unit.WorkUnit):
"""Trains, evaluates, and saves a Keras model."""
def __init__(self, model: tf.keras.Model,
train_dataset: tf.data.Dataset,
eval_dataset: tf.data.Dataset,
storage: Storage,
tensorboard_base_dir: str = '/tmp'):
self._model = model
self._train_dataset = train_dataset
self._eval_dataset = eval_dataset
self._storage = storage
self._tensorboard_base_dir = tensorboard_base_dir
# TODO: Allow better customization of TensorBoard log_dir.
def execute(self):
log_dir = os.path.join(self._tensorboard_base_dir, str(int(time.time())))
tensorboard = tf.keras.callbacks.TensorBoard(log_dir=log_dir,
update_freq='batch')
if self._model.trainable:
self._model.fit(self._train_dataset, callbacks=[tensorboard])
else:
print('Skipping training since model.trainable set to false.')
results = self._model.evaluate(self._eval_dataset, callbacks=[tensorboard])
    # If the model was compiled with metrics, `results` is a list of the loss
    # followed by the metric values. If the model was compiled without
    # metrics, it is a scalar loss.
if not isinstance(results, list):
results = [results]
self._storage.save_model(ModelContainer(results[0], self._model, results))
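# A minimal usage sketch, assuming `model` is a compiled tf.keras.Model,
# `train_ds`/`eval_ds` are tf.data.Dataset instances, and InMemoryStorage is
# imported from adanet.experimental.storages:
#
#   work_unit = KerasTrainerWorkUnit(model, train_ds, eval_ds,
#                                    InMemoryStorage())
#   work_unit.execute()  # fits (if trainable), evaluates, stores the result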
| 2,301 | 40.107143 | 79 | py |
adanet | adanet-master/adanet/experimental/work_units/__init__.py | # Lint as: python3
# Copyright 2020 The AdaNet Authors. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# https://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""AdaNet ModelFlow work units."""
from adanet.experimental.work_units.keras_trainer_work_unit import KerasTrainerWorkUnit
from adanet.experimental.work_units.keras_tuner_work_unit import KerasTunerWorkUnit
__all__ = [
"KerasTrainerWorkUnit",
"KerasTunerWorkUnit",
]
| 899 | 35 | 87 | py |
adanet | adanet-master/adanet/experimental/phases/autoensemble_phase.py | # Lint as: python3
# Copyright 2020 The AdaNet Authors. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# https://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A phase that automatically ensembles models."""
import abc
import random
from typing import Iterable, Iterator, List
from adanet.experimental.keras.ensemble_model import EnsembleModel
from adanet.experimental.keras.ensemble_model import MeanEnsemble
from adanet.experimental.phases.phase import DatasetProvider
from adanet.experimental.phases.phase import ModelProvider
from adanet.experimental.storages.in_memory_storage import InMemoryStorage
from adanet.experimental.storages.storage import Storage
from adanet.experimental.work_units.keras_trainer_work_unit import KerasTrainerWorkUnit
from adanet.experimental.work_units.work_unit import WorkUnit
import tensorflow.compat.v2 as tf
class EnsembleStrategy(abc.ABC):
"""An abstract ensemble strategy."""
@abc.abstractmethod
def __call__(
self, candidates: List[tf.keras.Model]) -> Iterable[List[tf.keras.Model]]:
pass
class Ensembler(abc.ABC):
"""An abstract ensembler."""
def __init__(self, loss, optimizer, metrics):
self._loss = loss
self._optimizer = optimizer
self._metrics = metrics
@abc.abstractmethod
def __call__(self, submodels: List[tf.keras.Model]) -> EnsembleModel:
pass
class MeanEnsembler(Ensembler):
"""An ensembler that averages the weights of submodel outputs."""
def __init__(self, loss, optimizer, metrics, freeze_submodels=True):
super().__init__(loss, optimizer, metrics)
self._freeze_submodels = freeze_submodels
def __call__(self, submodels: List[tf.keras.Model]) -> EnsembleModel:
ensemble = MeanEnsemble(submodels, freeze_submodels=self._freeze_submodels)
if self._freeze_submodels:
for layer in ensemble.layers:
layer.trainable = False
# Compile SGD with learning rate set to 0 for no weight updates.
ensemble.compile(
loss=self._loss, optimizer=tf.keras.optimizers.SGD(0),
metrics=self._metrics)
return ensemble
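# A minimal sketch, assuming `submodel_a` and `submodel_b` are trained Keras
# models: the ensembler freezes their layers and compiles the MeanEnsemble
# with SGD(0) so ensembling performs no weight updates.
#
#   ensembler = MeanEnsembler(loss='mse', optimizer='sgd', metrics=['mae'])
#   ensemble = ensembler([submodel_a, submodel_b])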
class GrowStrategy(EnsembleStrategy):
"""An ensemble strategy that adds one candidate to the ensemble at a time."""
def __call__(
self, candidates: List[tf.keras.Model]) -> Iterable[List[tf.keras.Model]]:
return [[candidate] for candidate in candidates]
class AllStrategy(EnsembleStrategy):
"""An ensemble strategy that adds all candidates to the ensemble."""
def __call__(
self, candidates: List[tf.keras.Model]) -> Iterable[List[tf.keras.Model]]:
return [candidates]
class RandomKStrategy(EnsembleStrategy):
"""An ensemble strategy that adds k random candidates (with replacement)."""
def __init__(self, k, seed=None):
"""Initializes a RandomKStrategy ensemble strategy.
Args:
k: Number of candidates to sample.
seed: Random seed.
"""
self._k = k
self._seed = seed
def __call__(
self, candidates: List[tf.keras.Model]) -> Iterable[List[tf.keras.Model]]:
    if self._seed:
      # Save and restore the module-level random state so seeding here does
      # not affect other users of the `random` module.
      random_state = random.getstate()
      random.seed(self._seed)
      sampled = random.choices(candidates, k=self._k)
      random.setstate(random_state)
    else:
      sampled = random.choices(candidates, k=self._k)
    return [sampled]
class AutoEnsemblePhase(DatasetProvider, ModelProvider):
"""A phase that automatically ensembles models from a prior phase."""
def __init__(self,
ensemblers: List[Ensembler],
ensemble_strategies: List[EnsembleStrategy],
storage: Storage = InMemoryStorage(),
num_candidates: int = None):
"""Initializes an AutoEnsemblePhase.
Args:
ensemblers: A list of `Ensembler` instances to determine how to combine
subnetworks.
ensemble_strategies: A list of `EnsembleStrategy` instances to determine
which subnetworks compose an ensemble.
storage: A `Storage` instance to store models and model metadata.
num_candidates: The number of subnetwork candidates to consider from the
previous phase. If `None` then all of the subnetworks generated in the
previous phase will be considered.
"""
super().__init__(storage)
self._ensemblers = ensemblers
self._ensemble_strategies = ensemble_strategies
self._num_candidates = num_candidates
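  # A construction sketch with illustrative arguments: grow the ensemble one
  # candidate at a time, considering the top 3 subnetworks from the previous
  # phase.
  #
  #   phase = AutoEnsemblePhase(
  #       ensemblers=[MeanEnsembler('mse', 'sgd', ['mae'])],
  #       ensemble_strategies=[GrowStrategy()],
  #       num_candidates=3)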
def work_units(self, previous_phase) -> Iterator[WorkUnit]:
self._train_dataset = previous_phase.get_train_dataset()
self._eval_dataset = previous_phase.get_eval_dataset()
if self._num_candidates:
candidates = previous_phase.get_best_models(
num_models=self._num_candidates)
else:
candidates = previous_phase.get_models()
if self.get_best_models():
current_best_ensemble = list(self.get_best_models())[0]
else:
current_best_ensemble = None
for ensemble_strategy in self._ensemble_strategies:
for submodels in ensemble_strategy(candidates):
for ensembler in self._ensemblers:
if current_best_ensemble:
previous_ensemble = current_best_ensemble.submodels
else:
previous_ensemble = []
ensemble = ensembler(previous_ensemble + submodels)
yield KerasTrainerWorkUnit(ensemble,
previous_phase.get_train_dataset(),
previous_phase.get_eval_dataset(),
self._storage)
def get_models(self) -> Iterable[tf.keras.Model]:
return self._storage.get_models()
def get_best_models(self, num_models=1) -> Iterable[tf.keras.Model]:
return self._storage.get_best_models(num_models)
# TODO: Add some way to check that work_units has to be called
# before accessing these methods.
def get_train_dataset(self) -> tf.data.Dataset:
return self._train_dataset
def get_eval_dataset(self) -> tf.data.Dataset:
return self._eval_dataset
| 6,425 | 34.899441 | 87 | py |
adanet | adanet-master/adanet/experimental/phases/repeat_phase.py | # Lint as: python3
# Copyright 2020 The AdaNet Authors. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# https://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A phase that repeats its inner phases."""
from typing import Callable, Iterable, Iterator, List
from adanet.experimental.phases.phase import DatasetProvider
from adanet.experimental.phases.phase import ModelProvider
from adanet.experimental.phases.phase import Phase
from adanet.experimental.work_units.work_unit import WorkUnit
import tensorflow.compat.v2 as tf
class RepeatPhase(DatasetProvider, ModelProvider):
"""A phase that repeats its inner phases."""
def __init__(self,
phase_factory: List[Callable[..., Phase]],
repetitions: int):
    """Initializes a RepeatPhase.
    Args:
      phase_factory: A list of callables that return `Phase` instances.
      repetitions: Number of times to repeat the phases in the phase factory.
    """
    self._phase_factory = phase_factory
    self._repetitions = repetitions
    self._final_phase = None
def work_units(self, previous_phase: DatasetProvider) -> Iterator[WorkUnit]:
for _ in range(self._repetitions):
# Each repetition, the "first" previous phase is the one preceeding the
# repeat phase itself.
prev_phase = previous_phase
for phase in self._phase_factory:
phase = phase()
for work_unit in phase.work_units(prev_phase):
yield work_unit
prev_phase = phase
self._final_phase = prev_phase
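  # A usage sketch, assuming `build_models` is a zero-argument callable
  # returning fresh Keras models and KerasTrainerPhase is imported from
  # adanet.experimental.phases: each repetition re-instantiates the inner
  # phases from the factory.
  #
  #   repeat = RepeatPhase(
  #       phase_factory=[lambda: KerasTrainerPhase(build_models)],
  #       repetitions=3)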
def get_train_dataset(self) -> tf.data.Dataset:
if not isinstance(self._final_phase, DatasetProvider):
raise NotImplementedError(
'The last phase in repetition does not provide datasets.')
return self._final_phase.get_train_dataset()
def get_eval_dataset(self) -> tf.data.Dataset:
if not isinstance(self._final_phase, DatasetProvider):
raise NotImplementedError(
'The last phase in repetition does not provide datasets.')
return self._final_phase.get_eval_dataset()
def get_models(self) -> Iterable[tf.keras.Model]:
if not isinstance(self._final_phase, ModelProvider):
raise NotImplementedError(
'The last phase in repetition does not provide models.')
return self._final_phase.get_models()
def get_best_models(self, num_models=1) -> Iterable[tf.keras.Model]:
if not isinstance(self._final_phase, ModelProvider):
raise NotImplementedError(
'The last phase in repetition does not provide models.')
return self._final_phase.get_best_models(num_models)
| 3,035 | 38.947368 | 78 | py |
adanet | adanet-master/adanet/experimental/phases/keras_tuner_phase.py | # Lint as: python3
# Copyright 2019 The AdaNet Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A phase in the AdaNet workflow."""
import sys
from typing import Callable, Iterable, Iterator, Union
from adanet.experimental.phases.phase import DatasetProvider
from adanet.experimental.phases.phase import ModelProvider
from adanet.experimental.work_units.keras_tuner_work_unit import KerasTunerWorkUnit
from adanet.experimental.work_units.work_unit import WorkUnit
from kerastuner.engine.tuner import Tuner
import tensorflow.compat.v2 as tf
class KerasTunerPhase(DatasetProvider, ModelProvider):
"""Tunes Keras Model hyperparameters using the Keras Tuner."""
def __init__(self, tuner: Union[Callable[..., Tuner], Tuner], *search_args,
**search_kwargs):
"""Initializes a KerasTunerPhase.
Args:
tuner: A `kerastuner.tuners.tuner.Tuner` instance or a callable that
returns a `kerastuner.tuners.tuner.Tuner` instance.
*search_args: Arguments to pass to the tuner search method.
**search_kwargs: Keyword arguments to pass to the tuner search method.
"""
if callable(tuner):
self._tuner = tuner()
else:
self._tuner = tuner
self._search_args = search_args
self._search_kwargs = search_kwargs
def work_units(self, previous_phase: DatasetProvider) -> Iterator[WorkUnit]:
self._train_dataset = previous_phase.get_train_dataset()
self._eval_dataset = previous_phase.get_eval_dataset()
yield KerasTunerWorkUnit(
self._tuner,
x=self._train_dataset,
validation_data=self._eval_dataset,
*self._search_args,
**self._search_kwargs)
# TODO: Find a better way to get all models than to pass in a
# large number.
def get_models(self) -> Iterable[tf.keras.Model]:
return self._tuner.get_best_models(num_models=sys.maxsize)
def get_best_models(self, num_models) -> Iterable[tf.keras.Model]:
return self._tuner.get_best_models(num_models=num_models)
def get_train_dataset(self) -> tf.data.Dataset:
return self._train_dataset
def get_eval_dataset(self) -> tf.data.Dataset:
return self._eval_dataset
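# A usage sketch, assuming `tuner` is a configured kerastuner `Tuner` (or a
# zero-argument callable returning one) and `previous_phase` provides the
# datasets; extra keyword arguments such as `epochs` are forwarded to
# `tuner.search`:
#
#   phase = KerasTunerPhase(tuner, epochs=3)
#   for work_unit in phase.work_units(previous_phase):
#     work_unit.execute()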
| 2,683 | 36.277778 | 83 | py |
adanet | adanet-master/adanet/experimental/phases/phase.py | # Lint as: python3
# Copyright 2019 The AdaNet Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A phase in the AdaNet workflow."""
import abc
from typing import Iterable, Iterator, Optional
from adanet.experimental.storages.in_memory_storage import InMemoryStorage
from adanet.experimental.storages.storage import Storage
from adanet.experimental.work_units.work_unit import WorkUnit
import tensorflow.compat.v2 as tf
class Phase(abc.ABC):
"""A stage in a linear workflow."""
def __init__(self, storage: Storage = InMemoryStorage()):
self._storage = storage
# TODO: Find a better way to ensure work_units only gets called
# once per phase.
@abc.abstractmethod
def work_units(self, previous_phase: Optional['Phase']) -> Iterator[WorkUnit]:
pass
class DatasetProvider(Phase, abc.ABC):
"""An interface for a phase that produces datasets."""
def __init__(self, storage: Storage = InMemoryStorage()):
"""Initializes a Phase.
Args:
storage: A `Storage` instance.
"""
super().__init__(storage)
self._train_dataset = None
self._eval_dataset = None
@abc.abstractmethod
def get_train_dataset(self) -> tf.data.Dataset:
"""Returns the dataset for train data."""
pass
@abc.abstractmethod
def get_eval_dataset(self) -> tf.data.Dataset:
"""Returns the dataset for eval data."""
pass
class ModelProvider(Phase, abc.ABC):
"""An interface for a phase that produces models."""
@abc.abstractmethod
def get_models(self) -> Iterable[tf.keras.Model]:
"""Returns the models produced by this phase."""
pass
@abc.abstractmethod
def get_best_models(self, num_models: int = 1) -> Iterable[tf.keras.Model]:
"""Returns the `k` best models produced by this phase."""
pass
| 2,294 | 28.805195 | 80 | py |
adanet | adanet-master/adanet/experimental/phases/__init__.py | # Lint as: python3
# Copyright 2020 The AdaNet Authors. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# https://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""AdaNet ModelFlow phases."""
from adanet.experimental.phases.autoensemble_phase import AutoEnsemblePhase
from adanet.experimental.phases.input_phase import InputPhase
from adanet.experimental.phases.keras_trainer_phase import KerasTrainerPhase
from adanet.experimental.phases.keras_tuner_phase import KerasTunerPhase
from adanet.experimental.phases.repeat_phase import RepeatPhase
__all__ = [
"AutoEnsemblePhase",
"InputPhase",
"KerasTrainerPhase",
"KerasTunerPhase",
"RepeatPhase",
]
| 1,131 | 35.516129 | 76 | py |
adanet | adanet-master/adanet/experimental/phases/keras_trainer_phase.py | # Lint as: python3
# Copyright 2019 The AdaNet Authors. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# https://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A phase in the AdaNet workflow."""
from typing import Callable, Iterable, Iterator, Union
from adanet.experimental.phases.phase import DatasetProvider
from adanet.experimental.phases.phase import ModelProvider
from adanet.experimental.storages.in_memory_storage import InMemoryStorage
from adanet.experimental.storages.storage import Storage
from adanet.experimental.work_units.keras_trainer_work_unit import KerasTrainerWorkUnit
from adanet.experimental.work_units.work_unit import WorkUnit
import tensorflow.compat.v2 as tf
class KerasTrainerPhase(DatasetProvider, ModelProvider):
"""Trains Keras models."""
def __init__(self,
models: Union[Iterable[tf.keras.Model],
Callable[[], Iterable[tf.keras.Model]]],
storage: Storage = InMemoryStorage()):
"""Initializes a KerasTrainerPhase.
Args:
models: A list of `tf.keras.Model` instances or a list of callables that
return `tf.keras.Model` instances.
storage: A `Storage` instance.
"""
    # TODO: Consume arbitrary fit inputs.
    # Datasets should be wrapped inside a work unit. For instance, when you
    # create a KerasTrainerWorkUnit, the dataset is encapsulated inside that
    # work unit. But what if you want to run on different (parts of the)
    # datasets, or a work unit consumes NumPy arrays?
super().__init__(storage)
self._models = models
def work_units(self, previous_phase: DatasetProvider) -> Iterator[WorkUnit]:
self._train_dataset = previous_phase.get_train_dataset()
self._eval_dataset = previous_phase.get_eval_dataset()
models = self._models
if callable(models):
models = models()
for model in models:
yield KerasTrainerWorkUnit(model, self._train_dataset, self._eval_dataset,
self._storage)
def get_models(self) -> Iterable[tf.keras.Model]:
return self._storage.get_models()
def get_best_models(self, num_models) -> Iterable[tf.keras.Model]:
return self._storage.get_best_models(num_models)
def get_train_dataset(self) -> tf.data.Dataset:
return self._train_dataset
def get_eval_dataset(self) -> tf.data.Dataset:
return self._eval_dataset
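# A usage sketch, assuming `model_a` and `model_b` are compiled Keras models
# and `previous_phase` is a DatasetProvider (e.g. an InputPhase):
#
#   phase = KerasTrainerPhase([model_a, model_b])
#   for work_unit in phase.work_units(previous_phase):
#     work_unit.execute()
#   best = phase.get_best_models(num_models=1)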
| 2,844 | 39.070423 | 87 | py |
adanet | adanet-master/adanet/experimental/keras/ensemble_model.py | # Lint as: python3
# Copyright 2019 The AdaNet Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""An AdaNet ensemble implementation."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from typing import Sequence
import tensorflow.compat.v2 as tf
class EnsembleModel(tf.keras.Model):
"""An ensemble of Keras models."""
def __init__(self, submodels: Sequence[tf.keras.Model],
freeze_submodels: bool = True):
"""Initializes an EnsembleModel.
Args:
submodels: A list of `tf.keras.Model` that compose the ensemble.
freeze_submodels: Whether to freeze the weights of submodels.
"""
super().__init__()
if freeze_submodels:
for submodel in submodels:
for layer in submodel.layers:
layer.trainable = False
self._submodels = submodels
@property
def submodels(self) -> Sequence[tf.keras.Model]:
return self._submodels
def call(self, inputs):
raise NotImplementedError
class MeanEnsemble(EnsembleModel):
"""An ensemble that averages submodel outputs."""
def call(self, inputs):
if len(self._submodels) == 1:
return self._submodels[0](inputs)
submodel_outputs = []
for submodel in self._submodels:
submodel_outputs.append(submodel(inputs))
return tf.keras.layers.average(submodel_outputs)
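# A minimal sketch with hypothetical single-output submodels `model_a` and
# `model_b`: the ensemble output is the element-wise mean of their outputs.
#
#   ensemble = MeanEnsemble([model_a, model_b])
#   y = ensemble(x)  # == (model_a(x) + model_b(x)) / 2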
class WeightedEnsemble(EnsembleModel):
"""An ensemble that linearly combines submodel outputs."""
# TODO: Extract output shapes from submodels instead of passing in
# as argument.
def __init__(self, submodels: Sequence[tf.keras.Model], output_units: int):
"""Initializes a WeightedEnsemble.
Args:
      submodels: A list of `tf.keras.Model` that compose the ensemble.
output_units: The output size of the last layer of each submodel.
"""
super().__init__(submodels)
self.dense = tf.keras.layers.Dense(units=output_units)
def call(self, inputs):
submodel_outputs = []
for submodel in self.submodels:
submodel_outputs.append(submodel(inputs))
return self.dense(tf.stack(submodel_outputs))
| 2,663 | 29.62069 | 79 | py |