repo | file | code | file_length | avg_line_length | max_line_length | extension_type
---|---|---|---|---|---|---|
SyNet | SyNet-master/CenterNet/src/lib/datasets/sample/ddd.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch.utils.data as data
import pycocotools.coco as coco
import numpy as np
import torch
import json
import cv2
import os
import math
from utils.image import flip, color_aug
from utils.image import get_affine_transform, affine_transform
from utils.image import gaussian_radius, draw_umich_gaussian, draw_msra_gaussian
class DddDataset(data.Dataset):
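# Convert a COCO-style [x, y, w, h] box into [x1, y1, x2, y2] corner format.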
def _coco_box_to_bbox(self, box):
bbox = np.array([box[0], box[1], box[0] + box[2], box[1] + box[3]],
dtype=np.float32)
return bbox
def _convert_alpha(self, alpha):
return math.radians(alpha + 45) if self.alpha_in_degree else alpha
def __getitem__(self, index):
img_id = self.images[index]
img_info = self.coco.loadImgs(ids=[img_id])[0]
img_path = os.path.join(self.img_dir, img_info['file_name'])
img = cv2.imread(img_path)
if 'calib' in img_info:
calib = np.array(img_info['calib'], dtype=np.float32)
else:
calib = self.calib
height, width = img.shape[0], img.shape[1]
c = np.array([img.shape[1] / 2., img.shape[0] / 2.])
if self.opt.keep_res:
s = np.array([self.opt.input_w, self.opt.input_h], dtype=np.int32)
else:
s = np.array([width, height], dtype=np.int32)
aug = False
if self.split == 'train' and np.random.random() < self.opt.aug_ddd:
aug = True
sf = self.opt.scale
cf = self.opt.shift
s = s * np.clip(np.random.randn()*sf + 1, 1 - sf, 1 + sf)
c[0] += img.shape[1] * np.clip(np.random.randn()*cf, -2*cf, 2*cf)
c[1] += img.shape[0] * np.clip(np.random.randn()*cf, -2*cf, 2*cf)
trans_input = get_affine_transform(
c, s, 0, [self.opt.input_w, self.opt.input_h])
inp = cv2.warpAffine(img, trans_input,
(self.opt.input_w, self.opt.input_h),
flags=cv2.INTER_LINEAR)
inp = (inp.astype(np.float32) / 255.)
# if self.split == 'train' and not self.opt.no_color_aug:
# color_aug(self._data_rng, inp, self._eig_val, self._eig_vec)
inp = (inp - self.mean) / self.std
inp = inp.transpose(2, 0, 1)
num_classes = self.opt.num_classes
trans_output = get_affine_transform(
c, s, 0, [self.opt.output_w, self.opt.output_h])
hm = np.zeros(
(num_classes, self.opt.output_h, self.opt.output_w), dtype=np.float32)
wh = np.zeros((self.max_objs, 2), dtype=np.float32)
reg = np.zeros((self.max_objs, 2), dtype=np.float32)
dep = np.zeros((self.max_objs, 1), dtype=np.float32)
rotbin = np.zeros((self.max_objs, 2), dtype=np.int64)
rotres = np.zeros((self.max_objs, 2), dtype=np.float32)
dim = np.zeros((self.max_objs, 3), dtype=np.float32)
ind = np.zeros((self.max_objs), dtype=np.int64)
reg_mask = np.zeros((self.max_objs), dtype=np.uint8)
rot_mask = np.zeros((self.max_objs), dtype=np.uint8)
ann_ids = self.coco.getAnnIds(imgIds=[img_id])
anns = self.coco.loadAnns(ids=ann_ids)
num_objs = min(len(anns), self.max_objs)
draw_gaussian = draw_msra_gaussian if self.opt.mse_loss else \
draw_umich_gaussian
gt_det = []
for k in range(num_objs):
ann = anns[k]
bbox = self._coco_box_to_bbox(ann['bbox'])
cls_id = int(self.cat_ids[ann['category_id']])
if cls_id <= -99:
continue
# if flipped:
# bbox[[0, 2]] = width - bbox[[2, 0]] - 1
bbox[:2] = affine_transform(bbox[:2], trans_output)
bbox[2:] = affine_transform(bbox[2:], trans_output)
bbox[[0, 2]] = np.clip(bbox[[0, 2]], 0, self.opt.output_w - 1)
bbox[[1, 3]] = np.clip(bbox[[1, 3]], 0, self.opt.output_h - 1)
h, w = bbox[3] - bbox[1], bbox[2] - bbox[0]
if h > 0 and w > 0:
radius = gaussian_radius((h, w))
radius = max(0, int(radius))
ct = np.array(
[(bbox[0] + bbox[2]) / 2, (bbox[1] + bbox[3]) / 2], dtype=np.float32)
ct_int = ct.astype(np.int32)
if cls_id < 0:
ignore_id = [_ for _ in range(num_classes)] \
if cls_id == - 1 else [- cls_id - 2]
if self.opt.rect_mask:
hm[ignore_id, int(bbox[1]): int(bbox[3]) + 1,
int(bbox[0]): int(bbox[2]) + 1] = 0.9999
else:
for cc in ignore_id:
draw_gaussian(hm[cc], ct, radius)
hm[ignore_id, ct_int[1], ct_int[0]] = 0.9999
continue
draw_gaussian(hm[cls_id], ct, radius)
wh[k] = 1. * w, 1. * h
gt_det.append([ct[0], ct[1], 1] + \
self._alpha_to_8(self._convert_alpha(ann['alpha'])) + \
[ann['depth']] + (np.array(ann['dim']) / 1).tolist() + [cls_id])
if self.opt.reg_bbox:
gt_det[-1] = gt_det[-1][:-1] + [w, h] + [gt_det[-1][-1]]
# if (not self.opt.car_only) or cls_id == 1: # Only estimate ADD for cars !!!
if 1:
alpha = self._convert_alpha(ann['alpha'])
# print('img_id cls_id alpha rot_y', img_path, cls_id, alpha, ann['rotation_y'])
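# Rotation is encoded with two overlapping bins centred at -pi/2 and +pi/2:
# rotbin marks bin membership and rotres stores the residual from the bin centre.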
if alpha < np.pi / 6. or alpha > 5 * np.pi / 6.:
rotbin[k, 0] = 1
rotres[k, 0] = alpha - (-0.5 * np.pi)
if alpha > -np.pi / 6. or alpha < -5 * np.pi / 6.:
rotbin[k, 1] = 1
rotres[k, 1] = alpha - (0.5 * np.pi)
dep[k] = ann['depth']
dim[k] = ann['dim']
# print(' cat dim', cls_id, dim[k])
ind[k] = ct_int[1] * self.opt.output_w + ct_int[0]
reg[k] = ct - ct_int
reg_mask[k] = 1 if not aug else 0
rot_mask[k] = 1
# print('gt_det', gt_det)
# print('')
ret = {'input': inp, 'hm': hm, 'dep': dep, 'dim': dim, 'ind': ind,
'rotbin': rotbin, 'rotres': rotres, 'reg_mask': reg_mask,
'rot_mask': rot_mask}
if self.opt.reg_bbox:
ret.update({'wh': wh})
if self.opt.reg_offset:
ret.update({'reg': reg})
if self.opt.debug > 0 or not ('train' in self.split):
gt_det = np.array(gt_det, dtype=np.float32) if len(gt_det) > 0 else \
np.zeros((1, 18), dtype=np.float32)
meta = {'c': c, 's': s, 'gt_det': gt_det, 'calib': calib,
'image_path': img_path, 'img_id': img_id}
ret['meta'] = meta
return ret
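# Ground-truth rotation encoding used in gt_det:
# [bin1_cls(2), bin1_sin, bin1_cos, bin2_cls(2), bin2_sin, bin2_cos].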
def _alpha_to_8(self, alpha):
# return [alpha, 0, 0, 0, 0, 0, 0, 0]
ret = [0, 0, 0, 1, 0, 0, 0, 1]
if alpha < np.pi / 6. or alpha > 5 * np.pi / 6.:
r = alpha - (-0.5 * np.pi)
ret[1] = 1
ret[2], ret[3] = np.sin(r), np.cos(r)
if alpha > -np.pi / 6. or alpha < -5 * np.pi / 6.:
r = alpha - (0.5 * np.pi)
ret[5] = 1
ret[6], ret[7] = np.sin(r), np.cos(r)
return ret
| 6,801 | 38.777778 | 90 | py |
SyNet | SyNet-master/CenterNet/src/lib/datasets/sample/multi_pose.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch.utils.data as data
import numpy as np
import torch
import json
import cv2
import os
from utils.image import flip, color_aug
from utils.image import get_affine_transform, affine_transform
from utils.image import gaussian_radius, draw_umich_gaussian, draw_msra_gaussian
from utils.image import draw_dense_reg
import math
class MultiPoseDataset(data.Dataset):
def _coco_box_to_bbox(self, box):
bbox = np.array([box[0], box[1], box[0] + box[2], box[1] + box[3]],
dtype=np.float32)
return bbox
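# Shrink the border until the random-crop centre range [border, size - border) is non-empty.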
def _get_border(self, border, size):
i = 1
while size - border // i <= border // i:
i *= 2
return border // i
def __getitem__(self, index):
img_id = self.images[index]
file_name = self.coco.loadImgs(ids=[img_id])[0]['file_name']
img_path = os.path.join(self.img_dir, file_name)
ann_ids = self.coco.getAnnIds(imgIds=[img_id])
anns = self.coco.loadAnns(ids=ann_ids)
num_objs = min(len(anns), self.max_objs)
img = cv2.imread(img_path)
height, width = img.shape[0], img.shape[1]
c = np.array([img.shape[1] / 2., img.shape[0] / 2.], dtype=np.float32)
s = max(img.shape[0], img.shape[1]) * 1.0
rot = 0
flipped = False
if self.split == 'train':
if not self.opt.not_rand_crop:
s = s * np.random.choice(np.arange(0.6, 1.4, 0.1))
w_border = self._get_border(128, img.shape[1])
h_border = self._get_border(128, img.shape[0])
c[0] = np.random.randint(low=w_border, high=img.shape[1] - w_border)
c[1] = np.random.randint(low=h_border, high=img.shape[0] - h_border)
else:
sf = self.opt.scale
cf = self.opt.shift
c[0] += s * np.clip(np.random.randn()*cf, -2*cf, 2*cf)
c[1] += s * np.clip(np.random.randn()*cf, -2*cf, 2*cf)
s = s * np.clip(np.random.randn()*sf + 1, 1 - sf, 1 + sf)
if np.random.random() < self.opt.aug_rot:
rf = self.opt.rotate
rot = np.clip(np.random.randn()*rf, -rf*2, rf*2)
if np.random.random() < self.opt.flip:
flipped = True
img = img[:, ::-1, :]
c[0] = width - c[0] - 1
trans_input = get_affine_transform(
c, s, rot, [self.opt.input_res, self.opt.input_res])
inp = cv2.warpAffine(img, trans_input,
(self.opt.input_res, self.opt.input_res),
flags=cv2.INTER_LINEAR)
inp = (inp.astype(np.float32) / 255.)
if self.split == 'train' and not self.opt.no_color_aug:
color_aug(self._data_rng, inp, self._eig_val, self._eig_vec)
inp = (inp - self.mean) / self.std
inp = inp.transpose(2, 0, 1)
output_res = self.opt.output_res
num_joints = self.num_joints
trans_output_rot = get_affine_transform(c, s, rot, [output_res, output_res])
trans_output = get_affine_transform(c, s, 0, [output_res, output_res])
hm = np.zeros((self.num_classes, output_res, output_res), dtype=np.float32)
hm_hp = np.zeros((num_joints, output_res, output_res), dtype=np.float32)
dense_kps = np.zeros((num_joints, 2, output_res, output_res),
dtype=np.float32)
dense_kps_mask = np.zeros((num_joints, output_res, output_res),
dtype=np.float32)
wh = np.zeros((self.max_objs, 2), dtype=np.float32)
kps = np.zeros((self.max_objs, num_joints * 2), dtype=np.float32)
reg = np.zeros((self.max_objs, 2), dtype=np.float32)
ind = np.zeros((self.max_objs), dtype=np.int64)
reg_mask = np.zeros((self.max_objs), dtype=np.uint8)
kps_mask = np.zeros((self.max_objs, self.num_joints * 2), dtype=np.uint8)
hp_offset = np.zeros((self.max_objs * num_joints, 2), dtype=np.float32)
hp_ind = np.zeros((self.max_objs * num_joints), dtype=np.int64)
hp_mask = np.zeros((self.max_objs * num_joints), dtype=np.int64)
draw_gaussian = draw_msra_gaussian if self.opt.mse_loss else \
draw_umich_gaussian
gt_det = []
for k in range(num_objs):
ann = anns[k]
bbox = self._coco_box_to_bbox(ann['bbox'])
cls_id = int(ann['category_id']) - 1
pts = np.array(ann['keypoints'], np.float32).reshape(num_joints, 3)
if flipped:
bbox[[0, 2]] = width - bbox[[2, 0]] - 1
pts[:, 0] = width - pts[:, 0] - 1
for e in self.flip_idx:
pts[e[0]], pts[e[1]] = pts[e[1]].copy(), pts[e[0]].copy()
bbox[:2] = affine_transform(bbox[:2], trans_output)
bbox[2:] = affine_transform(bbox[2:], trans_output)
bbox = np.clip(bbox, 0, output_res - 1)
h, w = bbox[3] - bbox[1], bbox[2] - bbox[0]
if (h > 0 and w > 0) or (rot != 0):
radius = gaussian_radius((math.ceil(h), math.ceil(w)))
radius = self.opt.hm_gauss if self.opt.mse_loss else max(0, int(radius))
ct = np.array(
[(bbox[0] + bbox[2]) / 2, (bbox[1] + bbox[3]) / 2], dtype=np.float32)
ct_int = ct.astype(np.int32)
wh[k] = 1. * w, 1. * h
ind[k] = ct_int[1] * output_res + ct_int[0]
reg[k] = ct - ct_int
reg_mask[k] = 1
num_kpts = pts[:, 2].sum()
if num_kpts == 0:
hm[cls_id, ct_int[1], ct_int[0]] = 0.9999
reg_mask[k] = 0
hp_radius = gaussian_radius((math.ceil(h), math.ceil(w)))
hp_radius = self.opt.hm_gauss \
if self.opt.mse_loss else max(0, int(hp_radius))
for j in range(num_joints):
if pts[j, 2] > 0:
pts[j, :2] = affine_transform(pts[j, :2], trans_output_rot)
if pts[j, 0] >= 0 and pts[j, 0] < output_res and \
pts[j, 1] >= 0 and pts[j, 1] < output_res:
kps[k, j * 2: j * 2 + 2] = pts[j, :2] - ct_int
kps_mask[k, j * 2: j * 2 + 2] = 1
pt_int = pts[j, :2].astype(np.int32)
hp_offset[k * num_joints + j] = pts[j, :2] - pt_int
hp_ind[k * num_joints + j] = pt_int[1] * output_res + pt_int[0]
hp_mask[k * num_joints + j] = 1
if self.opt.dense_hp:
# must be before draw center hm gaussian
draw_dense_reg(dense_kps[j], hm[cls_id], ct_int,
pts[j, :2] - ct_int, radius, is_offset=True)
draw_gaussian(dense_kps_mask[j], ct_int, radius)
draw_gaussian(hm_hp[j], pt_int, hp_radius)
draw_gaussian(hm[cls_id], ct_int, radius)
gt_det.append([ct[0] - w / 2, ct[1] - h / 2,
ct[0] + w / 2, ct[1] + h / 2, 1] +
pts[:, :2].reshape(num_joints * 2).tolist() + [cls_id])
if rot != 0:
hm = hm * 0 + 0.9999
reg_mask *= 0
kps_mask *= 0
ret = {'input': inp, 'hm': hm, 'reg_mask': reg_mask, 'ind': ind, 'wh': wh,
'hps': kps, 'hps_mask': kps_mask}
if self.opt.dense_hp:
dense_kps = dense_kps.reshape(num_joints * 2, output_res, output_res)
dense_kps_mask = dense_kps_mask.reshape(
num_joints, 1, output_res, output_res)
dense_kps_mask = np.concatenate([dense_kps_mask, dense_kps_mask], axis=1)
dense_kps_mask = dense_kps_mask.reshape(
num_joints * 2, output_res, output_res)
ret.update({'dense_hps': dense_kps, 'dense_hps_mask': dense_kps_mask})
del ret['hps'], ret['hps_mask']
if self.opt.reg_offset:
ret.update({'reg': reg})
if self.opt.hm_hp:
ret.update({'hm_hp': hm_hp})
if self.opt.reg_hp_offset:
ret.update({'hp_offset': hp_offset, 'hp_ind': hp_ind, 'hp_mask': hp_mask})
if self.opt.debug > 0 or not self.split == 'train':
gt_det = np.array(gt_det, dtype=np.float32) if len(gt_det) > 0 else \
np.zeros((1, 40), dtype=np.float32)
meta = {'c': c, 's': s, 'gt_det': gt_det, 'img_id': img_id}
ret['meta'] = meta
return ret
| 7,913 | 42.01087 | 81 | py |
SyNet | SyNet-master/CenterNet/src/lib/datasets/dataset/kitti.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch.utils.data as data
import pycocotools.coco as coco
import numpy as np
import torch
import json
import cv2
import os
import math
class KITTI(data.Dataset):
num_classes = 3
default_resolution = [384, 1280]
mean = np.array([0.485, 0.456, 0.406], np.float32).reshape(1, 1, 3)
std = np.array([0.229, 0.224, 0.225], np.float32).reshape(1, 1, 3)
def __init__(self, opt, split):
super(KITTI, self).__init__()
self.data_dir = os.path.join(opt.data_dir, 'kitti')
self.img_dir = os.path.join(self.data_dir, 'images', 'trainval')
if opt.trainval:
split = 'trainval' if split == 'train' else 'test'
self.img_dir = os.path.join(self.data_dir, 'images', split)
self.annot_path = os.path.join(
self.data_dir, 'annotations', 'kitti_{}.json').format(split)
else:
self.annot_path = os.path.join(self.data_dir,
'annotations', 'kitti_{}_{}.json').format(opt.kitti_split, split)
self.max_objs = 50
self.class_name = [
'__background__', 'Pedestrian', 'Car', 'Cyclist']
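# Negative training ids mark annotations treated as ignore regions by the ddd sampler; ids <= -99 are skipped entirely.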
self.cat_ids = {1:0, 2:1, 3:2, 4:-3, 5:-3, 6:-2, 7:-99, 8:-99, 9:-1}
self._data_rng = np.random.RandomState(123)
self._eig_val = np.array([0.2141788, 0.01817699, 0.00341571],
dtype=np.float32)
self._eig_vec = np.array([
[-0.58752847, -0.69563484, 0.41340352],
[-0.5832747, 0.00994535, -0.81221408],
[-0.56089297, 0.71832671, 0.41158938]
], dtype=np.float32)
self.split = split
self.opt = opt
self.alpha_in_degree = False
print('==> initializing kitti {}, {} data.'.format(opt.kitti_split, split))
self.coco = coco.COCO(self.annot_path)
self.images = self.coco.getImgIds()
self.num_samples = len(self.images)
print('Loaded {} {} samples'.format(split, self.num_samples))
def __len__(self):
return self.num_samples
def _to_float(self, x):
return float("{:.2f}".format(x))
def convert_eval_format(self, all_bboxes):
pass
def save_results(self, results, save_dir):
results_dir = os.path.join(save_dir, 'results')
if not os.path.exists(results_dir):
os.mkdir(results_dir)
for img_id in results.keys():
out_path = os.path.join(results_dir, '{:06d}.txt'.format(img_id))
f = open(out_path, 'w')
for cls_ind in results[img_id]:
for j in range(len(results[img_id][cls_ind])):
class_name = self.class_name[cls_ind]
f.write('{} 0.0 0'.format(class_name))
for i in range(len(results[img_id][cls_ind][j])):
f.write(' {:.2f}'.format(results[img_id][cls_ind][j][i]))
f.write('\n')
f.close()
def run_eval(self, results, save_dir):
self.save_results(results, save_dir)
os.system('./tools/kitti_eval/evaluate_object_3d_offline ' + \
'../data/kitti/training/label_val ' + \
'{}/results/'.format(save_dir))
| 3,058 | 32.988889 | 79 | py |
SyNet | SyNet-master/CenterNet/src/lib/datasets/dataset/visdrone.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import pycocotools.coco as coco
from pycocotools.cocoeval import COCOeval
import numpy as np
import json
import os
import torch.utils.data as data
class Visdrone(data.Dataset):
num_classes = 10
default_resolution = [512, 512]
mean = np.array([0.40789654, 0.44719302, 0.47026115],
dtype=np.float32).reshape(1, 1, 3)
std = np.array([0.28863828, 0.27408164, 0.27809835],
dtype=np.float32).reshape(1, 1, 3)
def __init__(self, opt, split):
super(Visdrone, self).__init__()
self.data_dir = os.path.join(opt.data_dir, 'coco')
self.img_dir = os.path.join(self.data_dir, '{}2017'.format(split))
if split == 'test':
self.annot_path = os.path.join(
self.data_dir, 'annotations',
'image_info_test-dev2017.json').format(split)
else:
if opt.task == 'exdet':
self.annot_path = os.path.join(
self.data_dir, 'annotations',
'instances_extreme_{}2017.json').format(split)
else:
self.annot_path = os.path.join(
self.data_dir, 'annotations',
'instances_{}2017.json').format(split)
self.max_objs = 16
self.class_name = ["pedestrian", "people", "bicycle", "car", "van", "truck", "tricycle", "awning-tricycle", "bus", "motor"]
self._valid_ids = [
1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
self.cat_ids = {v: i for i, v in enumerate(self._valid_ids)}
self.voc_color = [(v // 32 * 64 + 64, (v // 8) % 4 * 64, v % 8 * 32) \
for v in range(1, self.num_classes + 1)]
self._data_rng = np.random.RandomState(123)
self._eig_val = np.array([0.2141788, 0.01817699, 0.00341571],
dtype=np.float32)
self._eig_vec = np.array([
[-0.58752847, -0.69563484, 0.41340352],
[-0.5832747, 0.00994535, -0.81221408],
[-0.56089297, 0.71832671, 0.41158938]
], dtype=np.float32)
# self.mean = np.array([0.485, 0.456, 0.406], np.float32).reshape(1, 1, 3)
# self.std = np.array([0.229, 0.224, 0.225], np.float32).reshape(1, 1, 3)
self.split = split
self.opt = opt
print('==> initializing coco 2017 {} data.'.format(split))
self.coco = coco.COCO(self.annot_path)
self.images = self.coco.getImgIds()
self.num_samples = len(self.images)
print('Loaded {} {} samples'.format(split, self.num_samples))
def _to_float(self, x):
return float("{:.2f}".format(x))
def convert_eval_format(self, all_bboxes):
# import pdb; pdb.set_trace()
detections = []
for image_id in all_bboxes:
for cls_ind in all_bboxes[image_id]:
category_id = self._valid_ids[cls_ind - 1]
for bbox in all_bboxes[image_id][cls_ind]:
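# Convert [x1, y1, x2, y2] corners into COCO [x, y, w, h] before serialising.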
bbox[2] -= bbox[0]
bbox[3] -= bbox[1]
score = bbox[4]
bbox_out = list(map(self._to_float, bbox[0:4]))
detection = {
"image_id": int(image_id),
"category_id": int(category_id),
"bbox": bbox_out,
"score": float("{:.2f}".format(score))
}
if len(bbox) > 5:
extreme_points = list(map(self._to_float, bbox[5:13]))
detection["extreme_points"] = extreme_points
detections.append(detection)
return detections
def __len__(self):
return self.num_samples
def save_results(self, results, save_dir):
json.dump(self.convert_eval_format(results),
open('{}/results.json'.format(save_dir), 'w'))
def run_eval(self, results, save_dir):
# result_json = os.path.join(save_dir, "results.json")
# detections = self.convert_eval_format(results)
# json.dump(detections, open(result_json, "w"))
self.save_results(results, save_dir)
coco_dets = self.coco.loadRes('{}/results.json'.format(save_dir))
coco_eval = COCOeval(self.coco, coco_dets, "bbox")
coco_eval.evaluate()
coco_eval.accumulate()
coco_eval.summarize()
| 4,040 | 35.405405 | 127 | py |
SyNet | SyNet-master/CenterNet/src/lib/datasets/dataset/coco_hp.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import pycocotools.coco as coco
from pycocotools.cocoeval import COCOeval
import numpy as np
import json
import os
import torch.utils.data as data
class COCOHP(data.Dataset):
num_classes = 13
default_resolution = [512, 512]
mean = np.array([0.40789654, 0.44719302, 0.47026115],
dtype=np.float32).reshape(1, 1, 3)
std = np.array([0.28863828, 0.27408164, 0.27809835],
dtype=np.float32).reshape(1, 1, 3)
def __init__(self, opt, split):
super(COCOHP, self).__init__()
self.data_dir = os.path.join(opt.data_dir, 'coco')
self.img_dir = os.path.join(self.data_dir, '{}2017'.format(split))
if split == 'test':
self.annot_path = os.path.join(
self.data_dir, 'annotations',
'image_info_test-dev2017.json').format(split)
else:
if opt.task == 'exdet':
self.annot_path = os.path.join(
self.data_dir, 'annotations',
'instances_extreme_{}2017.json').format(split)
else:
self.annot_path = os.path.join(
self.data_dir, 'annotations',
'instances_{}2017.json').format(split)
self.max_objs = 128
self.class_name = ['short sleeve top', 'long sleeve top', 'short sleeve outwear', 'long sleeve outwear', 'vest',
'sling', 'shorts', 'trousers', 'skirt', 'short sleeve dress', 'long sleeve dress',
'vest dress', 'sling dress']
self._valid_ids = [
1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13]
self.cat_ids = {v: i for i, v in enumerate(self._valid_ids)}
self.voc_color = [(v // 32 * 64 + 64, (v // 8) % 4 * 64, v % 8 * 32) \
for v in range(1, self.num_classes + 1)]
self._data_rng = np.random.RandomState(123)
self._eig_val = np.array([0.2141788, 0.01817699, 0.00341571],
dtype=np.float32)
self._eig_vec = np.array([
[-0.58752847, -0.69563484, 0.41340352],
[-0.5832747, 0.00994535, -0.81221408],
[-0.56089297, 0.71832671, 0.41158938]
], dtype=np.float32)
# self.mean = np.array([0.485, 0.456, 0.406], np.float32).reshape(1, 1, 3)
# self.std = np.array([0.229, 0.224, 0.225], np.float32).reshape(1, 1, 3)
self.split = split
self.opt = opt
print('==> initializing coco 2017 {} data.'.format(split))
self.coco = coco.COCO(self.annot_path)
self.images = self.coco.getImgIds()
self.num_samples = len(self.images)
print('Loaded {} {} samples'.format(split, self.num_samples))
def _to_float(self, x):
return float("{:.2f}".format(x))
def convert_eval_format(self, all_bboxes):
# import pdb; pdb.set_trace()
detections = []
for image_id in all_bboxes:
for cls_ind in all_bboxes[image_id]:
category_id = self._valid_ids[cls_ind - 1]
for bbox in all_bboxes[image_id][cls_ind]:
bbox[2] -= bbox[0]
bbox[3] -= bbox[1]
score = bbox[4]
bbox_out = list(map(self._to_float, bbox[0:4]))
detection = {
"image_id": int(image_id),
"category_id": int(category_id),
"bbox": bbox_out,
"score": float("{:.2f}".format(score))
}
if len(bbox) > 5:
extreme_points = list(map(self._to_float, bbox[5:13]))
detection["extreme_points"] = extreme_points
detections.append(detection)
return detections
def __len__(self):
return self.num_samples
def save_results(self, results, save_dir):
json.dump(self.convert_eval_format(results),
open('{}/results.json'.format(save_dir), 'w'))
def run_eval(self, results, save_dir):
# result_json = os.path.join(save_dir, "results.json")
# detections = self.convert_eval_format(results)
# json.dump(detections, open(result_json, "w"))
self.save_results(results, save_dir)
coco_dets = self.coco.loadRes('{}/results.json'.format(save_dir))
coco_eval = COCOeval(self.coco, coco_dets, "bbox")
coco_eval.evaluate()
coco_eval.accumulate()
coco_eval.summarize()
| 4,644 | 39.745614 | 120 | py |
SyNet | SyNet-master/CenterNet/src/lib/datasets/dataset/pascal.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import pycocotools.coco as coco
import numpy as np
import torch
import json
import os
import torch.utils.data as data
class PascalVOC(data.Dataset):
num_classes = 20
default_resolution = [384, 384]
mean = np.array([0.485, 0.456, 0.406],
dtype=np.float32).reshape(1, 1, 3)
std = np.array([0.229, 0.224, 0.225],
dtype=np.float32).reshape(1, 1, 3)
def __init__(self, opt, split):
super(PascalVOC, self).__init__()
self.data_dir = os.path.join(opt.data_dir, 'voc')
self.img_dir = os.path.join(self.data_dir, 'images')
_ann_name = {'train': 'trainval0712', 'val': 'test2007'}
self.annot_path = os.path.join(
self.data_dir, 'annotations',
'pascal_{}.json').format(_ann_name[split])
self.max_objs = 50
self.class_name = ['__background__', "aeroplane", "bicycle", "bird", "boat",
"bottle", "bus", "car", "cat", "chair", "cow", "diningtable", "dog",
"horse", "motorbike", "person", "pottedplant", "sheep", "sofa",
"train", "tvmonitor"]
self._valid_ids = np.arange(1, 21, dtype=np.int32)
self.cat_ids = {v: i for i, v in enumerate(self._valid_ids)}
self._data_rng = np.random.RandomState(123)
self._eig_val = np.array([0.2141788, 0.01817699, 0.00341571],
dtype=np.float32)
self._eig_vec = np.array([
[-0.58752847, -0.69563484, 0.41340352],
[-0.5832747, 0.00994535, -0.81221408],
[-0.56089297, 0.71832671, 0.41158938]
], dtype=np.float32)
self.split = split
self.opt = opt
print('==> initializing pascal {} data.'.format(_ann_name[split]))
self.coco = coco.COCO(self.annot_path)
self.images = sorted(self.coco.getImgIds())
self.num_samples = len(self.images)
print('Loaded {} {} samples'.format(split, self.num_samples))
def _to_float(self, x):
return float("{:.2f}".format(x))
def convert_eval_format(self, all_bboxes):
detections = [[[] for __ in range(self.num_samples)] \
for _ in range(self.num_classes + 1)]
for i in range(self.num_samples):
img_id = self.images[i]
for j in range(1, self.num_classes + 1):
if isinstance(all_bboxes[img_id][j], np.ndarray):
detections[j][i] = all_bboxes[img_id][j].tolist()
else:
detections[j][i] = all_bboxes[img_id][j]
return detections
def __len__(self):
return self.num_samples
def save_results(self, results, save_dir):
json.dump(self.convert_eval_format(results),
open('{}/results.json'.format(save_dir), 'w'))
def run_eval(self, results, save_dir):
# result_json = os.path.join(save_dir, "results.json")
# detections = self.convert_eval_format(results)
# json.dump(detections, open(result_json, "w"))
self.save_results(results, save_dir)
os.system('python tools/reval.py ' + \
'{}/results.json'.format(save_dir))
| 3,032 | 35.542169 | 80 | py |
SyNet | SyNet-master/CenterNet/src/lib/datasets/dataset/fashion.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import pycocotools.coco as coco
from pycocotools.cocoeval import COCOeval
import numpy as np
import json
import os
import torch.utils.data as data
class Fashion(data.Dataset):
num_classes = 13
default_resolution = [512, 512]
mean = np.array([0.40789654, 0.44719302, 0.47026115],
dtype=np.float32).reshape(1, 1, 3)
std = np.array([0.28863828, 0.27408164, 0.27809835],
dtype=np.float32).reshape(1, 1, 3)
def __init__(self, opt, split):
super(Fashion, self).__init__()
self.data_dir = os.path.join(opt.data_dir, 'coco')
self.img_dir = os.path.join(self.data_dir, '{}2017'.format(split))
if split == 'test':
self.annot_path = os.path.join(
self.data_dir, 'annotations',
'image_info_test-dev2017.json').format(split)
else:
if opt.task == 'exdet':
self.annot_path = os.path.join(
self.data_dir, 'annotations',
'instances_extreme_{}2017.json').format(split)
else:
self.annot_path = os.path.join(
self.data_dir, 'annotations',
'instances_{}2017.json').format(split)
self.max_objs = 16
self.class_name = ['short sleeve top', 'long sleeve top', 'short sleeve outwear', 'long sleeve outwear', 'vest', 'sling', 'shorts', 'trousers', 'skirt', 'short sleeve dress', 'long sleeve dress', 'vest dress', 'sling dress']
self._valid_ids = [
1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
self.cat_ids = {v: i for i, v in enumerate(self._valid_ids)}
self.voc_color = [(v // 32 * 64 + 64, (v // 8) % 4 * 64, v % 8 * 32) \
for v in range(1, self.num_classes + 1)]
self._data_rng = np.random.RandomState(123)
self._eig_val = np.array([0.2141788, 0.01817699, 0.00341571],
dtype=np.float32)
self._eig_vec = np.array([
[-0.58752847, -0.69563484, 0.41340352],
[-0.5832747, 0.00994535, -0.81221408],
[-0.56089297, 0.71832671, 0.41158938]
], dtype=np.float32)
# self.mean = np.array([0.485, 0.456, 0.406], np.float32).reshape(1, 1, 3)
# self.std = np.array([0.229, 0.224, 0.225], np.float32).reshape(1, 1, 3)
self.split = split
self.opt = opt
print('==> initializing coco 2017 {} data.'.format(split))
self.coco = coco.COCO(self.annot_path)
self.images = self.coco.getImgIds()
self.num_samples = len(self.images)
print('Loaded {} {} samples'.format(split, self.num_samples))
def _to_float(self, x):
return float("{:.2f}".format(x))
def convert_eval_format(self, all_bboxes):
# import pdb; pdb.set_trace()
detections = []
for image_id in all_bboxes:
for cls_ind in all_bboxes[image_id]:
category_id = self._valid_ids[cls_ind - 1]
for bbox in all_bboxes[image_id][cls_ind]:
bbox[2] -= bbox[0]
bbox[3] -= bbox[1]
score = bbox[4]
bbox_out = list(map(self._to_float, bbox[0:4]))
detection = {
"image_id": int(image_id),
"category_id": int(category_id),
"bbox": bbox_out,
"score": float("{:.2f}".format(score))
}
if len(bbox) > 5:
extreme_points = list(map(self._to_float, bbox[5:13]))
detection["extreme_points"] = extreme_points
detections.append(detection)
return detections
def __len__(self):
return self.num_samples
def save_results(self, results, save_dir):
json.dump(self.convert_eval_format(results),
open('{}/results.json'.format(save_dir), 'w'))
def run_eval(self, results, save_dir):
# result_json = os.path.join(save_dir, "results.json")
# detections = self.convert_eval_format(results)
# json.dump(detections, open(result_json, "w"))
self.save_results(results, save_dir)
coco_dets = self.coco.loadRes('{}/results.json'.format(save_dir))
coco_eval = COCOeval(self.coco, coco_dets, "bbox")
coco_eval.evaluate()
coco_eval.accumulate()
coco_eval.summarize()
| 4,151 | 36.405405 | 228 | py |
SyNet | SyNet-master/CenterNet/src/lib/datasets/dataset/coco.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import pycocotools.coco as coco
from pycocotools.cocoeval import COCOeval
import numpy as np
import json
import os
import torch.utils.data as data
class COCO(data.Dataset):
num_classes = 80
default_resolution = [512, 512]
mean = np.array([0.40789654, 0.44719302, 0.47026115],
dtype=np.float32).reshape(1, 1, 3)
std = np.array([0.28863828, 0.27408164, 0.27809835],
dtype=np.float32).reshape(1, 1, 3)
def __init__(self, opt, split):
super(COCO, self).__init__()
self.data_dir = os.path.join(opt.data_dir, 'coco')
self.img_dir = os.path.join(self.data_dir, '{}2017'.format(split))
if split == 'test':
self.annot_path = os.path.join(
self.data_dir, 'annotations',
'image_info_test-dev2017.json').format(split)
else:
if opt.task == 'exdet':
self.annot_path = os.path.join(
self.data_dir, 'annotations',
'instances_extreme_{}2017.json').format(split)
else:
self.annot_path = os.path.join(
self.data_dir, 'annotations',
'instances_{}2017.json').format(split)
self.max_objs = 128
self.class_name = [
'__background__', 'person', 'bicycle', 'car', 'motorcycle', 'airplane',
'bus', 'train', 'truck', 'boat', 'traffic light', 'fire hydrant',
'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse',
'sheep', 'cow', 'elephant', 'bear', 'zebra', 'giraffe', 'backpack',
'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee', 'skis',
'snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball glove',
'skateboard', 'surfboard', 'tennis racket', 'bottle', 'wine glass',
'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple', 'sandwich',
'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake',
'chair', 'couch', 'potted plant', 'bed', 'dining table', 'toilet', 'tv',
'laptop', 'mouse', 'remote', 'keyboard', 'cell phone', 'microwave',
'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase',
'scissors', 'teddy bear', 'hair drier', 'toothbrush']
self._valid_ids = [
1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13,
14, 15, 16, 17, 18, 19, 20, 21, 22, 23,
24, 25, 27, 28, 31, 32, 33, 34, 35, 36,
37, 38, 39, 40, 41, 42, 43, 44, 46, 47,
48, 49, 50, 51, 52, 53, 54, 55, 56, 57,
58, 59, 60, 61, 62, 63, 64, 65, 67, 70,
72, 73, 74, 75, 76, 77, 78, 79, 80, 81,
82, 84, 85, 86, 87, 88, 89, 90]
self.cat_ids = {v: i for i, v in enumerate(self._valid_ids)}
self.voc_color = [(v // 32 * 64 + 64, (v // 8) % 4 * 64, v % 8 * 32) \
for v in range(1, self.num_classes + 1)]
self._data_rng = np.random.RandomState(123)
self._eig_val = np.array([0.2141788, 0.01817699, 0.00341571],
dtype=np.float32)
self._eig_vec = np.array([
[-0.58752847, -0.69563484, 0.41340352],
[-0.5832747, 0.00994535, -0.81221408],
[-0.56089297, 0.71832671, 0.41158938]
], dtype=np.float32)
# self.mean = np.array([0.485, 0.456, 0.406], np.float32).reshape(1, 1, 3)
# self.std = np.array([0.229, 0.224, 0.225], np.float32).reshape(1, 1, 3)
self.split = split
self.opt = opt
print('==> initializing coco 2017 {} data.'.format(split))
self.coco = coco.COCO(self.annot_path)
self.images = self.coco.getImgIds()
self.num_samples = len(self.images)
print('Loaded {} {} samples'.format(split, self.num_samples))
def _to_float(self, x):
return float("{:.2f}".format(x))
def convert_eval_format(self, all_bboxes):
# import pdb; pdb.set_trace()
detections = []
for image_id in all_bboxes:
for cls_ind in all_bboxes[image_id]:
category_id = self._valid_ids[cls_ind - 1]
for bbox in all_bboxes[image_id][cls_ind]:
bbox[2] -= bbox[0]
bbox[3] -= bbox[1]
score = bbox[4]
bbox_out = list(map(self._to_float, bbox[0:4]))
detection = {
"image_id": int(image_id),
"category_id": int(category_id),
"bbox": bbox_out,
"score": float("{:.2f}".format(score))
}
if len(bbox) > 5:
extreme_points = list(map(self._to_float, bbox[5:13]))
detection["extreme_points"] = extreme_points
detections.append(detection)
return detections
def __len__(self):
return self.num_samples
def save_results(self, results, save_dir):
json.dump(self.convert_eval_format(results),
open('{}/results.json'.format(save_dir), 'w'))
def run_eval(self, results, save_dir):
# result_json = os.path.join(save_dir, "results.json")
# detections = self.convert_eval_format(results)
# json.dump(detections, open(result_json, "w"))
self.save_results(results, save_dir)
coco_dets = self.coco.loadRes('{}/results.json'.format(save_dir))
coco_eval = COCOeval(self.coco, coco_dets, "bbox")
coco_eval.evaluate()
coco_eval.accumulate()
coco_eval.summarize()
| 5,214 | 39.115385 | 78 | py |
SyNet | SyNet-master/CenterNet/src/lib/utils/post_process.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from .image import transform_preds
from .ddd_utils import ddd2locrot
def get_pred_depth(depth):
return depth
def get_alpha(rot):
# output: (B, 8) [bin1_cls[0], bin1_cls[1], bin1_sin, bin1_cos,
# bin2_cls[0], bin2_cls[1], bin2_sin, bin2_cos]
# return rot[:, 0]
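# Pick the bin with the higher positive score and add back that bin's centre angle.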
idx = rot[:, 1] > rot[:, 5]
alpha1 = np.arctan2(rot[:, 2], rot[:, 3]) + (-0.5 * np.pi)
alpha2 = np.arctan2(rot[:, 6], rot[:, 7]) + ( 0.5 * np.pi)
return alpha1 * idx + alpha2 * (1 - idx)
def ddd_post_process_2d(dets, c, s, opt):
# dets: batch x max_dets x dim
# return 1-based class det list
ret = []
include_wh = dets.shape[2] > 16
for i in range(dets.shape[0]):
top_preds = {}
dets[i, :, :2] = transform_preds(
dets[i, :, 0:2], c[i], s[i], (opt.output_w, opt.output_h))
classes = dets[i, :, -1]
for j in range(opt.num_classes):
inds = (classes == j)
top_preds[j + 1] = np.concatenate([
dets[i, inds, :3].astype(np.float32),
get_alpha(dets[i, inds, 3:11])[:, np.newaxis].astype(np.float32),
get_pred_depth(dets[i, inds, 11:12]).astype(np.float32),
dets[i, inds, 12:15].astype(np.float32)], axis=1)
if include_wh:
top_preds[j + 1] = np.concatenate([
top_preds[j + 1],
transform_preds(
dets[i, inds, 15:17], c[i], s[i], (opt.output_w, opt.output_h))
.astype(np.float32)], axis=1)
ret.append(top_preds)
return ret
def ddd_post_process_3d(dets, calibs):
# dets: batch x max_dets x dim
# return 1-based class det list
ret = []
for i in range(len(dets)):
preds = {}
for cls_ind in dets[i].keys():
preds[cls_ind] = []
for j in range(len(dets[i][cls_ind])):
center = dets[i][cls_ind][j][:2]
score = dets[i][cls_ind][j][2]
alpha = dets[i][cls_ind][j][3]
depth = dets[i][cls_ind][j][4]
dimensions = dets[i][cls_ind][j][5:8]
wh = dets[i][cls_ind][j][8:10]
locations, rotation_y = ddd2locrot(
center, alpha, dimensions, depth, calibs[0])
bbox = [center[0] - wh[0] / 2, center[1] - wh[1] / 2,
center[0] + wh[0] / 2, center[1] + wh[1] / 2]
pred = [alpha] + bbox + dimensions.tolist() + \
locations.tolist() + [rotation_y, score]
preds[cls_ind].append(pred)
preds[cls_ind] = np.array(preds[cls_ind], dtype=np.float32)
ret.append(preds)
return ret
def ddd_post_process(dets, c, s, calibs, opt):
# dets: batch x max_dets x dim
# return 1-based class det list
dets = ddd_post_process_2d(dets, c, s, opt)
dets = ddd_post_process_3d(dets, calibs)
return dets
def ctdet_post_process(dets, c, s, h, w, num_classes):
# dets: batch x max_dets x dim
# return 1-based class det dict
ret = []
for i in range(dets.shape[0]):
top_preds = {}
dets[i, :, :2] = transform_preds(
dets[i, :, 0:2], c[i], s[i], (w, h))
dets[i, :, 2:4] = transform_preds(
dets[i, :, 2:4], c[i], s[i], (w, h))
classes = dets[i, :, -1]
for j in range(num_classes):
inds = (classes == j)
top_preds[j + 1] = np.concatenate([
dets[i, inds, :4].astype(np.float32),
dets[i, inds, 4:5].astype(np.float32)], axis=1).tolist()
ret.append(top_preds)
return ret
def multi_pose_post_process(dets, c, s, h, w):
# dets: batch x max_dets x 40
# return list of 39 in image coord
ret = []
for i in range(dets.shape[0]):
bbox = transform_preds(dets[i, :, :4].reshape(-1, 2), c[i], s[i], (w, h))
pts = transform_preds(dets[i, :, 5:39].reshape(-1, 2), c[i], s[i], (w, h))
top_preds = np.concatenate(
[bbox.reshape(-1, 4), dets[i, :, 4:5],
pts.reshape(-1, 34)], axis=1).astype(np.float32).tolist()
ret.append({np.ones(1, dtype=np.int32)[0]: top_preds})
return ret
| 3,958 | 33.426087 | 78 | py |
SyNet | SyNet-master/CenterNet/src/lib/utils/image.py | # ------------------------------------------------------------------------------
# Copyright (c) Microsoft
# Licensed under the MIT License.
# Written by Bin Xiao ([email protected])
# Modified by Xingyi Zhou
# ------------------------------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import cv2
import random
def flip(img):
return img[:, :, ::-1].copy()
def transform_preds(coords, center, scale, output_size):
target_coords = np.zeros(coords.shape)
trans = get_affine_transform(center, scale, 0, output_size, inv=1)
for p in range(coords.shape[0]):
target_coords[p, 0:2] = affine_transform(coords[p, 0:2], trans)
return target_coords
def get_affine_transform(center,
scale,
rot,
output_size,
shift=np.array([0, 0], dtype=np.float32),
inv=0):
if not isinstance(scale, np.ndarray) and not isinstance(scale, list):
scale = np.array([scale, scale], dtype=np.float32)
scale_tmp = scale
src_w = scale_tmp[0]
dst_w = output_size[0]
dst_h = output_size[1]
rot_rad = np.pi * rot / 180
src_dir = get_dir([0, src_w * -0.5], rot_rad)
dst_dir = np.array([0, dst_w * -0.5], np.float32)
src = np.zeros((3, 2), dtype=np.float32)
dst = np.zeros((3, 2), dtype=np.float32)
src[0, :] = center + scale_tmp * shift
src[1, :] = center + src_dir + scale_tmp * shift
dst[0, :] = [dst_w * 0.5, dst_h * 0.5]
dst[1, :] = np.array([dst_w * 0.5, dst_h * 0.5], np.float32) + dst_dir
src[2:, :] = get_3rd_point(src[0, :], src[1, :])
dst[2:, :] = get_3rd_point(dst[0, :], dst[1, :])
if inv:
trans = cv2.getAffineTransform(np.float32(dst), np.float32(src))
else:
trans = cv2.getAffineTransform(np.float32(src), np.float32(dst))
return trans
def affine_transform(pt, t):
new_pt = np.array([pt[0], pt[1], 1.], dtype=np.float32).T
new_pt = np.dot(t, new_pt)
return new_pt[:2]
def get_3rd_point(a, b):
direct = a - b
return b + np.array([-direct[1], direct[0]], dtype=np.float32)
def get_dir(src_point, rot_rad):
sn, cs = np.sin(rot_rad), np.cos(rot_rad)
src_result = [0, 0]
src_result[0] = src_point[0] * cs - src_point[1] * sn
src_result[1] = src_point[0] * sn + src_point[1] * cs
return src_result
def crop(img, center, scale, output_size, rot=0):
trans = get_affine_transform(center, scale, rot, output_size)
dst_img = cv2.warpAffine(img,
trans,
(int(output_size[0]), int(output_size[1])),
flags=cv2.INTER_LINEAR)
return dst_img
def gaussian_radius(det_size, min_overlap=0.7):
height, width = det_size
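# CornerNet-style radius: solve three quadratic cases for corner displacement and keep the
# smallest radius whose IoU with the ground-truth box stays >= min_overlap.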
a1 = 1
b1 = (height + width)
c1 = width * height * (1 - min_overlap) / (1 + min_overlap)
sq1 = np.sqrt(b1 ** 2 - 4 * a1 * c1)
r1 = (b1 + sq1) / 2
a2 = 4
b2 = 2 * (height + width)
c2 = (1 - min_overlap) * width * height
sq2 = np.sqrt(b2 ** 2 - 4 * a2 * c2)
r2 = (b2 + sq2) / 2
a3 = 4 * min_overlap
b3 = -2 * min_overlap * (height + width)
c3 = (min_overlap - 1) * width * height
sq3 = np.sqrt(b3 ** 2 - 4 * a3 * c3)
r3 = (b3 + sq3) / 2
return min(r1, r2, r3)
def gaussian2D(shape, sigma=1):
m, n = [(ss - 1.) / 2. for ss in shape]
y, x = np.ogrid[-m:m+1,-n:n+1]
h = np.exp(-(x * x + y * y) / (2 * sigma * sigma))
h[h < np.finfo(h.dtype).eps * h.max()] = 0
return h
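# Splat a clipped 2D Gaussian of the given radius onto the heatmap, merging with an element-wise max.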
def draw_umich_gaussian(heatmap, center, radius, k=1):
diameter = 2 * radius + 1
gaussian = gaussian2D((diameter, diameter), sigma=diameter / 6)
x, y = int(center[0]), int(center[1])
height, width = heatmap.shape[0:2]
left, right = min(x, radius), min(width - x, radius + 1)
top, bottom = min(y, radius), min(height - y, radius + 1)
masked_heatmap = heatmap[y - top:y + bottom, x - left:x + right]
masked_gaussian = gaussian[radius - top:radius + bottom, radius - left:radius + right]
if min(masked_gaussian.shape) > 0 and min(masked_heatmap.shape) > 0: # TODO debug
np.maximum(masked_heatmap, masked_gaussian * k, out=masked_heatmap)
return heatmap
def draw_dense_reg(regmap, heatmap, center, value, radius, is_offset=False):
diameter = 2 * radius + 1
gaussian = gaussian2D((diameter, diameter), sigma=diameter / 6)
value = np.array(value, dtype=np.float32).reshape(-1, 1, 1)
dim = value.shape[0]
reg = np.ones((dim, diameter*2+1, diameter*2+1), dtype=np.float32) * value
if is_offset and dim == 2:
delta = np.arange(diameter*2+1) - radius
reg[0] = reg[0] - delta.reshape(1, -1)
reg[1] = reg[1] - delta.reshape(-1, 1)
x, y = int(center[0]), int(center[1])
height, width = heatmap.shape[0:2]
left, right = min(x, radius), min(width - x, radius + 1)
top, bottom = min(y, radius), min(height - y, radius + 1)
masked_heatmap = heatmap[y - top:y + bottom, x - left:x + right]
masked_regmap = regmap[:, y - top:y + bottom, x - left:x + right]
masked_gaussian = gaussian[radius - top:radius + bottom,
radius - left:radius + right]
masked_reg = reg[:, radius - top:radius + bottom,
radius - left:radius + right]
if min(masked_gaussian.shape) > 0 and min(masked_heatmap.shape) > 0: # TODO debug
idx = (masked_gaussian >= masked_heatmap).reshape(
1, masked_gaussian.shape[0], masked_gaussian.shape[1])
masked_regmap = (1-idx) * masked_regmap + idx * masked_reg
regmap[:, y - top:y + bottom, x - left:x + right] = masked_regmap
return regmap
def draw_msra_gaussian(heatmap, center, sigma):
tmp_size = sigma * 3
mu_x = int(center[0] + 0.5)
mu_y = int(center[1] + 0.5)
w, h = heatmap.shape[0], heatmap.shape[1]
ul = [int(mu_x - tmp_size), int(mu_y - tmp_size)]
br = [int(mu_x + tmp_size + 1), int(mu_y + tmp_size + 1)]
if ul[0] >= h or ul[1] >= w or br[0] < 0 or br[1] < 0:
return heatmap
size = 2 * tmp_size + 1
x = np.arange(0, size, 1, np.float32)
y = x[:, np.newaxis]
x0 = y0 = size // 2
g = np.exp(- ((x - x0) ** 2 + (y - y0) ** 2) / (2 * sigma ** 2))
g_x = max(0, -ul[0]), min(br[0], h) - ul[0]
g_y = max(0, -ul[1]), min(br[1], w) - ul[1]
img_x = max(0, ul[0]), min(br[0], h)
img_y = max(0, ul[1]), min(br[1], w)
heatmap[img_y[0]:img_y[1], img_x[0]:img_x[1]] = np.maximum(
heatmap[img_y[0]:img_y[1], img_x[0]:img_x[1]],
g[g_y[0]:g_y[1], g_x[0]:g_x[1]])
return heatmap
def grayscale(image):
return cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
def lighting_(data_rng, image, alphastd, eigval, eigvec):
alpha = data_rng.normal(scale=alphastd, size=(3, ))
image += np.dot(eigvec, eigval * alpha)
def blend_(alpha, image1, image2):
image1 *= alpha
image2 *= (1 - alpha)
image1 += image2
def saturation_(data_rng, image, gs, gs_mean, var):
alpha = 1. + data_rng.uniform(low=-var, high=var)
blend_(alpha, image, gs[:, :, None])
def brightness_(data_rng, image, gs, gs_mean, var):
alpha = 1. + data_rng.uniform(low=-var, high=var)
image *= alpha
def contrast_(data_rng, image, gs, gs_mean, var):
alpha = 1. + data_rng.uniform(low=-var, high=var)
blend_(alpha, image, gs_mean)
def color_aug(data_rng, image, eig_val, eig_vec):
functions = [brightness_, contrast_, saturation_]
random.shuffle(functions)
gs = grayscale(image)
gs_mean = gs.mean()
for f in functions:
f(data_rng, image, gs, gs_mean, 0.4)
lighting_(data_rng, image, 0.1, eig_val, eig_vec)
| 7,690 | 32.294372 | 88 | py |
SyNet | SyNet-master/CenterNet/src/lib/utils/debugger.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import cv2
from .ddd_utils import compute_box_3d, project_to_image, draw_box_3d
class Debugger(object):
def __init__(self, ipynb=False, theme='black',
num_classes=-1, dataset=None, down_ratio=4):
self.ipynb = ipynb
if not self.ipynb:
import matplotlib.pyplot as plt
self.plt = plt
self.imgs = {}
self.theme = theme
colors = [(color_list[_]).astype(np.uint8) \
for _ in range(len(color_list))]
self.colors = np.array(colors, dtype=np.uint8).reshape(len(colors), 1, 1, 3)
if self.theme == 'white':
self.colors = self.colors.reshape(-1)[::-1].reshape(len(colors), 1, 1, 3)
self.colors = np.clip(self.colors, 0., 0.6 * 255).astype(np.uint8)
self.dim_scale = 1
if dataset == 'coco_hp':
self.names = ['p']
self.num_class = 1
self.num_joints = 17
self.edges = [[0, 1], [0, 2], [1, 3], [2, 4],
[3, 5], [4, 6], [5, 6],
[5, 7], [7, 9], [6, 8], [8, 10],
[5, 11], [6, 12], [11, 12],
[11, 13], [13, 15], [12, 14], [14, 16]]
self.ec = [(255, 0, 0), (0, 0, 255), (255, 0, 0), (0, 0, 255),
(255, 0, 0), (0, 0, 255), (255, 0, 255),
(255, 0, 0), (255, 0, 0), (0, 0, 255), (0, 0, 255),
(255, 0, 0), (0, 0, 255), (255, 0, 255),
(255, 0, 0), (255, 0, 0), (0, 0, 255), (0, 0, 255)]
self.colors_hp = [(255, 0, 255), (255, 0, 0), (0, 0, 255),
(255, 0, 0), (0, 0, 255), (255, 0, 0), (0, 0, 255),
(255, 0, 0), (0, 0, 255), (255, 0, 0), (0, 0, 255),
(255, 0, 0), (0, 0, 255), (255, 0, 0), (0, 0, 255),
(255, 0, 0), (0, 0, 255)]
elif num_classes == 80 or dataset == 'coco':
self.names = coco_class_name
elif num_classes == 20 or dataset == 'pascal':
self.names = pascal_class_name
elif dataset == 'visdrone':
self.names=visdrone_class_name
elif dataset == 'gta':
self.names = gta_class_name
self.focal_length = 935.3074360871937
self.W = 1920
self.H = 1080
self.dim_scale = 3
elif dataset == 'viper':
self.names = gta_class_name
self.focal_length = 1158
self.W = 1920
self.H = 1080
self.dim_scale = 3
elif num_classes == 3 or dataset == 'kitti':
self.names = kitti_class_name
self.focal_length = 721.5377
self.W = 1242
self.H = 375
num_classes = len(self.names)
self.down_ratio=down_ratio
# for bird view
self.world_size = 64
self.out_size = 384
def add_img(self, img, img_id='default', revert_color=False):
if revert_color:
img = 255 - img
self.imgs[img_id] = img.copy()
def add_mask(self, mask, bg, imgId = 'default', trans = 0.8):
self.imgs[imgId] = (mask.reshape(
mask.shape[0], mask.shape[1], 1) * 255 * trans + \
bg * (1 - trans)).astype(np.uint8)
def show_img(self, pause = False, imgId = 'default'):
cv2.imshow('{}'.format(imgId), self.imgs[imgId])
if pause:
cv2.waitKey()
def add_blend_img(self, back, fore, img_id='blend', trans=0.7):
if self.theme == 'white':
fore = 255 - fore
if fore.shape[0] != back.shape[0] or fore.shape[0] != back.shape[1]:
fore = cv2.resize(fore, (back.shape[1], back.shape[0]))
if len(fore.shape) == 2:
fore = fore.reshape(fore.shape[0], fore.shape[1], 1)
self.imgs[img_id] = (back * (1. - trans) + fore * trans)
self.imgs[img_id][self.imgs[img_id] > 255] = 255
self.imgs[img_id][self.imgs[img_id] < 0] = 0
self.imgs[img_id] = self.imgs[img_id].astype(np.uint8).copy()
'''
# slow version
def gen_colormap(self, img, output_res=None):
# num_classes = len(self.colors)
img[img < 0] = 0
h, w = img.shape[1], img.shape[2]
if output_res is None:
output_res = (h * self.down_ratio, w * self.down_ratio)
color_map = np.zeros((output_res[0], output_res[1], 3), dtype=np.uint8)
for i in range(img.shape[0]):
resized = cv2.resize(img[i], (output_res[1], output_res[0]))
resized = resized.reshape(output_res[0], output_res[1], 1)
cl = self.colors[i] if not (self.theme == 'white') \
else 255 - self.colors[i]
color_map = np.maximum(color_map, (resized * cl).astype(np.uint8))
return color_map
'''
def gen_colormap(self, img, output_res=None):
img = img.copy()
c, h, w = img.shape[0], img.shape[1], img.shape[2]
if output_res is None:
output_res = (h * self.down_ratio, w * self.down_ratio)
img = img.transpose(1, 2, 0).reshape(h, w, c, 1).astype(np.float32)
colors = np.array(
self.colors, dtype=np.float32).reshape(-1, 3)[:c].reshape(1, 1, c, 3)
if self.theme == 'white':
colors = 255 - colors
color_map = (img * colors).max(axis=2).astype(np.uint8)
color_map = cv2.resize(color_map, (output_res[0], output_res[1]))
return color_map
'''
# slow
def gen_colormap_hp(self, img, output_res=None):
# num_classes = len(self.colors)
# img[img < 0] = 0
h, w = img.shape[1], img.shape[2]
if output_res is None:
output_res = (h * self.down_ratio, w * self.down_ratio)
color_map = np.zeros((output_res[0], output_res[1], 3), dtype=np.uint8)
for i in range(img.shape[0]):
resized = cv2.resize(img[i], (output_res[1], output_res[0]))
resized = resized.reshape(output_res[0], output_res[1], 1)
cl = self.colors_hp[i] if not (self.theme == 'white') else \
(255 - np.array(self.colors_hp[i]))
color_map = np.maximum(color_map, (resized * cl).astype(np.uint8))
return color_map
'''
def gen_colormap_hp(self, img, output_res=None):
c, h, w = img.shape[0], img.shape[1], img.shape[2]
if output_res is None:
output_res = (h * self.down_ratio, w * self.down_ratio)
img = img.transpose(1, 2, 0).reshape(h, w, c, 1).astype(np.float32)
colors = np.array(
self.colors_hp, dtype=np.float32).reshape(-1, 3)[:c].reshape(1, 1, c, 3)
if self.theme == 'white':
colors = 255 - colors
color_map = (img * colors).max(axis=2).astype(np.uint8)
color_map = cv2.resize(color_map, (output_res[0], output_res[1]))
return color_map
def add_rect(self, rect1, rect2, c, conf=1, img_id='default'):
cv2.rectangle(
self.imgs[img_id], (rect1[0], rect1[1]), (rect2[0], rect2[1]), c, 2)
if conf < 1:
cv2.circle(self.imgs[img_id], (rect1[0], rect1[1]), int(10 * conf), c, 1)
cv2.circle(self.imgs[img_id], (rect2[0], rect2[1]), int(10 * conf), c, 1)
cv2.circle(self.imgs[img_id], (rect1[0], rect2[1]), int(10 * conf), c, 1)
cv2.circle(self.imgs[img_id], (rect2[0], rect1[1]), int(10 * conf), c, 1)
def add_coco_bbox(self, bbox, cat, conf=1, show_txt=True, img_id='default'):
bbox = np.array(bbox, dtype=np.int32)
# cat = (int(cat) + 1) % 80
cat = int(cat)
# print('cat', cat, self.names[cat])
c = self.colors[cat][0][0].tolist()
if self.theme == 'white':
c = (255 - np.array(c)).tolist()
txt = '{}{:.1f}'.format(self.names[cat], conf)
font = cv2.FONT_HERSHEY_SIMPLEX
cat_size = cv2.getTextSize(txt, font, 0.5, 2)[0]
cv2.rectangle(
self.imgs[img_id], (bbox[0], bbox[1]), (bbox[2], bbox[3]), c, 2)
if show_txt:
cv2.rectangle(self.imgs[img_id],
(bbox[0], bbox[1] - cat_size[1] - 2),
(bbox[0] + cat_size[0], bbox[1] - 2), c, -1)
cv2.putText(self.imgs[img_id], txt, (bbox[0], bbox[1] - 2),
font, 0.5, (0, 0, 0), thickness=1, lineType=cv2.LINE_AA)
def add_coco_hp(self, points, img_id='default'):
points = np.array(points, dtype=np.int32).reshape(self.num_joints, 2)
for j in range(self.num_joints):
cv2.circle(self.imgs[img_id],
(points[j, 0], points[j, 1]), 3, self.colors_hp[j], -1)
for j, e in enumerate(self.edges):
if points[e].min() > 0:
cv2.line(self.imgs[img_id], (points[e[0], 0], points[e[0], 1]),
(points[e[1], 0], points[e[1], 1]), self.ec[j], 2,
lineType=cv2.LINE_AA)
def add_points(self, points, img_id='default'):
num_classes = len(points)
# assert num_classes == len(self.colors)
for i in range(num_classes):
for j in range(len(points[i])):
c = self.colors[i, 0, 0]
cv2.circle(self.imgs[img_id], (points[i][j][0] * self.down_ratio,
points[i][j][1] * self.down_ratio),
5, (255, 255, 255), -1)
cv2.circle(self.imgs[img_id], (points[i][j][0] * self.down_ratio,
points[i][j][1] * self.down_ratio),
3, (int(c[0]), int(c[1]), int(c[2])), -1)
def show_all_imgs(self, pause=False, time=0):
if not self.ipynb:
for i, v in self.imgs.items():
cv2.imshow('{}'.format(i), v)
if cv2.waitKey(0 if pause else 1) == 27:
import sys
sys.exit(0)
else:
self.ax = None
nImgs = len(self.imgs)
fig=self.plt.figure(figsize=(nImgs * 10,10))
nCols = nImgs
nRows = nImgs // nCols
for i, (k, v) in enumerate(self.imgs.items()):
fig.add_subplot(1, nImgs, i + 1)
if len(v.shape) == 3:
self.plt.imshow(cv2.cvtColor(v, cv2.COLOR_BGR2RGB))
else:
self.plt.imshow(v)
self.plt.show()
def save_img(self, imgId='default', path='./cache/debug/'):
cv2.imwrite(path + '{}.png'.format(imgId), self.imgs[imgId])
def save_all_imgs(self, path='./cache/debug/', prefix='', genID=False):
if genID:
try:
idx = int(np.loadtxt(path + '/id.txt'))
except:
idx = 0
prefix=idx
np.savetxt(path + '/id.txt', np.ones(1) * (idx + 1), fmt='%d')
for i, v in self.imgs.items():
cv2.imwrite(path + '/{}{}.png'.format(prefix, i), v)
def remove_side(self, img_id, img):
if not (img_id in self.imgs):
return
ws = img.sum(axis=2).sum(axis=0)
l = 0
while ws[l] == 0 and l < len(ws):
l+= 1
r = ws.shape[0] - 1
while ws[r] == 0 and r > 0:
r -= 1
hs = img.sum(axis=2).sum(axis=1)
t = 0
while hs[t] == 0 and t < len(hs):
t += 1
b = hs.shape[0] - 1
while hs[b] == 0 and b > 0:
b -= 1
self.imgs[img_id] = self.imgs[img_id][t:b+1, l:r+1].copy()
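# Map a ground-plane (x, z) point in meters into bird's-eye-view pixel coordinates (world_size meters -> out_size pixels).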
def project_3d_to_bird(self, pt):
pt[0] += self.world_size / 2
pt[1] = self.world_size - pt[1]
pt = pt * self.out_size / self.world_size
return pt.astype(np.int32)
def add_ct_detection(
self, img, dets, show_box=False, show_txt=True,
center_thresh=0.5, img_id='det'):
# dets: max_preds x 5
self.imgs[img_id] = img.copy()
if type(dets) == type({}):
for cat in dets:
for i in range(len(dets[cat])):
if dets[cat][i, 2] > center_thresh:
cl = (self.colors[cat, 0, 0]).tolist()
ct = dets[cat][i, :2].astype(np.int32)
if show_box:
w, h = dets[cat][i, -2], dets[cat][i, -1]
x, y = dets[cat][i, 0], dets[cat][i, 1]
bbox = np.array([x - w / 2, y - h / 2, x + w / 2, y + h / 2],
dtype=np.float32)
self.add_coco_bbox(
bbox, cat - 1, dets[cat][i, 2],
show_txt=show_txt, img_id=img_id)
else:
for i in range(len(dets)):
if dets[i, 2] > center_thresh:
# print('dets', dets[i])
cat = int(dets[i, -1])
cl = (self.colors[cat, 0, 0] if self.theme == 'black' else \
255 - self.colors[cat, 0, 0]).tolist()
ct = dets[i, :2].astype(np.int32) * self.down_ratio
cv2.circle(self.imgs[img_id], (ct[0], ct[1]), 3, cl, -1)
if show_box:
w, h = dets[i, -3] * self.down_ratio, dets[i, -2] * self.down_ratio
x, y = dets[i, 0] * self.down_ratio, dets[i, 1] * self.down_ratio
bbox = np.array([x - w / 2, y - h / 2, x + w / 2, y + h / 2],
dtype=np.float32)
self.add_coco_bbox(bbox, dets[i, -1], dets[i, 2], img_id=img_id)
def add_3d_detection(
self, image_or_path, dets, calib, show_txt=False,
center_thresh=0.5, img_id='det'):
if isinstance(image_or_path, np.ndarray):
self.imgs[img_id] = image_or_path
else:
self.imgs[img_id] = cv2.imread(image_or_path)
for cat in dets:
for i in range(len(dets[cat])):
cl = (self.colors[cat - 1, 0, 0]).tolist()
if dets[cat][i, -1] > center_thresh:
dim = dets[cat][i, 5:8]
loc = dets[cat][i, 8:11]
rot_y = dets[cat][i, 11]
# loc[1] = loc[1] - dim[0] / 2 + dim[0] / 2 / self.dim_scale
# dim = dim / self.dim_scale
if loc[2] > 1:
box_3d = compute_box_3d(dim, loc, rot_y)
box_2d = project_to_image(box_3d, calib)
self.imgs[img_id] = draw_box_3d(self.imgs[img_id], box_2d, cl)
def compose_vis_add(
self, img_path, dets, calib,
center_thresh, pred, bev, img_id='out'):
self.imgs[img_id] = cv2.imread(img_path)
# h, w = self.imgs[img_id].shape[:2]
# pred = cv2.resize(pred, (h, w))
h, w = pred.shape[:2]
hs, ws = self.imgs[img_id].shape[0] / h, self.imgs[img_id].shape[1] / w
self.imgs[img_id] = cv2.resize(self.imgs[img_id], (w, h))
self.add_blend_img(self.imgs[img_id], pred, img_id)
for cat in dets:
for i in range(len(dets[cat])):
cl = (self.colors[cat - 1, 0, 0]).tolist()
if dets[cat][i, -1] > center_thresh:
dim = dets[cat][i, 5:8]
loc = dets[cat][i, 8:11]
rot_y = dets[cat][i, 11]
# loc[1] = loc[1] - dim[0] / 2 + dim[0] / 2 / self.dim_scale
# dim = dim / self.dim_scale
if loc[2] > 1:
box_3d = compute_box_3d(dim, loc, rot_y)
box_2d = project_to_image(box_3d, calib)
box_2d[:, 0] /= hs
box_2d[:, 1] /= ws
self.imgs[img_id] = draw_box_3d(self.imgs[img_id], box_2d, cl)
self.imgs[img_id] = np.concatenate(
[self.imgs[img_id], self.imgs[bev]], axis=1)
def add_2d_detection(
self, img, dets, show_box=False, show_txt=True,
center_thresh=0.5, img_id='det'):
self.imgs[img_id] = img
for cat in dets:
for i in range(len(dets[cat])):
cl = (self.colors[cat - 1, 0, 0]).tolist()
if dets[cat][i, -1] > center_thresh:
bbox = dets[cat][i, 1:5]
self.add_coco_bbox(
bbox, cat - 1, dets[cat][i, -1],
show_txt=show_txt, img_id=img_id)
def add_bird_view(self, dets, center_thresh=0.3, img_id='bird'):
bird_view = np.ones((self.out_size, self.out_size, 3), dtype=np.uint8) * 230
for cat in dets:
cl = (self.colors[cat - 1, 0, 0]).tolist()
lc = (250, 152, 12)
for i in range(len(dets[cat])):
if dets[cat][i, -1] > center_thresh:
dim = dets[cat][i, 5:8]
loc = dets[cat][i, 8:11]
rot_y = dets[cat][i, 11]
rect = compute_box_3d(dim, loc, rot_y)[:4, [0, 2]]
for k in range(4):
rect[k] = self.project_3d_to_bird(rect[k])
# cv2.circle(bird_view, (rect[k][0], rect[k][1]), 2, lc, -1)
cv2.polylines(
bird_view,[rect.reshape(-1, 1, 2).astype(np.int32)],
True,lc,2,lineType=cv2.LINE_AA)
for e in [[0, 1]]:
t = 4 if e == [0, 1] else 1
cv2.line(bird_view, (rect[e[0]][0], rect[e[0]][1]),
(rect[e[1]][0], rect[e[1]][1]), lc, t,
lineType=cv2.LINE_AA)
self.imgs[img_id] = bird_view
def add_bird_views(self, dets_dt, dets_gt, center_thresh=0.3, img_id='bird'):
alpha = 0.5
bird_view = np.ones((self.out_size, self.out_size, 3), dtype=np.uint8) * 230
for ii, (dets, lc, cc) in enumerate(
[(dets_gt, (12, 49, 250), (0, 0, 255)),
(dets_dt, (250, 152, 12), (255, 0, 0))]):
# cc = np.array(lc, dtype=np.uint8).reshape(1, 1, 3)
for cat in dets:
cl = (self.colors[cat - 1, 0, 0]).tolist()
for i in range(len(dets[cat])):
if dets[cat][i, -1] > center_thresh:
dim = dets[cat][i, 5:8]
loc = dets[cat][i, 8:11]
rot_y = dets[cat][i, 11]
rect = compute_box_3d(dim, loc, rot_y)[:4, [0, 2]]
for k in range(4):
rect[k] = self.project_3d_to_bird(rect[k])
if ii == 0:
cv2.fillPoly(
bird_view,[rect.reshape(-1, 1, 2).astype(np.int32)],
lc,lineType=cv2.LINE_AA)
else:
cv2.polylines(
bird_view,[rect.reshape(-1, 1, 2).astype(np.int32)],
True,lc,2,lineType=cv2.LINE_AA)
# for e in [[0, 1], [1, 2], [2, 3], [3, 0]]:
for e in [[0, 1]]:
t = 4 if e == [0, 1] else 1
cv2.line(bird_view, (rect[e[0]][0], rect[e[0]][1]),
(rect[e[1]][0], rect[e[1]][1]), lc, t,
lineType=cv2.LINE_AA)
self.imgs[img_id] = bird_view
kitti_class_name = [
'p', 'v', 'b'
]
gta_class_name = [
'p', 'v'
]
pascal_class_name = ["aeroplane", "bicycle", "bird", "boat", "bottle", "bus",
"car", "cat", "chair", "cow", "diningtable", "dog", "horse", "motorbike",
"person", "pottedplant", "sheep", "sofa", "train", "tvmonitor"]
coco_class_name = [
'person', 'bicycle', 'car', 'motorcycle', 'airplane',
'bus', 'train', 'truck', 'boat', 'traffic light', 'fire hydrant',
'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse',
'sheep', 'cow', 'elephant', 'bear', 'zebra', 'giraffe', 'backpack',
'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee', 'skis',
'snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball glove',
'skateboard', 'surfboard', 'tennis racket', 'bottle', 'wine glass',
'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple', 'sandwich',
'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake',
'chair', 'couch', 'potted plant', 'bed', 'dining table', 'toilet', 'tv',
'laptop', 'mouse', 'remote', 'keyboard', 'cell phone', 'microwave',
'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase',
'scissors', 'teddy bear', 'hair drier', 'toothbrush'
]
visdrone_class_name = ["pedestrian", "people", "bicycle", "car", "van", "truck", "tricycle", "awning-tricycle", "bus", "motor"]
color_list = np.array(
[
1.000, 1.000, 1.000,
0.850, 0.325, 0.098,
0.929, 0.694, 0.125,
0.494, 0.184, 0.556,
0.466, 0.674, 0.188,
0.301, 0.745, 0.933,
0.635, 0.078, 0.184,
0.300, 0.300, 0.300,
0.600, 0.600, 0.600,
1.000, 0.000, 0.000,
1.000, 0.500, 0.000,
0.749, 0.749, 0.000,
0.000, 1.000, 0.000,
0.000, 0.000, 1.000,
0.667, 0.000, 1.000,
0.333, 0.333, 0.000,
0.333, 0.667, 0.000,
0.333, 1.000, 0.000,
0.667, 0.333, 0.000,
0.667, 0.667, 0.000,
0.667, 1.000, 0.000,
1.000, 0.333, 0.000,
1.000, 0.667, 0.000,
1.000, 1.000, 0.000,
0.000, 0.333, 0.500,
0.000, 0.667, 0.500,
0.000, 1.000, 0.500,
0.333, 0.000, 0.500,
0.333, 0.333, 0.500,
0.333, 0.667, 0.500,
0.333, 1.000, 0.500,
0.667, 0.000, 0.500,
0.667, 0.333, 0.500,
0.667, 0.667, 0.500,
0.667, 1.000, 0.500,
1.000, 0.000, 0.500,
1.000, 0.333, 0.500,
1.000, 0.667, 0.500,
1.000, 1.000, 0.500,
0.000, 0.333, 1.000,
0.000, 0.667, 1.000,
0.000, 1.000, 1.000,
0.333, 0.000, 1.000,
0.333, 0.333, 1.000,
0.333, 0.667, 1.000,
0.333, 1.000, 1.000,
0.667, 0.000, 1.000,
0.667, 0.333, 1.000,
0.667, 0.667, 1.000,
0.667, 1.000, 1.000,
1.000, 0.000, 1.000,
1.000, 0.333, 1.000,
1.000, 0.667, 1.000,
0.167, 0.000, 0.000,
0.333, 0.000, 0.000,
0.500, 0.000, 0.000,
0.667, 0.000, 0.000,
0.833, 0.000, 0.000,
1.000, 0.000, 0.000,
0.000, 0.167, 0.000,
0.000, 0.333, 0.000,
0.000, 0.500, 0.000,
0.000, 0.667, 0.000,
0.000, 0.833, 0.000,
0.000, 1.000, 0.000,
0.000, 0.000, 0.167,
0.000, 0.000, 0.333,
0.000, 0.000, 0.500,
0.000, 0.000, 0.667,
0.000, 0.000, 0.833,
0.000, 0.000, 1.000,
0.000, 0.000, 0.000,
0.143, 0.143, 0.143,
0.286, 0.286, 0.286,
0.429, 0.429, 0.429,
0.571, 0.571, 0.571,
0.714, 0.714, 0.714,
0.857, 0.857, 0.857,
0.000, 0.447, 0.741,
0.50, 0.5, 0
]
).astype(np.float32)
color_list = color_list.reshape((-1, 3)) * 255
| 21,425 | 38.09854 | 127 | py |
SyNet | SyNet-master/CenterNet/src/lib/utils/oracle_utils.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import numba
@numba.jit(nopython=True, nogil=True)
def gen_oracle_map(feat, ind, w, h):
# feat: B x maxN x featDim
# ind: B x maxN
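  # The loop below builds an "oracle" map by breadth-first flood fill: each
  # annotated center seeds its ground-truth feature at its own pixel, then
  # the feature spreads to 4-connected neighbours until the whole h x w map
  # is covered, so every pixel carries the feature of a nearby object.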
batch_size = feat.shape[0]
max_objs = feat.shape[1]
feat_dim = feat.shape[2]
out = np.zeros((batch_size, feat_dim, h, w), dtype=np.float32)
vis = np.zeros((batch_size, h, w), dtype=np.uint8)
ds = [(0, 1), (0, -1), (1, 0), (-1, 0)]
for i in range(batch_size):
queue_ind = np.zeros((h*w*2, 2), dtype=np.int32)
queue_feat = np.zeros((h*w*2, feat_dim), dtype=np.float32)
head, tail = 0, 0
for j in range(max_objs):
if ind[i][j] > 0:
x, y = ind[i][j] % w, ind[i][j] // w
out[i, :, y, x] = feat[i][j]
vis[i, y, x] = 1
queue_ind[tail] = x, y
queue_feat[tail] = feat[i][j]
tail += 1
while tail - head > 0:
x, y = queue_ind[head]
f = queue_feat[head]
head += 1
for (dx, dy) in ds:
xx, yy = x + dx, y + dy
if xx >= 0 and yy >= 0 and xx < w and yy < h and vis[i, yy, xx] < 1:
out[i, :, yy, xx] = f
vis[i, yy, xx] = 1
queue_ind[tail] = xx, yy
queue_feat[tail] = f
tail += 1
return out | 1,317 | 30.380952 | 76 | py |
SyNet | SyNet-master/CenterNet/src/lib/utils/utils.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
if self.count > 0:
self.avg = self.sum / self.count | 542 | 22.608696 | 59 | py |
SyNet | SyNet-master/CenterNet/src/lib/utils/ddd_utils.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import cv2
def compute_box_3d(dim, location, rotation_y):
# dim: 3
# location: 3
# rotation_y: 1
# return: 8 x 3
c, s = np.cos(rotation_y), np.sin(rotation_y)
R = np.array([[c, 0, s], [0, 1, 0], [-s, 0, c]], dtype=np.float32)
l, w, h = dim[2], dim[1], dim[0]
x_corners = [l/2, l/2, -l/2, -l/2, l/2, l/2, -l/2, -l/2]
y_corners = [0,0,0,0,-h,-h,-h,-h]
z_corners = [w/2, -w/2, -w/2, w/2, w/2, -w/2, -w/2, w/2]
corners = np.array([x_corners, y_corners, z_corners], dtype=np.float32)
corners_3d = np.dot(R, corners)
corners_3d = corners_3d + np.array(location, dtype=np.float32).reshape(3, 1)
return corners_3d.transpose(1, 0)
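# Note on ordering: compute_box_3d returns the four bottom-face corners first
# (indices 0-3, at y = 0 in the object frame) and the top face second
# (indices 4-7, at y = -h), which is the ordering that draw_box_3d's face_idx
# below assumes.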
def project_to_image(pts_3d, P):
# pts_3d: n x 3
# P: 3 x 4
# return: n x 2
pts_3d_homo = np.concatenate(
[pts_3d, np.ones((pts_3d.shape[0], 1), dtype=np.float32)], axis=1)
pts_2d = np.dot(P, pts_3d_homo.transpose(1, 0)).transpose(1, 0)
pts_2d = pts_2d[:, :2] / pts_2d[:, 2:]
# import pdb; pdb.set_trace()
return pts_2d
def compute_orientation_3d(dim, location, rotation_y):
# dim: 3
# location: 3
# rotation_y: 1
# return: 2 x 3
c, s = np.cos(rotation_y), np.sin(rotation_y)
R = np.array([[c, 0, s], [0, 1, 0], [-s, 0, c]], dtype=np.float32)
orientation_3d = np.array([[0, dim[2]], [0, 0], [0, 0]], dtype=np.float32)
orientation_3d = np.dot(R, orientation_3d)
orientation_3d = orientation_3d + \
np.array(location, dtype=np.float32).reshape(3, 1)
return orientation_3d.transpose(1, 0)
def draw_box_3d(image, corners, c=(0, 0, 255)):
face_idx = [[0,1,5,4],
[1,2,6, 5],
[2,3,7,6],
[3,0,4,7]]
for ind_f in range(3, -1, -1):
f = face_idx[ind_f]
for j in range(4):
cv2.line(image, (corners[f[j], 0], corners[f[j], 1]),
(corners[f[(j+1)%4], 0], corners[f[(j+1)%4], 1]), c, 2, lineType=cv2.LINE_AA)
if ind_f == 0:
cv2.line(image, (corners[f[0], 0], corners[f[0], 1]),
(corners[f[2], 0], corners[f[2], 1]), c, 1, lineType=cv2.LINE_AA)
cv2.line(image, (corners[f[1], 0], corners[f[1], 1]),
(corners[f[3], 0], corners[f[3], 1]), c, 1, lineType=cv2.LINE_AA)
return image
def unproject_2d_to_3d(pt_2d, depth, P):
# pts_2d: 2
# depth: 1
# P: 3 x 4
# return: 3
z = depth - P[2, 3]
x = (pt_2d[0] * depth - P[0, 3] - P[0, 2] * z) / P[0, 0]
y = (pt_2d[1] * depth - P[1, 3] - P[1, 2] * z) / P[1, 1]
pt_3d = np.array([x, y, z], dtype=np.float32)
return pt_3d
def alpha2rot_y(alpha, x, cx, fx):
"""
  Get rotation_y from alpha: rotation_y = alpha + theta, where theta = arctan2(x - cx, fx)
alpha : Observation angle of object, ranging [-pi..pi]
x : Object center x to the camera center (x-W/2), in pixels
rotation_y : Rotation ry around Y-axis in camera coordinates [-pi..pi]
"""
rot_y = alpha + np.arctan2(x - cx, fx)
if rot_y > np.pi:
rot_y -= 2 * np.pi
if rot_y < -np.pi:
rot_y += 2 * np.pi
return rot_y
def rot_y2alpha(rot_y, x, cx, fx):
"""
  Get alpha from rotation_y: alpha = rotation_y - theta, where theta = arctan2(x - cx, fx)
alpha : Observation angle of object, ranging [-pi..pi]
x : Object center x to the camera center (x-W/2), in pixels
rotation_y : Rotation ry around Y-axis in camera coordinates [-pi..pi]
"""
alpha = rot_y - np.arctan2(x - cx, fx)
if alpha > np.pi:
alpha -= 2 * np.pi
if alpha < -np.pi:
alpha += 2 * np.pi
return alpha
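# alpha2rot_y and rot_y2alpha above are inverses of each other (up to the
# [-pi, pi] wrap-around); the __main__ block at the bottom of this file
# exercises alpha2rot_y on a sample KITTI-style calibration.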
def ddd2locrot(center, alpha, dim, depth, calib):
# single image
locations = unproject_2d_to_3d(center, depth, calib)
locations[1] += dim[0] / 2
rotation_y = alpha2rot_y(alpha, center[0], calib[0, 2], calib[0, 0])
return locations, rotation_y
def project_3d_bbox(location, dim, rotation_y, calib):
box_3d = compute_box_3d(dim, location, rotation_y)
box_2d = project_to_image(box_3d, calib)
return box_2d
if __name__ == '__main__':
calib = np.array(
[[7.070493000000e+02, 0.000000000000e+00, 6.040814000000e+02, 4.575831000000e+01],
[0.000000000000e+00, 7.070493000000e+02, 1.805066000000e+02, -3.454157000000e-01],
[0.000000000000e+00, 0.000000000000e+00, 1.000000000000e+00, 4.981016000000e-03]],
dtype=np.float32)
alpha = -0.20
tl = np.array([712.40, 143.00], dtype=np.float32)
br = np.array([810.73, 307.92], dtype=np.float32)
ct = (tl + br) / 2
rotation_y = 0.01
print('alpha2rot_y', alpha2rot_y(alpha, ct[0], calib[0, 2], calib[0, 0]))
print('rotation_y', rotation_y) | 4,548 | 33.725191 | 92 | py |
SyNet | SyNet-master/CenterNet/data/example.py | #-*-coding:utf-8-*-
import os
from xml.etree.ElementTree import dump
import json
import pprint
import argparse
from Format import VOC, COCO, UDACITY, KITTI, YOLO
parser = argparse.ArgumentParser(description='label Converting example.')
parser.add_argument('--datasets', type=str, help='type of datasets')
parser.add_argument('--img_path', type=str, help='directory of image folder')
parser.add_argument('--label', type=str, help='directory of label folder or label file path')
parser.add_argument('--convert_output_path', type=str, help='directory of label folder')
parser.add_argument('--img_type', type=str, help='type of image')
parser.add_argument('--manipast_path', type=str, help='directory of manipast file', default="./")
parser.add_argument('--cls_list_file', type=str, help='directory of *.names file', default="./")
args = parser.parse_args()
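# Example invocation (paths and file names below are illustrative only):
# python example.py --datasets VOC --img_path ./VOC/JPEGImages/ \
#     --label ./VOC/Annotations/ --convert_output_path ./YOLO/ \
#     --img_type ".jpg" --manipast_path ./ --cls_list_file ./voc.names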
def main(config):
if config["datasets"] == "VOC":
voc = VOC()
yolo = YOLO(os.path.abspath(config["cls_list"]))
flag, data = voc.parse(config["label"])
if flag == True:
flag, data = yolo.generate(data)
if flag == True:
flag, data = yolo.save(data, config["output_path"], config["img_path"] ,
config["img_type"], config["manipast_path"])
if flag == False:
print("Saving Result : {}, msg : {}".format(flag, data))
else:
print("YOLO Generating Result : {}, msg : {}".format(flag, data))
else:
print("VOC Parsing Result : {}, msg : {}".format(flag, data))
elif config["datasets"] == "COCO":
coco = COCO()
yolo = YOLO(os.path.abspath(config["cls_list"]))
flag, data = coco.parse(config["label"])
if flag == True:
flag, data = yolo.generate(data)
if flag == True:
flag, data = yolo.save(data, config["output_path"], config["img_path"],
config["img_type"], config["manipast_path"])
if flag == False:
print("Saving Result : {}, msg : {}".format(flag, data))
else:
print("YOLO Generating Result : {}, msg : {}".format(flag, data))
else:
print("COCO Parsing Result : {}, msg : {}".format(flag, data))
elif config["datasets"] == "UDACITY":
udacity = UDACITY()
yolo = YOLO(os.path.abspath(config["cls_list"]))
flag, data = udacity.parse(config["label"])
if flag == True:
flag, data = yolo.generate(data)
if flag == True:
flag, data = yolo.save(data, config["output_path"], config["img_path"],
config["img_type"], config["manipast_path"])
if flag == False:
print("Saving Result : {}, msg : {}".format(flag, data))
else:
print("UDACITY Generating Result : {}, msg : {}".format(flag, data))
else:
print("COCO Parsing Result : {}, msg : {}".format(flag, data))
elif config["datasets"] == "KITTI":
kitti = KITTI()
yolo = YOLO(os.path.abspath(config["cls_list"]))
flag, data = kitti.parse(config["label"], config["img_path"], img_type=config["img_type"])
if flag == True:
flag, data = yolo.generate(data)
if flag == True:
flag, data = yolo.save(data, config["output_path"], config["img_path"],
config["img_type"], config["manipast_path"])
if flag == False:
print("Saving Result : {}, msg : {}".format(flag, data))
else:
print("YOLO Generating Result : {}, msg : {}".format(flag, data))
else:
print("KITTI Parsing Result : {}, msg : {}".format(flag, data))
else:
print("Unkwon Datasets")
if __name__ == '__main__':
config ={
"datasets": args.datasets,
"img_path": args.img_path,
"label": args.label,
"img_type": args.img_type,
"manipast_path": args.manipast_path,
"output_path": args.convert_output_path,
"cls_list": args.cls_list_file,
}
main(config)
| 4,305 | 31.37594 | 98 | py |
SyNet | SyNet-master/CenterNet/data/msgLogInfo.py | class color:
BOLD = '\033[1m'
END = '\033[0m'
DEFAULT = '\033[0;37;40m'
RED = '\033[91m' | 104 | 20 | 29 | py |
SyNet | SyNet-master/CenterNet/data/label_visualization.py | #-*-coding:utf-8-*-
import os
import argparse
import time
import pprint
from PIL import Image, ImageDraw
import matplotlib.pyplot as plt
import json
from Format import VOC, COCO, UDACITY, KITTI, YOLO
parser = argparse.ArgumentParser(description='Evaluate label Converting.')
parser.add_argument('--datasets', type=str, help='type of datasets')
parser.add_argument('--img_path', type=str, help='directory of image folder')
parser.add_argument('--label_path', type=str, help='directory of label folder')
parser.add_argument('--img_type', type=str, help='type of image', default='.jpg')
parser.add_argument('--cls_list_file', type=str, help='directory of *.names file', default="./")
args = parser.parse_args()
def main():
pp = pprint.PrettyPrinter(indent=4)
img_path = args.img_path
label_path = args.label_path
img_type = args.img_type
datasets = args.datasets
cls_list = args.cls_list_file
result = None
data = None
if datasets == "COCO":
coco = COCO()
result, data = coco.parse(label_path)
elif datasets == "VOC":
voc = VOC()
result, data = voc.parse(label_path)
elif datasets == "UDACITY":
udacity = UDACITY()
result, data = udacity.parse(label_path, img_path)
elif datasets == "KITTI":
kitti = KITTI()
result, data = kitti.parse(label_path, img_path, img_type=img_type)
elif datasets == "YOLO":
yolo =YOLO(os.path.abspath(cls_list))
result, data = yolo.parse(label_path, img_path, img_type=img_type)
if result is True:
for key in data:
filepath = "".join([img_path, key, img_type])
im = Image.open(filepath)
draw = ImageDraw.Draw(im)
print("data['{}']: ".format(key), end="")
pp.pprint(data[key])
print("num_object : {}".format(data[key]["objects"]["num_obj"]))
for idx in range(0, int(data[key]["objects"]["num_obj"])):
print("idx {}, name : {}, bndbox :{}".format(idx, data[key]["objects"][str(idx)]["name"], data[key]["objects"][str(idx)]["bndbox"]))
x0 = data[key]["objects"][str(idx)]["bndbox"]["xmin"]
y0 = data[key]["objects"][str(idx)]["bndbox"]["ymin"]
x1 = data[key]["objects"][str(idx)]["bndbox"]["xmax"]
y1 = data[key]["objects"][str(idx)]["bndbox"]["ymax"]
draw.rectangle(((x0,y0), (x1,y1)), outline='#00ff88')
draw.text((x0,y0), data[key]["objects"][str(idx)]["name"])
del draw
print("===============================================================================================\n\n")
plt.imshow(im)
plt.show()
plt.clf()
im.close()
else:
print("return value : {}, msg : {}, args: {}".format(result, data, args))
if __name__ == '__main__':
main()
| 2,912 | 32.872093 | 148 | py |
SyNet | SyNet-master/Ensemble/converFastCOCO.py | import json
import pickle as pk
import numpy as np
from ensemble_boxes import *
# tempDict = next(item for item in out if item['image_id'] == 1 and item['score']>confidence)
numVal = 5000
numTrain = 191961
confidence = 0.001
with open('COCOVAL.json', 'r') as f:
out = json.load(f)
with open('centerval.json', 'r') as f:
out2 = json.load(f)
# boxesYOLO = pk.load(open('val_boxes.pkl','rb'))
# labelsYOLO = pk.load(open('val_labels.pkl','rb'))
# scoresYOLO =pk. load(open('val_scores.pkl','rb'))
boxesfast = []
labelsfast = []
scoresfast = []
idsfast = []
boxescent = []
labelscent = []
scorescent = []
idscent = []
index = 0
index2 = 0
tempID = out[index]['image_id']
temptemp = 0
allids = []
for i in range(5000):
check = True
tempBoxes = []
tempLabels = []
tempScores = []
tempIds = []
tempBoxes2 = []
tempLabels2 = []
tempScores2 = []
tempIds2 = []
while(check and index<len(out)):
item = out[index]
if item['image_id'] == tempID:
print('Sample ' + str(tempID))
tempBBOX = item['bbox']
tempBBOX[2] += tempBBOX[0]
tempBBOX[3] += tempBBOX[1]
tempBoxes.append(tempBBOX)
tempScores.append(item['score'])
tempLabels.append(item['category_id'] - 1)
tempIds.append(item['image_id'])
temptemp = tempID
index +=1
else:
allids.append(tempID)
tempID = out[index]['image_id']
check = False
boxesfast.append(np.asarray(tempBoxes))
labelsfast.append(np.asarray(tempLabels))
scoresfast.append(np.asarray(tempScores))
idsfast.append(np.asarray(tempIds))
# tmp = 0
# check = True
# while (check and tmp < len(out)):
# item = out2[tmp]
# if item['image_id'] == temptemp:
# check = False
# else:
# tmp +=1
# check = True
# while (check and tmp < len(out)):
# item = out2[tmp]
# if item['image_id'] == temptemp:
# tempBBOX = item['bbox']
# tempBBOX[2] += tempBBOX[0]
# tempBBOX[3] += tempBBOX[1]
# tempBoxes2.append(tempBBOX)
# tempScores2.append(item['score'])
# tempLabels2.append(item['category_id'] - 1)
# tempIds2.append(item['image_id'])
# tmp += 1
# else:
# check = False
#
# boxescent.append(np.asarray(tempBoxes2))
# labelscent.append(np.asarray(tempLabels2))
# scorescent.append(np.asarray(tempScores2))
# idscent.append(np.asarray(tempIds2))
with open('fastcocobox.pkl', 'wb') as f:
pk.dump(boxesfast, f)
with open('fastcocolab.pkl', 'wb') as f:
pk.dump(labelsfast, f)
with open('fastcocosco.pkl', 'wb') as f:
pk.dump(scoresfast, f)
with open('fastcocoid.pkl', 'wb') as f:
pk.dump(idsfast, f)
# with open('centcocobox.pkl', 'wb') as f:
# pk.dump(boxescent, f)
# with open('centcocolab.pkl', 'wb') as f:
# pk.dump(labelscent, f)
# with open('centcocosco.pkl', 'wb') as f:
# pk.dump(scorescent, f)
# with open('centcocoid.pkl', 'wb') as f:
# pk.dump(idscent, f)
with open('allids.pkl', 'wb') as f:
pk.dump(allids, f)
a = 5 | 3,227 | 27.315789 | 97 | py |
SyNet | SyNet-master/Ensemble/convertCENTER.py | import json
import pickle as pk
import numpy as np
from ensemble_boxes import *
# tempDict = next(item for item in out if item['image_id'] == 1 and item['score']>confidence)
numVal = 548
numTrain = 191961
confidence = 0.001
with open('results.json', 'r') as f:
out = json.load(f)
# boxesYOLO = pk.load(open('val_boxes.pkl','rb'))
# labelsYOLO = pk.load(open('val_labels.pkl','rb'))
# scoresYOLO =pk. load(open('val_scores.pkl','rb'))
boxesCSC = []
labelsCSC = []
scoresCSC = []
index = 0
for i in range(548):
tempID = i + 1
check = True
tempBoxes = []
tempLabels = []
tempScores = []
while(check and index<len(out)):
item = out[index]
if item['image_id'] == tempID:
print('Sample ' + str(tempID))
tempBBOX = item['bbox']
tempBBOX[2] += tempBBOX[0]
tempBBOX[3] += tempBBOX[1]
tempBoxes.append(tempBBOX)
tempScores.append(item['score'])
tempLabels.append(item['category_id'] - 1)
index +=1
else:
check = False
boxesCSC.append(np.asarray(tempBoxes))
labelsCSC.append(np.asarray(tempLabels))
scoresCSC.append(np.asarray(tempScores))
with open('val_boxes_center.pkl', 'wb') as f:
pk.dump(boxesCSC, f)
with open('val_labels_center.pkl', 'wb') as f:
pk.dump(labelsCSC, f)
with open('val_scores_center.pkl', 'wb') as f:
pk.dump(scoresCSC, f)
a = 5 | 1,434 | 26.596154 | 97 | py |
SyNet | SyNet-master/Ensemble/convBBOXCOCO.py | import pickle as pk
import numpy as np
import json
numVal = 5000
numTrain = 191961
COCO_id_to_category_id = {13: 12, 14: 13, 15: 14, 16: 15, 17: 16, 18: 17, 19: 18, 20: 19, 21: 20, 22: 21, 23: 22, 24: 23, 25: 24, 27: 25, 28: 26, 31: 27, 32: 28, 33: 29, 34: 30, 35: 31, 36: 32, 37: 33, 38: 34, 39: 35, 40: 36, 41: 37, 42: 38, 43: 39, 44: 40, 46: 41, 47: 42, 48: 43, 49: 44, 50: 45, 51: 46, 52: 47, 53: 48, 54: 49, 55: 50, 56: 51, 57: 52, 58: 53, 59: 54, 60: 55, 61: 56, 62: 57, 63: 58, 64: 59, 65: 60, 67: 61, 70: 62, 72: 63, 73: 64, 74: 65, 75: 66, 76: 67, 77: 68, 78: 69, 79: 70, 80: 71, 81: 72, 82: 73, 84: 74, 85: 75, 86: 76, 87: 77, 88: 78, 89: 79, 90: 80}
class_names = ["person", "bicycle", "car", "motorcycle", "airplane", "bus", "train", "truck", "boat", "trafficlight",
"firehydrant", "stopsign", "parkingmeter", "bench", "bird", "cat", "dog", "horse", "sheep", "cow",
"elephant", "bear", "zebra", "giraffe", "backpack", "umbrella", "handbag", "tie", "suitcase", "frisbee", "skis",
"snowboard", "sportsball", "kite", "baseballbat", "baseballglove", "skateboard", "surfboard",
"tennisracket", "bottle", "wineglass", "cup", "fork", "knife", "spoon", "bowl", "banana", "apple", "sandwich",
"orange", "broccoli", "carrot", "hotdog", "pizza", "donut", "cake", "chair", "couch", "pottedplant", "bed",
"diningtable", "toilet", "tv", "laptop", "mouse", "remote", "keyboard", "cellphone", "microwave", "oven",
"toaster", "sink", "refrigerator", "book", "clock", "vase", "scissors", "teddybear", "hairdrier",
"toothbrush"]
boxes = pk.load(open('val_boxes_CC.pkl','rb'))
labels = pk.load(open('val_labels_CC.pkl','rb'))
scores = pk. load(open('val_scores_CC.pkl','rb'))
allids = pk. load(open('allids.pkl','rb'))
with open('cocovaltrue.json', 'r') as f:
gt = json.load(f)
for i in range(1,numVal+1):
fname = 'image_' + str(i)+'.txt'
f = open('input/detection-results/'+fname, 'w')
for p in range(boxes[i-1].shape[0]):
aa = int(labels[i-1][p])
if int(labels[i-1][p])+1 in COCO_id_to_category_id:
uu = COCO_id_to_category_id[int(labels[i-1][p]) + 1] - 1
f.write(class_names[uu] + " ")
else:
f.write(class_names[int(labels[i-1][p])] + " ")
f.write(str(scores[i-1][p])+" ")
f.write(str(boxes[i-1][p][0])+" "+str(boxes[i-1][p][1])+" "+str(boxes[i-1][p][2])+" "+str(boxes[i-1][p][3])+" ")
f.write("\n")
f.close()
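# Each file under input/detection-results/ now holds one predicted box per
# line as "<class> <score> <x1> <y1> <x2> <y2>", and the loop below writes
# matching "<class> <x1> <y1> <x2> <y2>" ground-truth files -- the directory
# layout used by common mAP evaluation scripts (an assumption based on the
# paths used here, not stated in this script).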
allids.append(581781)
for i in range(1,numVal+1):
print(i)
tempID = allids[i-1]
fname = 'image_' + str(i) + '.txt'
gtBBOX = []
gtCat = []
for item in gt['annotations']:
if tempID == item['image_id']:
tempBBOX = item['bbox']
tempBBOX[3] += tempBBOX[1]
tempBBOX[2] += tempBBOX[0]
gtBBOX.append(tempBBOX)
gtCat.append(item['category_id'] - 1)
f = open('input/ground-truth/' + fname, 'w')
for p in range(len(gtBBOX)):
if gtCat[p] + 1 in COCO_id_to_category_id:
uu = COCO_id_to_category_id[gtCat[p] + 1] - 1
f.write(class_names[uu] + " ")
else:
f.write(class_names[gtCat[p]] + " ")
for k in gtBBOX[p]:
f.write(str(k) + " ")
f.write("\n")
f.close() | 3,307 | 48.373134 | 577 | py |
SyNet | SyNet-master/Ensemble/cocoBBOX.py | from ensemble_boxes import *
import pickle as pk
import numpy as np
numVal = 5000
numTrain = 191961
boxesCSC50 = pk.load(open('centcocobox.pkl','rb'))
labelsCSC50 = pk.load(open('centcocolab.pkl','rb'))
scoresCSC50 =pk.load(open('centcocosco.pkl','rb'))
idsc = pk. load(open('centcocoid.pkl','rb'))
boxesCSC101 = pk.load(open('fastcocobox.pkl','rb'))
labelsCSC101 = pk.load(open('fastcocolab.pkl','rb'))
scoresCSC101 =pk.load(open('fastcocosco.pkl','rb'))
idsf = pk. load(open('fastcocoid.pkl','rb'))
for i in range(len(boxesCSC50)):
boxesCSC50[i] = np.ndarray.tolist(boxesCSC50[i])
labelsCSC50[i] = np.ndarray.tolist(labelsCSC50[i])
scoresCSC50[i] = np.ndarray.tolist(scoresCSC50[i])
for i in range(len(boxesCSC101)):
boxesCSC101[i] = np.ndarray.tolist(boxesCSC101[i])
labelsCSC101[i] = np.ndarray.tolist(labelsCSC101[i])
scoresCSC101[i] = np.ndarray.tolist(scoresCSC101[i])
weights = [1, 1]
weights2 = [5, 1]
threshiou = 0.55
threshlow = 0.025
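# weighted_boxes_fusion (from the ensemble_boxes package) clusters boxes from
# the two detectors whose IoU exceeds threshiou and replaces each cluster with
# a confidence-weighted average box; `weights` scales each model's votes and
# boxes scoring below threshlow are skipped. (Rough summary of the library's
# behaviour as configured here, not taken from this script.)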
boxesYC = []
scoresYC = []
labelsYC = []
boxesCC = []
scoresCC = []
labelsCC = []
# for i in range(numVal):
# print(i)
# boxes1, scores1, labels1 = weighted_boxes_fusion([boxesCSC101[i], boxesYOLO[i]], [scoresCSC101[i], scoresYOLO[i]],
# [labelsCSC101[i], labelsYOLO[i]], weights=weights, iou_thr=threshiou,
# skip_box_thr=threshlow)
# # boxes2, scores2, labels2 = getBBOX([boxesCSC101[i], boxesYOLO[i]], [scoresCSC101[i], scoresYOLO[i]],
# # [labelsCSC101[i], labelsYOLO[i]], w=weights, threshiou=threshiou,
# # threshlow=threshlow)
#
# boxesYC.append(boxes1)
# scoresYC.append(scores1)
# labelsYC.append(labels1)
# # boxesYC.append(boxes1)
# # scoresYC.append(scores1)
# # labelsYC.append(labels1)
for i in range(numVal):
print(i)
boxes2, scores2, labels2 = weighted_boxes_fusion([boxesCSC101[i], boxesCSC50[i]], [scoresCSC101[i], scoresCSC50[i]],
[labelsCSC101[i], labelsCSC50[i]], weights=weights,
iou_thr=threshiou,
skip_box_thr=threshlow)
# boxes2, scores2, labels2 = getBBOX([boxesCSC101[i], boxesYOLO[i]], [boxesCSC50[i], scoresCSC50[i]],
# [labelsCSC101[i], labelsCSC50[i]], w=weights, threshiou=threshiou,
# threshlow=threshlow)
# boxesCC.append(boxes2)
# scoresCC.append(scores2)
# labelsCC.append(labels2)
boxesCC.append(boxes2)
scoresCC.append(scores2)
labelsCC.append(labels2)
# with open('val_boxes_YC.pkl', 'wb') as f:
# pk.dump(boxesYC, f)
# with open('val_labels_YC.pkl', 'wb') as f:
# pk.dump(labelsYC, f)
# with open('val_scores_YC.pkl', 'wb') as f:
# pk.dump(scoresYC, f)
with open('val_boxes_CC.pkl', 'wb') as f:
pk.dump(boxesCC, f)
with open('val_labels_CC.pkl', 'wb') as f:
pk.dump(labelsCC, f)
with open('val_scores_CC.pkl', 'wb') as f:
pk.dump(scoresCC, f)
deneme = 0
| 3,254 | 35.166667 | 124 | py |
SyNet | SyNet-master/tensorpack/setup.py | from os import path
import setuptools
from setuptools import setup, find_packages
version = int(setuptools.__version__.split('.')[0])
assert version > 30, "Tensorpack installation requires setuptools > 30"
this_directory = path.abspath(path.dirname(__file__))
# setup metainfo
libinfo_py = path.join(this_directory, 'tensorpack', 'libinfo.py')
libinfo_content = open(libinfo_py, "r").readlines()
version_line = [l.strip() for l in libinfo_content if l.startswith('__version__')][0]
exec(version_line) # produce __version__
with open(path.join(this_directory, 'README.md'), 'rb') as f:
long_description = f.read().decode('utf-8')
def add_git_version():
def get_git_version():
from subprocess import check_output
try:
return check_output("git describe --tags --long --dirty".split()).decode('utf-8').strip()
except Exception:
return __version__ # noqa
newlibinfo_content = [l for l in libinfo_content if not l.startswith('__git_version__')]
newlibinfo_content.append('__git_version__ = "{}"'.format(get_git_version()))
with open(libinfo_py, "w") as f:
f.write("".join(newlibinfo_content))
add_git_version()
setup(
name='tensorpack',
author="TensorPack contributors",
author_email="[email protected]",
url="https://github.com/tensorpack/tensorpack",
keywords="tensorflow, deep learning, neural network",
license="Apache",
version=__version__, # noqa
description='A Neural Network Training Interface on TensorFlow',
long_description=long_description,
long_description_content_type='text/markdown',
packages=find_packages(exclude=["examples", "tests"]),
zip_safe=False, # dataset and __init__ use file
install_requires=[
"numpy>=1.14",
"six",
"termcolor>=1.1",
"tabulate>=0.7.7",
"tqdm>4.29.0",
"msgpack>=0.5.2",
"msgpack-numpy>=0.4.4.2",
"pyzmq>=16",
"psutil>=5",
],
tests_require=['flake8', 'scikit-image'],
extras_require={
'all': ['scipy', 'h5py', 'lmdb>=0.92', 'matplotlib', 'scikit-learn'],
'all: "linux" in sys_platform': ['python-prctl'],
},
# https://packaging.python.org/guides/distributing-packages-using-setuptools/#universal-wheels
options={'bdist_wheel': {'universal': '1'}},
)
| 2,356 | 30.851351 | 101 | py |
SyNet | SyNet-master/tensorpack/examples/boilerplate.py | # -*- coding: utf-8 -*-
# Author: Your Name <[email protected]>
import argparse
import os
import tensorflow as tf
from tensorpack import *
"""
This is a boiler-plate template.
All code in this file is the most minimalistic way to solve a deep-learning problem with cross-validation.
"""
BATCH_SIZE = 16
SHAPE = 28
CHANNELS = 3
class Model(ModelDesc):
def inputs(self):
return [tf.TensorSpec((None, SHAPE, SHAPE, CHANNELS), tf.float32, 'input1'),
tf.TensorSpec((None,), tf.int32, 'input2')]
def build_graph(self, input1, input2):
cost = tf.identity(input1 - input2, name='total_costs')
summary.add_moving_summary(cost)
return cost
def optimizer(self):
lr = tf.get_variable('learning_rate', initializer=5e-3, trainable=False)
return tf.train.AdamOptimizer(lr)
def get_data(subset):
# something that yields [[SHAPE, SHAPE, CHANNELS], [1]]
ds = FakeData([[SHAPE, SHAPE, CHANNELS], [1]], 1000, random=False,
dtype=['float32', 'uint8'], domain=[(0, 255), (0, 10)])
ds = MultiProcessRunnerZMQ(ds, 2)
ds = BatchData(ds, BATCH_SIZE)
return ds
def get_config():
logger.auto_set_dir()
ds_train = get_data('train')
ds_test = get_data('test')
return TrainConfig(
model=Model(),
data=QueueInput(ds_train),
callbacks=[
ModelSaver(),
InferenceRunner(ds_test, [ScalarStats('total_costs')]),
],
steps_per_epoch=len(ds_train),
max_epoch=100,
)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--gpu', help='comma separated list of GPU(s) to use.')
parser.add_argument('--load', help='load model')
args = parser.parse_args()
if args.gpu:
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
config = get_config()
config.session_init = SmartInit(args.load)
launch_train_with_config(config, SimpleTrainer())
| 1,979 | 25.052632 | 109 | py |
SyNet | SyNet-master/tensorpack/examples/basics/cifar-convnet.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# File: cifar-convnet.py
# Author: Yuxin Wu
import argparse
import os
import tensorflow as tf
from tensorpack import *
from tensorpack.dataflow import dataset
from tensorpack.tfutils.summary import *
from tensorpack.utils.gpu import get_num_gpu
"""
A small convnet model for Cifar10 or Cifar100 dataset.
Cifar10 trained on 1 GPU:
91% accuracy after 50k iterations.
79 itr/s on P100
Not a good model for Cifar100, just for demonstration.
"""
class Model(ModelDesc):
def __init__(self, cifar_classnum):
super(Model, self).__init__()
self.cifar_classnum = cifar_classnum
def inputs(self):
return [tf.TensorSpec((None, 30, 30, 3), tf.float32, 'input'),
tf.TensorSpec((None,), tf.int32, 'label')]
def build_graph(self, image, label):
drop_rate = tf.constant(0.5 if self.training else 0.0)
if self.training:
tf.summary.image("train_image", image, 10)
if tf.test.is_gpu_available():
image = tf.transpose(image, [0, 3, 1, 2])
data_format = 'channels_first'
else:
data_format = 'channels_last'
image = image / 4.0 # just to make range smaller
with argscope(Conv2D, activation=BNReLU, use_bias=False, kernel_size=3), \
argscope([Conv2D, MaxPooling, BatchNorm], data_format=data_format):
logits = LinearWrap(image) \
.Conv2D('conv1.1', filters=64) \
.Conv2D('conv1.2', filters=64) \
.MaxPooling('pool1', 3, stride=2, padding='SAME') \
.Conv2D('conv2.1', filters=128) \
.Conv2D('conv2.2', filters=128) \
.MaxPooling('pool2', 3, stride=2, padding='SAME') \
.Conv2D('conv3.1', filters=128, padding='VALID') \
.Conv2D('conv3.2', filters=128, padding='VALID') \
.FullyConnected('fc0', 1024 + 512, activation=tf.nn.relu) \
.Dropout(rate=drop_rate) \
.FullyConnected('fc1', 512, activation=tf.nn.relu) \
.FullyConnected('linear', out_dim=self.cifar_classnum)()
cost = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=label)
cost = tf.reduce_mean(cost, name='cross_entropy_loss')
correct = tf.cast(tf.nn.in_top_k(predictions=logits, targets=label, k=1), tf.float32, name='correct')
# monitor training error
add_moving_summary(tf.reduce_mean(correct, name='accuracy'))
# weight decay on all W of fc layers
wd_cost = regularize_cost('fc.*/W', l2_regularizer(4e-4), name='regularize_loss')
add_moving_summary(cost, wd_cost)
add_param_summary(('.*/W', ['histogram'])) # monitor W
return tf.add_n([cost, wd_cost], name='cost')
def optimizer(self):
lr = tf.get_variable('learning_rate', initializer=1e-2, trainable=False)
tf.summary.scalar('lr', lr)
return tf.train.AdamOptimizer(lr, epsilon=1e-3)
def get_data(train_or_test, cifar_classnum):
isTrain = train_or_test == 'train'
if cifar_classnum == 10:
ds = dataset.Cifar10(train_or_test)
else:
ds = dataset.Cifar100(train_or_test)
if isTrain:
augmentors = [
imgaug.RandomCrop((30, 30)),
imgaug.Flip(horiz=True),
imgaug.Brightness(63),
imgaug.Contrast((0.2, 1.8)),
imgaug.MeanVarianceNormalize(all_channel=True)
]
else:
augmentors = [
imgaug.CenterCrop((30, 30)),
imgaug.MeanVarianceNormalize(all_channel=True)
]
ds = AugmentImageComponent(ds, augmentors)
ds = BatchData(ds, 128, remainder=not isTrain)
if isTrain:
ds = MultiProcessRunnerZMQ(ds, 5)
return ds
def get_config(cifar_classnum):
# prepare dataset
dataset_train = get_data('train', cifar_classnum)
dataset_test = get_data('test', cifar_classnum)
def lr_func(lr):
if lr < 3e-5:
raise StopTraining()
return lr * 0.31
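    # Rough intent of the schedule below, based on how StatMonitorParamSetter
    # is configured: when 'validation_accuracy' has not improved by more than
    # 0.001 over the last 10 epochs, the learning rate is multiplied by 0.31;
    # once it would fall below 3e-5, lr_func raises StopTraining and the run
    # ends.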
return TrainConfig(
model=Model(cifar_classnum),
data=QueueInput(dataset_train),
callbacks=[
ModelSaver(),
InferenceRunner(dataset_test,
ScalarStats(['accuracy', 'cost'])),
StatMonitorParamSetter('learning_rate', 'validation_accuracy', lr_func,
threshold=0.001, last_k=10, reverse=True),
],
max_epoch=150,
)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--gpu', help='comma separated list of GPU(s) to use.')
parser.add_argument('--load', help='load model')
parser.add_argument('--classnum', help='10 for cifar10 or 100 for cifar100',
type=int, default=10)
args = parser.parse_args()
if args.gpu:
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
with tf.Graph().as_default():
logger.set_logger_dir(os.path.join('train_log', 'cifar' + str(args.classnum)))
config = get_config(args.classnum)
config.session_init = SmartInit(args.load)
num_gpu = get_num_gpu()
trainer = SimpleTrainer() if num_gpu <= 1 \
else SyncMultiGPUTrainerParameterServer(num_gpu)
launch_train_with_config(config, trainer)
| 5,396 | 34.506579 | 109 | py |
SyNet | SyNet-master/tensorpack/examples/basics/svhn-digit-convnet.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# File: svhn-digit-convnet.py
# Author: Yuxin Wu
import argparse
import os
import tensorflow as tf
from tensorpack import *
from tensorpack.dataflow import dataset
from tensorpack.tfutils.summary import *
"""
A very small SVHN convnet model (only 0.8m parameters).
About 2.3% validation error after 70 epochs. 2.15% after 150 epochs.
Each epoch iterates over the whole training set (4721 iterations), and takes about 24s on a P100.
"""
class Model(ModelDesc):
def inputs(self):
return [tf.TensorSpec([None, 40, 40, 3], tf.float32, 'input'),
tf.TensorSpec([None], tf.int32, 'label')]
def build_graph(self, image, label):
image = image / 128.0 - 1
with argscope(Conv2D, activation=BNReLU, use_bias=False):
logits = (LinearWrap(image)
.Conv2D('conv1', 24, 5, padding='VALID')
.MaxPooling('pool1', 2, padding='SAME')
.Conv2D('conv2', 32, 3, padding='VALID')
.Conv2D('conv3', 32, 3, padding='VALID')
.MaxPooling('pool2', 2, padding='SAME')
.Conv2D('conv4', 64, 3, padding='VALID')
.Dropout('drop', rate=0.5)
.FullyConnected('fc0', 512,
bias_initializer=tf.constant_initializer(0.1),
activation=tf.nn.relu)
.FullyConnected('linear', units=10)())
tf.nn.softmax(logits, name='output')
accuracy = tf.cast(tf.nn.in_top_k(logits, label, 1), tf.float32)
add_moving_summary(tf.reduce_mean(accuracy, name='accuracy'))
cost = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=label)
cost = tf.reduce_mean(cost, name='cross_entropy_loss')
wd_cost = regularize_cost('fc.*/W', l2_regularizer(0.00001))
add_moving_summary(cost, wd_cost)
add_param_summary(('.*/W', ['histogram', 'rms'])) # monitor W
return tf.add_n([cost, wd_cost], name='cost')
def optimizer(self):
lr = tf.train.exponential_decay(
learning_rate=1e-3,
global_step=get_global_step_var(),
decay_steps=4721 * 60,
decay_rate=0.2, staircase=True, name='learning_rate')
tf.summary.scalar('lr', lr)
return tf.train.AdamOptimizer(lr)
def get_data():
d1 = dataset.SVHNDigit('train')
d2 = dataset.SVHNDigit('extra')
data_train = RandomMixData([d1, d2])
data_test = dataset.SVHNDigit('test', shuffle=False)
augmentors = [
imgaug.Resize((40, 40)),
imgaug.Brightness(30),
imgaug.Contrast((0.5, 1.5)),
]
data_train = AugmentImageComponent(data_train, augmentors)
data_train = BatchData(data_train, 128)
data_train = MultiProcessRunner(data_train, 5, 5)
augmentors = [imgaug.Resize((40, 40))]
data_test = AugmentImageComponent(data_test, augmentors)
data_test = BatchData(data_test, 128, remainder=True)
return data_train, data_test
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--gpu', help='comma separated list of GPU(s) to use.')
parser.add_argument('--load', help='load model')
args = parser.parse_args()
if args.gpu:
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
logger.auto_set_dir()
data_train, data_test = get_data()
config = TrainConfig(
model=Model(),
data=QueueInput(data_train),
callbacks=[
ModelSaver(),
InferenceRunner(data_test,
ScalarStats(['cost', 'accuracy']))
],
max_epoch=350,
session_init=SmartInit(args.load)
)
launch_train_with_config(config, SimpleTrainer())
| 3,850 | 33.079646 | 97 | py |
SyNet | SyNet-master/tensorpack/examples/basics/mnist-tflayers.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# File: mnist-tflayers.py
import tensorflow as tf
from tensorpack import *
from tensorpack.dataflow import dataset
from tensorpack.tfutils import summary
"""
MNIST ConvNet example using tf.layers
Mostly the same as 'mnist-convnet.py',
the only differences are:
1. use tf.layers
2. use tf.layers variable names to summarize weights
"""
IMAGE_SIZE = 28
# Monkey-patch tf.layers to support argscope.
enable_argscope_for_module(tf.layers)
class Model(ModelDesc):
def inputs(self):
"""
Define all the inputs (with type, shape, name) that the graph will need.
"""
return [tf.TensorSpec((None, IMAGE_SIZE, IMAGE_SIZE), tf.float32, 'input'),
tf.TensorSpec((None,), tf.int32, 'label')]
def build_graph(self, image, label):
"""This function should build the model which takes the input variables
and return cost at the end"""
# In tensorflow, inputs to convolution function are assumed to be
# NHWC. Add a single channel here.
image = tf.expand_dims(image, 3)
image = image * 2 - 1 # center the pixels values at zero
# The context manager `argscope` sets the default option for all the layers under
# this context. Here we use 32 channel convolution with shape 3x3
with argscope([tf.layers.conv2d], padding='same', activation=tf.nn.relu):
l = tf.layers.conv2d(image, 32, 3, name='conv0')
l = tf.layers.max_pooling2d(l, 2, 2, padding='valid')
l = tf.layers.conv2d(l, 32, 3, name='conv1')
l = tf.layers.conv2d(l, 32, 3, name='conv2')
l = tf.layers.max_pooling2d(l, 2, 2, padding='valid')
l = tf.layers.conv2d(l, 32, 3, name='conv3')
l = tf.layers.flatten(l)
l = tf.layers.dense(l, 512, activation=tf.nn.relu, name='fc0')
l = tf.layers.dropout(l, rate=0.5, training=self.training)
logits = tf.layers.dense(l, 10, activation=tf.identity, name='fc1')
# a vector of length B with loss of each sample
cost = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=label)
cost = tf.reduce_mean(cost, name='cross_entropy_loss') # the average cross-entropy loss
correct = tf.cast(tf.nn.in_top_k(logits, label, 1), tf.float32, name='correct')
accuracy = tf.reduce_mean(correct, name='accuracy')
# This will monitor training error & accuracy (in a moving average fashion). The value will be automatically
        # 1. written to tensorboard
# 2. written to stat.json
# 3. printed after each epoch
train_error = tf.reduce_mean(1 - correct, name='train_error')
summary.add_moving_summary(train_error, accuracy)
# Use a regex to find parameters to apply weight decay.
# Here we apply a weight decay on all W (weight matrix) of all fc layers
# If you don't like regex, you can certainly define the cost in any other methods.
wd_cost = tf.multiply(1e-5,
regularize_cost('fc.*/kernel', tf.nn.l2_loss),
name='regularize_loss')
total_cost = tf.add_n([wd_cost, cost], name='total_cost')
summary.add_moving_summary(cost, wd_cost, total_cost)
# monitor histogram of all weight (of conv and fc layers) in tensorboard
summary.add_param_summary(('.*/kernel', ['histogram', 'rms']))
# the function should return the total cost to be optimized
return total_cost
def optimizer(self):
lr = tf.train.exponential_decay(
learning_rate=1e-3,
global_step=get_global_step_var(),
decay_steps=468 * 10,
decay_rate=0.3, staircase=True, name='learning_rate')
# This will also put the summary in tensorboard, stat.json and print in terminal
# but this time without moving average
tf.summary.scalar('lr', lr)
return tf.train.AdamOptimizer(lr)
def get_data():
train = BatchData(dataset.Mnist('train'), 128)
test = BatchData(dataset.Mnist('test'), 256, remainder=True)
return train, test
if __name__ == '__main__':
# automatically setup the directory train_log/mnist-convnet for logging
logger.auto_set_dir()
dataset_train, dataset_test = get_data()
# How many iterations you want in each epoch.
# This len(data) is the default value.
steps_per_epoch = len(dataset_train)
# get the config which contains everything necessary in a training
config = TrainConfig(
model=Model(),
        # The input source for training. FeedInput is slow, this is just for demo purposes.
# In practice it's best to use QueueInput or others. See tutorials for details.
data=FeedInput(dataset_train),
callbacks=[
ModelSaver(), # save the model after every epoch
InferenceRunner( # run inference(for validation) after every epoch
dataset_test, # the DataFlow instance used for validation
ScalarStats(['cross_entropy_loss', 'accuracy'])),
MaxSaver('validation_accuracy'), # save the model with highest accuracy (prefix 'validation_')
],
steps_per_epoch=steps_per_epoch,
max_epoch=100,
)
launch_train_with_config(config, SimpleTrainer())
| 5,392 | 40.806202 | 116 | py |
SyNet | SyNet-master/tensorpack/examples/basics/mnist-convnet.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# File: mnist-convnet.py
import tensorflow as tf
from tensorpack import *
from tensorpack.dataflow import dataset
from tensorpack.tfutils import summary
"""
MNIST ConvNet example.
about 0.6% validation error after 30 epochs.
"""
IMAGE_SIZE = 28
class Model(ModelDesc):
# See tutorial at https://tensorpack.readthedocs.io/tutorial/training-interface.html#with-modeldesc-and-trainconfig
def inputs(self):
"""
Define all the inputs (with type, shape, name) that the graph will need.
"""
return [tf.TensorSpec((None, IMAGE_SIZE, IMAGE_SIZE), tf.float32, 'input'),
tf.TensorSpec((None,), tf.int32, 'label')]
def build_graph(self, image, label):
"""This function should build the model which takes the input variables (defined above)
and return cost at the end."""
# In tensorflow, inputs to convolution function are assumed to be
# NHWC. Add a single channel here.
image = tf.expand_dims(image, 3)
image = image * 2 - 1 # center the pixels values at zero
# The context manager `argscope` sets the default option for all the layers under
# this context. Here we use 32 channel convolution with shape 3x3
# See tutorial at https://tensorpack.readthedocs.io/tutorial/symbolic.html
with argscope(Conv2D, kernel_size=3, activation=tf.nn.relu, filters=32):
# LinearWrap is just a syntax sugar.
# See tutorial at https://tensorpack.readthedocs.io/tutorial/symbolic.html
logits = (LinearWrap(image)
.Conv2D('conv0')
.MaxPooling('pool0', 2)
.Conv2D('conv1')
.Conv2D('conv2')
.MaxPooling('pool1', 2)
.Conv2D('conv3')
.FullyConnected('fc0', 512, activation=tf.nn.relu)
.Dropout('dropout', rate=0.5)
.FullyConnected('fc1', 10, activation=tf.identity)())
# a vector of length B with loss of each sample
cost = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=label)
cost = tf.reduce_mean(cost, name='cross_entropy_loss') # the average cross-entropy loss
correct = tf.cast(tf.nn.in_top_k(predictions=logits, targets=label, k=1), tf.float32, name='correct')
accuracy = tf.reduce_mean(correct, name='accuracy')
# This will monitor training error & accuracy (in a moving average fashion). The value will be automatically
        # 1. written to tensorboard
# 2. written to stat.json
# 3. printed after each epoch
# You can also just call `tf.summary.scalar`. But moving summary has some other benefits.
# See tutorial at https://tensorpack.readthedocs.io/tutorial/summary.html
train_error = tf.reduce_mean(1 - correct, name='train_error')
summary.add_moving_summary(train_error, accuracy)
# Use a regex to find parameters to apply weight decay.
# Here we apply a weight decay on all W (weight matrix) of all fc layers
# If you don't like regex, you can certainly define the cost in any other methods.
wd_cost = tf.multiply(1e-5,
regularize_cost('fc.*/W', tf.nn.l2_loss),
name='regularize_loss')
total_cost = tf.add_n([wd_cost, cost], name='total_cost')
summary.add_moving_summary(cost, wd_cost, total_cost)
# monitor histogram of all weight (of conv and fc layers) in tensorboard
summary.add_param_summary(('.*/W', ['histogram', 'rms']))
# the function should return the total cost to be optimized
return total_cost
def optimizer(self):
lr = tf.train.exponential_decay(
learning_rate=1e-3,
global_step=get_global_step_var(),
decay_steps=468 * 10,
decay_rate=0.3, staircase=True, name='learning_rate')
# This will also put the summary in tensorboard, stat.json and print in terminal,
# but this time without moving average
tf.summary.scalar('lr', lr)
return tf.train.AdamOptimizer(lr)
def get_data():
# We don't need any fancy data loading for this simple example.
# See dataflow tutorial at https://tensorpack.readthedocs.io/tutorial/dataflow.html
train = BatchData(dataset.Mnist('train'), 128)
test = BatchData(dataset.Mnist('test'), 256, remainder=True)
train = PrintData(train)
return train, test
if __name__ == '__main__':
# automatically setup the directory train_log/mnist-convnet for logging
logger.auto_set_dir()
dataset_train, dataset_test = get_data()
# How many iterations you want in each epoch.
# This len(data) is the default value.
steps_per_epoch = len(dataset_train)
# get the config which contains everything necessary in a training
config = TrainConfig(
model=Model(),
        # The input source for training. FeedInput is slow, this is just for demo purposes.
# In practice it's best to use QueueInput or others.
# See tutorial at https://tensorpack.readthedocs.io/tutorial/extend/input-source.html
data=FeedInput(dataset_train),
# We use a few simple callbacks in this demo.
# See tutorial at https://tensorpack.readthedocs.io/tutorial/callback.html
callbacks=[
ModelSaver(), # save the model after every epoch
InferenceRunner( # run inference(for validation) after every epoch
dataset_test, # the DataFlow instance used for validation
ScalarStats( # produce `val_accuracy` and `val_cross_entropy_loss`
['cross_entropy_loss', 'accuracy'], prefix='val')),
# MaxSaver needs to come after InferenceRunner to obtain its score
MaxSaver('val_accuracy'), # save the model with highest accuracy
],
steps_per_epoch=steps_per_epoch,
max_epoch=100,
)
# Use a simple trainer in this demo.
# More trainers with multi-gpu or distributed functionalities are available.
# See tutorial at https://tensorpack.readthedocs.io/tutorial/trainer.html
launch_train_with_config(config, SimpleTrainer())
| 6,339 | 43.647887 | 119 | py |
SyNet | SyNet-master/tensorpack/examples/basics/mnist-visualizations.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# File: mnist-visualizations.py
"""
The same MNIST ConvNet example, but with weights/activations visualization.
"""
import tensorflow as tf
from tensorpack import *
from tensorpack.dataflow import dataset
IMAGE_SIZE = 28
def visualize_conv_weights(filters, name):
"""Visualize use weights in convolution filters.
Args:
filters: tensor containing the weights [H,W,Cin,Cout]
name: label for tensorboard
Returns:
image of all weight
"""
with tf.name_scope('visualize_w_' + name):
filters = tf.transpose(filters, (3, 2, 0, 1)) # [h, w, cin, cout] -> [cout, cin, h, w]
filters = tf.unstack(filters) # --> cout * [cin, h, w]
filters = tf.concat(filters, 1) # --> [cin, cout * h, w]
filters = tf.unstack(filters) # --> cin * [cout * h, w]
filters = tf.concat(filters, 1) # --> [cout * h, cin * w]
filters = tf.expand_dims(filters, 0)
filters = tf.expand_dims(filters, -1)
tf.summary.image('visualize_w_' + name, filters)
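# Shape example for the routine above: a kernel of shape [3, 3, 32, 64]
# (H, W, Cin, Cout) is rearranged into a single [64*3, 32*3] = [192, 96]
# image, with the Cout filters stacked along the height axis and the Cin
# channels along the width axis.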
def visualize_conv_activations(activation, name):
"""Visualize activations for convolution layers.
Remarks:
This tries to place all activations into a square.
Args:
activation: tensor with the activation [B,H,W,C]
name: label for tensorboard
Returns:
image of almost all activations
"""
import math
with tf.name_scope('visualize_act_' + name):
_, h, w, c = activation.get_shape().as_list()
rows = []
c_per_row = int(math.sqrt(c))
for y in range(0, c - c_per_row, c_per_row):
row = activation[:, :, :, y:y + c_per_row] # [?, H, W, 32] --> [?, H, W, 5]
cols = tf.unstack(row, axis=3) # [?, H, W, 5] --> 5 * [?, H, W]
row = tf.concat(cols, 1)
rows.append(row)
viz = tf.concat(rows, 2)
tf.summary.image('visualize_act_' + name, tf.expand_dims(viz, -1))
class Model(ModelDesc):
def inputs(self):
return [tf.TensorSpec((None, IMAGE_SIZE, IMAGE_SIZE), tf.float32, 'input'),
tf.TensorSpec((None,), tf.int32, 'label')]
def build_graph(self, image, label):
image = tf.expand_dims(image * 2 - 1, 3)
with argscope(Conv2D, kernel_shape=3, nl=tf.nn.relu, out_channel=32):
c0 = Conv2D('conv0', image)
p0 = MaxPooling('pool0', c0, 2)
c1 = Conv2D('conv1', p0)
c2 = Conv2D('conv2', c1)
p1 = MaxPooling('pool1', c2, 2)
c3 = Conv2D('conv3', p1)
fc1 = FullyConnected('fc0', c3, 512, nl=tf.nn.relu)
fc1 = Dropout('dropout', fc1, 0.5)
logits = FullyConnected('fc1', fc1, out_dim=10, nl=tf.identity)
with tf.name_scope('visualizations'):
visualize_conv_weights(c0.variables.W, 'conv0')
visualize_conv_activations(c0, 'conv0')
visualize_conv_weights(c1.variables.W, 'conv1')
visualize_conv_activations(c1, 'conv1')
visualize_conv_weights(c2.variables.W, 'conv2')
visualize_conv_activations(c2, 'conv2')
visualize_conv_weights(c3.variables.W, 'conv3')
visualize_conv_activations(c3, 'conv3')
tf.summary.image('input', (image + 1.0) * 128., 3)
cost = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=label)
cost = tf.reduce_mean(cost, name='cross_entropy_loss')
tf.reduce_mean(tf.cast(tf.nn.in_top_k(logits, label, 1), tf.float32), name='accuracy')
wd_cost = tf.multiply(1e-5,
regularize_cost('fc.*/W', tf.nn.l2_loss),
name='regularize_loss')
return tf.add_n([wd_cost, cost], name='total_cost')
def optimizer(self):
lr = tf.train.exponential_decay(
learning_rate=1e-3,
global_step=get_global_step_var(),
decay_steps=468 * 10,
decay_rate=0.3, staircase=True, name='learning_rate')
tf.summary.scalar('lr', lr)
return tf.train.AdamOptimizer(lr)
def get_data():
train = BatchData(dataset.Mnist('train'), 128)
test = BatchData(dataset.Mnist('test'), 256, remainder=True)
return train, test
if __name__ == '__main__':
logger.auto_set_dir()
dataset_train, dataset_test = get_data()
config = TrainConfig(
model=Model(),
dataflow=dataset_train,
callbacks=[
ModelSaver(),
InferenceRunner(
dataset_test, ScalarStats(['cross_entropy_loss', 'accuracy'])),
],
steps_per_epoch=len(dataset_train),
max_epoch=100,
)
launch_train_with_config(config, SimpleTrainer())
| 4,841 | 33.340426 | 96 | py |
SyNet | SyNet-master/tensorpack/examples/basics/export-model.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import argparse
import cv2
import tensorflow as tf
from tensorpack import *
from tensorpack.tfutils.export import ModelExporter
"""
This example illustrates the process of exporting a model trained in Tensorpack to:
- SavedModel format for TensorFlow Serving
- A frozen and pruned inference graph (compact)
The model applies a laplace filter to the input image.
The steps are:
1. train the model by
python export-model.py
2. export the model by
python export-model.py --export serving --load train_log/export/checkpoint
python export-model.py --export compact --load train_log/export/checkpoint
3. run inference by
python export-model.py --apply default --load train_log/export/checkpoint
python export-model.py --apply inference_graph --load train_log/export/checkpoint
python export-model.py --apply compact --load /tmp/compact_graph.pb
"""
SHAPE = 256
CHANNELS = 3
class Model(ModelDesc):
"""Just a simple model, which applies the Laplacian-operation to images to showcase
    the usage of variables, and altering the inference graph later.
"""
def inputs(self):
return [tf.TensorSpec((None, SHAPE, SHAPE, CHANNELS), tf.uint8, 'input_img'),
tf.TensorSpec((None, SHAPE, SHAPE, CHANNELS), tf.uint8, 'target_img')]
def make_prediction(self, img):
img = tf.cast(img, tf.float32)
img = tf.image.rgb_to_grayscale(img)
k = tf.get_variable('filter', dtype=tf.float32,
initializer=[[[[0.]], [[1.]], [[0.]]], [
[[1.]], [[-4.]], [[1.]]], [[[0.]], [[1.]], [[0.]]]])
prediction_img = tf.nn.conv2d(img, k, strides=[1, 1, 1, 1], padding='SAME')
return prediction_img
def build_graph(self, input_img, target_img):
target_img = tf.cast(target_img, tf.float32)
target_img = tf.image.rgb_to_grayscale(target_img)
self.prediction_img = tf.identity(self.make_prediction(input_img), name='prediction_img')
cost = tf.losses.mean_squared_error(target_img, self.prediction_img,
reduction=tf.losses.Reduction.MEAN)
return tf.identity(cost, name='total_costs')
def optimizer(self):
lr = tf.get_variable('learning_rate', initializer=0.0, trainable=False)
return tf.train.AdamOptimizer(lr)
def get_data(subset):
ds = FakeData([[SHAPE, SHAPE, CHANNELS], [SHAPE, SHAPE, CHANNELS]], 1000, random=False,
dtype=['uint8', 'uint8'], domain=[(0, 255), (0, 10)])
ds = BatchData(ds, 1)
return ds
class InferenceOnlyModel(Model):
"""Recreate a different inference graph to accept images encoded as png. """
def inputs(self):
# The inference graph only accepts a single image, which is different to the training model.
return [tf.TensorSpec((None,), tf.string, 'input_img_bytes')]
def build_graph(self, input_img_bytes):
# prepare input (png encoded strings to images)
input_img = tf.map_fn(lambda x: tf.image.decode_png(x, channels=3), input_img_bytes, dtype=tf.uint8)
# just copy the relevant parts to this graph.
prediction_img = self.make_prediction(input_img)
        # outputs should be png encoded strings again
prediction_img = tf.clip_by_value(prediction_img, 0, 255)
prediction_img = tf.cast(prediction_img, tf.uint8)
prediction_img_bytes = tf.map_fn(tf.image.encode_png, prediction_img, dtype=tf.string)
tf.identity(prediction_img_bytes, name='prediction_img_bytes')
def export_serving(model_path):
"""Export trained model to use it in TensorFlow Serving or cloudML. """
pred_config = PredictConfig(
session_init=SmartInit(model_path),
model=InferenceOnlyModel(),
input_names=['input_img_bytes'],
output_names=['prediction_img_bytes'])
ModelExporter(pred_config).export_serving('/tmp/exported')
def export_compact(model_path):
"""Export trained model to use it as a frozen and pruned inference graph in
mobile applications. """
pred_config = PredictConfig(
session_init=SmartInit(model_path),
model=Model(),
input_names=['input_img'],
output_names=['prediction_img'])
ModelExporter(pred_config).export_compact('/tmp/compact_graph.pb')
def apply(model_path):
"""Run inference from a training model checkpoint. """
pred_config = PredictConfig(
session_init=SmartInit(model_path),
model=Model(),
input_names=['input_img'],
output_names=['prediction_img'])
pred = OfflinePredictor(pred_config)
img = cv2.imread('lena.png')
prediction = pred([img])[0]
cv2.imwrite('applied_default.jpg', prediction[0])
def apply_inference_graph(model_path):
"""Run inference from a different graph, which receives encoded images buffers. """
pred_config = PredictConfig(
session_init=SmartInit(model_path),
model=InferenceOnlyModel(),
input_names=['input_img_bytes'],
output_names=['prediction_img_bytes'])
pred = OfflinePredictor(pred_config)
buf = open('lena.png', 'rb').read()
prediction = pred([buf])[0]
with open('applied_inference_graph.png', 'wb') as f:
f.write(prediction[0])
def apply_compact(graph_path):
"""Run the pruned and frozen inference graph. """
with tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) as sess:
# Note, we just load the graph and do *not* need to initialize anything.
with tf.gfile.GFile(graph_path, "rb") as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
tf.import_graph_def(graph_def)
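        # tf.import_graph_def prefixes imported tensor names with "import/" by default,
        # which is why the tensors are looked up under "import/..." below.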
input_img = sess.graph.get_tensor_by_name('import/input_img:0')
prediction_img = sess.graph.get_tensor_by_name('import/prediction_img:0')
prediction = sess.run(prediction_img, {input_img: cv2.imread('lena.png')[None, ...]})
cv2.imwrite('applied_compact.png', prediction[0])
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--load', help='load model')
parser.add_argument('--apply', help='run sampling', default='',
choices=['default', 'inference_graph', 'compact'])
parser.add_argument('--export', help='export the model', default='',
choices=['serving', 'compact'])
args = parser.parse_args()
if args.apply != '':
if args.apply == 'default':
apply(args.load)
elif args.apply == 'inference_graph':
apply_inference_graph(args.load)
else:
apply_compact(args.load)
elif args.export != '':
if args.export == 'serving':
export_serving(args.load)
else:
export_compact(args.load)
else:
logger.auto_set_dir()
ds_train = get_data('train')
config = TrainConfig(
model=Model(),
data=QueueInput(ds_train),
callbacks=[
ModelSaver(),
],
steps_per_epoch=1,
max_epoch=1,
)
launch_train_with_config(config, SimpleTrainer())
| 7,254 | 33.712919 | 108 | py |
SyNet | SyNet-master/tensorpack/examples/basics/mnist-tfslim.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# File: mnist-tfslim.py
"""
MNIST ConvNet example using TensorFlow-slim.
Mostly the same as 'mnist-convnet.py',
the only differences are:
1. use slim.layers, slim.arg_scope, etc
2. use slim names to summarize weights
"""
import tensorflow as tf
import tensorflow.contrib.slim as slim
from tensorpack import *
from tensorpack.dataflow import dataset
IMAGE_SIZE = 28
class Model(ModelDesc):
def inputs(self):
return [tf.TensorSpec((None, IMAGE_SIZE, IMAGE_SIZE), tf.float32, 'input'),
tf.TensorSpec((None,), tf.int32, 'label')]
def build_graph(self, image, label):
image = tf.expand_dims(image, 3)
image = image * 2 - 1
with slim.arg_scope([slim.layers.fully_connected],
weights_regularizer=slim.l2_regularizer(1e-5)):
l = slim.layers.conv2d(image, 32, [3, 3], scope='conv0')
l = slim.layers.max_pool2d(l, [2, 2], scope='pool0')
l = slim.layers.conv2d(l, 32, [3, 3], padding='SAME', scope='conv1')
l = slim.layers.conv2d(l, 32, [3, 3], scope='conv2')
l = slim.layers.max_pool2d(l, [2, 2], scope='pool1')
l = slim.layers.conv2d(l, 32, [3, 3], scope='conv3')
l = slim.layers.flatten(l, scope='flatten')
l = slim.layers.fully_connected(l, 512, scope='fc0')
l = slim.layers.dropout(l, is_training=self.training)
logits = slim.layers.fully_connected(l, 10, activation_fn=None, scope='fc1')
cost = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=label)
cost = tf.reduce_mean(cost, name='cross_entropy_loss')
acc = tf.cast(tf.nn.in_top_k(logits, label, 1), tf.float32)
acc = tf.reduce_mean(acc, name='accuracy')
summary.add_moving_summary(acc)
summary.add_moving_summary(cost)
summary.add_param_summary(('.*/weights', ['histogram', 'rms'])) # slim uses different variable names
return cost + regularize_cost_from_collection()
def optimizer(self):
lr = tf.train.exponential_decay(
learning_rate=1e-3,
global_step=get_global_step_var(),
decay_steps=468 * 10,
decay_rate=0.3, staircase=True, name='learning_rate')
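        # 468 is roughly one epoch of MNIST at batch size 128 (60000 / 128 ~= 468),
        # so the learning rate is multiplied by 0.3 every ~10 epochs (staircase decay).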
tf.summary.scalar('lr', lr)
return tf.train.AdamOptimizer(lr)
def get_data():
train = BatchData(dataset.Mnist('train'), 128)
test = BatchData(dataset.Mnist('test'), 256, remainder=True)
return train, test
if __name__ == '__main__':
logger.auto_set_dir()
dataset_train, dataset_test = get_data()
config = TrainConfig(
model=Model(),
dataflow=dataset_train,
callbacks=[
ModelSaver(),
InferenceRunner(
dataset_test,
ScalarStats(['cross_entropy_loss', 'accuracy'])),
],
max_epoch=100,
)
launch_train_with_config(config, SimpleTrainer())
| 3,003 | 32.377778 | 109 | py |
SyNet | SyNet-master/tensorpack/examples/FasterRCNN/data.py | # -*- coding: utf-8 -*-
# File: data.py
import copy
import itertools
import numpy as np
import cv2
from tabulate import tabulate
from termcolor import colored
from tensorpack.dataflow import (
DataFromList, MapData, MapDataComponent,
MultiProcessMapData, MultiThreadMapData, TestDataSpeed, imgaug,
)
from tensorpack.utils import logger
from tensorpack.utils.argtools import log_once
from modeling.model_rpn import get_all_anchors
from modeling.model_fpn import get_all_anchors_fpn
from common import (
CustomResize, DataFromListOfDict, box_to_point4,
filter_boxes_inside_shape, np_iou, point4_to_box, polygons_to_mask,
)
from config import config as cfg
from dataset import DatasetRegistry, register_coco
from utils.np_box_ops import area as np_area
from utils.np_box_ops import ioa as np_ioa
# import tensorpack.utils.viz as tpviz
class MalformedData(BaseException):
pass
def print_class_histogram(roidbs):
"""
Args:
roidbs (list[dict]): the same format as the output of `training_roidbs`.
"""
class_names = DatasetRegistry.get_metadata(cfg.DATA.TRAIN[0], 'class_names')
# labels are in [1, NUM_CATEGORY], hence +2 for bins
hist_bins = np.arange(cfg.DATA.NUM_CATEGORY + 2)
# Histogram of ground-truth objects
gt_hist = np.zeros((cfg.DATA.NUM_CATEGORY + 1,), dtype=np.int)
for entry in roidbs:
# filter crowd?
gt_inds = np.where((entry["class"] > 0) & (entry["is_crowd"] == 0))[0]
gt_classes = entry["class"][gt_inds]
if len(gt_classes):
assert gt_classes.max() <= len(class_names) - 1
gt_hist += np.histogram(gt_classes, bins=hist_bins)[0]
data = list(itertools.chain(*[[class_names[i + 1], v] for i, v in enumerate(gt_hist[1:])]))
COL = min(6, len(data))
total_instances = sum(data[1::2])
data.extend([None] * ((COL - len(data) % COL) % COL))
data.extend(["total", total_instances])
data = itertools.zip_longest(*[data[i::COL] for i in range(COL)])
# the first line is BG
table = tabulate(data, headers=["class", "#box"] * (COL // 2), tablefmt="pipe", stralign="center", numalign="left")
logger.info("Ground-Truth category distribution:\n" + colored(table, "cyan"))
class TrainingDataPreprocessor:
"""
The mapper to preprocess the input data for training.
Since the mapping may run in other processes, we write a new class and
explicitly pass cfg to it, in the spirit of "explicitly pass resources to subprocess".
"""
def __init__(self, cfg):
self.cfg = cfg
self.aug = imgaug.AugmentorList([
CustomResize(cfg.PREPROC.TRAIN_SHORT_EDGE_SIZE, cfg.PREPROC.MAX_SIZE),
imgaug.Flip(horiz=True)
])
def __call__(self, roidb):
fname, boxes, klass, is_crowd = roidb["file_name"], roidb["boxes"], roidb["class"], roidb["is_crowd"]
assert boxes.ndim == 2 and boxes.shape[1] == 4, boxes.shape
boxes = np.copy(boxes)
im = cv2.imread(fname, cv2.IMREAD_COLOR)
assert im is not None, fname
im = im.astype("float32")
height, width = im.shape[:2]
# assume floatbox as input
assert boxes.dtype == np.float32, "Loader has to return float32 boxes!"
if not self.cfg.DATA.ABSOLUTE_COORD:
boxes[:, 0::2] *= width
boxes[:, 1::2] *= height
# augmentation:
tfms = self.aug.get_transform(im)
im = tfms.apply_image(im)
points = box_to_point4(boxes)
points = tfms.apply_coords(points)
boxes = point4_to_box(points)
if len(boxes):
assert klass.max() <= self.cfg.DATA.NUM_CATEGORY, \
"Invalid category {}!".format(klass.max())
assert np.min(np_area(boxes)) > 0, "Some boxes have zero area!"
ret = {"image": im}
# Add rpn data to dataflow:
try:
if self.cfg.MODE_FPN:
multilevel_anchor_inputs = self.get_multilevel_rpn_anchor_input(im, boxes, is_crowd)
for i, (anchor_labels, anchor_boxes) in enumerate(multilevel_anchor_inputs):
ret["anchor_labels_lvl{}".format(i + 2)] = anchor_labels
ret["anchor_boxes_lvl{}".format(i + 2)] = anchor_boxes
else:
ret["anchor_labels"], ret["anchor_boxes"] = self.get_rpn_anchor_input(im, boxes, is_crowd)
boxes = boxes[is_crowd == 0] # skip crowd boxes in training target
klass = klass[is_crowd == 0]
ret["gt_boxes"] = boxes
ret["gt_labels"] = klass
except MalformedData as e:
log_once("Input {} is filtered for training: {}".format(fname, str(e)), "warn")
return None
if self.cfg.MODE_MASK:
# augmentation will modify the polys in-place
segmentation = copy.deepcopy(roidb["segmentation"])
segmentation = [segmentation[k] for k in range(len(segmentation)) if not is_crowd[k]]
assert len(segmentation) == len(boxes)
# Apply augmentation on polygon coordinates.
# And produce one image-sized binary mask per box.
masks = []
width_height = np.asarray([width, height], dtype=np.float32)
gt_mask_width = int(np.ceil(im.shape[1] / 8.0) * 8) # pad to 8 in order to pack mask into bits
for polys in segmentation:
if not self.cfg.DATA.ABSOLUTE_COORD:
polys = [p * width_height for p in polys]
polys = [tfms.apply_coords(p) for p in polys]
masks.append(polygons_to_mask(polys, im.shape[0], gt_mask_width))
if len(masks):
masks = np.asarray(masks, dtype='uint8') # values in {0, 1}
masks = np.packbits(masks, axis=-1)
else: # no gt on the image
masks = np.zeros((0, im.shape[0], gt_mask_width // 8), dtype='uint8')
ret['gt_masks_packed'] = masks
# from viz import draw_annotation, draw_mask
# viz = draw_annotation(im, boxes, klass)
# for mask in masks:
# viz = draw_mask(viz, mask)
# tpviz.interactive_imshow(viz)
return ret
def get_rpn_anchor_input(self, im, boxes, is_crowd):
"""
Args:
im: an image
            boxes: nx4, floatbox, gt. shouldn't be changed
is_crowd: n,
Returns:
The anchor labels and target boxes for each pixel in the featuremap.
fm_labels: fHxfWxNA
fm_boxes: fHxfWxNAx4
NA will be NUM_ANCHOR_SIZES x NUM_ANCHOR_RATIOS
"""
boxes = boxes.copy()
all_anchors = np.copy(
get_all_anchors(
stride=self.cfg.RPN.ANCHOR_STRIDE,
sizes=self.cfg.RPN.ANCHOR_SIZES,
ratios=self.cfg.RPN.ANCHOR_RATIOS,
max_size=self.cfg.PREPROC.MAX_SIZE,
)
)
# fHxfWxAx4 -> (-1, 4)
featuremap_anchors_flatten = all_anchors.reshape((-1, 4))
# only use anchors inside the image
inside_ind, inside_anchors = filter_boxes_inside_shape(featuremap_anchors_flatten, im.shape[:2])
# obtain anchor labels and their corresponding gt boxes
anchor_labels, anchor_gt_boxes = self.get_anchor_labels(
inside_anchors, boxes[is_crowd == 0], boxes[is_crowd == 1]
)
# Fill them back to original size: fHxfWx1, fHxfWx4
num_anchor = self.cfg.RPN.NUM_ANCHOR
anchorH, anchorW = all_anchors.shape[:2]
featuremap_labels = -np.ones((anchorH * anchorW * num_anchor,), dtype="int32")
featuremap_labels[inside_ind] = anchor_labels
featuremap_labels = featuremap_labels.reshape((anchorH, anchorW, num_anchor))
featuremap_boxes = np.zeros((anchorH * anchorW * num_anchor, 4), dtype="float32")
featuremap_boxes[inside_ind, :] = anchor_gt_boxes
featuremap_boxes = featuremap_boxes.reshape((anchorH, anchorW, num_anchor, 4))
return featuremap_labels, featuremap_boxes
# TODO: can probably merge single-level logic with FPN logic to simplify code
def get_multilevel_rpn_anchor_input(self, im, boxes, is_crowd):
"""
Args:
im: an image
            boxes: nx4, floatbox, gt. shouldn't be changed
is_crowd: n,
Returns:
[(fm_labels, fm_boxes)]: Returns a tuple for each FPN level.
Each tuple contains the anchor labels and target boxes for each pixel in the featuremap.
fm_labels: fHxfWx NUM_ANCHOR_RATIOS
fm_boxes: fHxfWx NUM_ANCHOR_RATIOS x4
"""
boxes = boxes.copy()
anchors_per_level = get_all_anchors_fpn(
strides=self.cfg.FPN.ANCHOR_STRIDES,
sizes=self.cfg.RPN.ANCHOR_SIZES,
ratios=self.cfg.RPN.ANCHOR_RATIOS,
max_size=self.cfg.PREPROC.MAX_SIZE,
)
flatten_anchors_per_level = [k.reshape((-1, 4)) for k in anchors_per_level]
all_anchors_flatten = np.concatenate(flatten_anchors_per_level, axis=0)
inside_ind, inside_anchors = filter_boxes_inside_shape(all_anchors_flatten, im.shape[:2])
anchor_labels, anchor_gt_boxes = self.get_anchor_labels(
inside_anchors, boxes[is_crowd == 0], boxes[is_crowd == 1]
)
# map back to all_anchors, then split to each level
num_all_anchors = all_anchors_flatten.shape[0]
all_labels = -np.ones((num_all_anchors,), dtype="int32")
all_labels[inside_ind] = anchor_labels
all_boxes = np.zeros((num_all_anchors, 4), dtype="float32")
all_boxes[inside_ind] = anchor_gt_boxes
start = 0
multilevel_inputs = []
for level_anchor in anchors_per_level:
assert level_anchor.shape[2] == len(self.cfg.RPN.ANCHOR_RATIOS)
anchor_shape = level_anchor.shape[:3] # fHxfWxNUM_ANCHOR_RATIOS
num_anchor_this_level = np.prod(anchor_shape)
end = start + num_anchor_this_level
multilevel_inputs.append(
(all_labels[start:end].reshape(anchor_shape), all_boxes[start:end, :].reshape(anchor_shape + (4,)))
)
start = end
assert end == num_all_anchors, "{} != {}".format(end, num_all_anchors)
return multilevel_inputs
def get_anchor_labels(self, anchors, gt_boxes, crowd_boxes):
"""
Label each anchor as fg/bg/ignore.
Args:
anchors: Ax4 float
gt_boxes: Bx4 float, non-crowd
crowd_boxes: Cx4 float
Returns:
anchor_labels: (A,) int. Each element is {-1, 0, 1}
anchor_boxes: Ax4. Contains the target gt_box for each anchor when the anchor is fg.
"""
# This function will modify labels and return the filtered inds
def filter_box_label(labels, value, max_num):
curr_inds = np.where(labels == value)[0]
if len(curr_inds) > max_num:
disable_inds = np.random.choice(curr_inds, size=(len(curr_inds) - max_num), replace=False)
labels[disable_inds] = -1 # ignore them
curr_inds = np.where(labels == value)[0]
return curr_inds
NA, NB = len(anchors), len(gt_boxes)
if NB == 0:
# No groundtruth. All anchors are either background or ignored.
anchor_labels = np.zeros((NA,), dtype="int32")
filter_box_label(anchor_labels, 0, self.cfg.RPN.BATCH_PER_IM)
return anchor_labels, np.zeros((NA, 4), dtype="float32")
box_ious = np_iou(anchors, gt_boxes) # NA x NB
ious_argmax_per_anchor = box_ious.argmax(axis=1) # NA,
ious_max_per_anchor = box_ious.max(axis=1)
ious_max_per_gt = np.amax(box_ious, axis=0, keepdims=True) # 1xNB
        # for each gt, find all those anchors (including ties) that have the max IoU with it
anchors_with_max_iou_per_gt = np.where(box_ious == ious_max_per_gt)[0]
# Setting NA labels: 1--fg 0--bg -1--ignore
anchor_labels = -np.ones((NA,), dtype="int32") # NA,
        # the order of setting neg/pos labels matters
anchor_labels[anchors_with_max_iou_per_gt] = 1
anchor_labels[ious_max_per_anchor >= self.cfg.RPN.POSITIVE_ANCHOR_THRESH] = 1
anchor_labels[ious_max_per_anchor < self.cfg.RPN.NEGATIVE_ANCHOR_THRESH] = 0
# label all non-ignore candidate boxes which overlap crowd as ignore
if crowd_boxes.size > 0:
cand_inds = np.where(anchor_labels >= 0)[0]
cand_anchors = anchors[cand_inds]
ioas = np_ioa(crowd_boxes, cand_anchors)
overlap_with_crowd = cand_inds[ioas.max(axis=0) > self.cfg.RPN.CROWD_OVERLAP_THRESH]
anchor_labels[overlap_with_crowd] = -1
# Subsample fg labels: ignore some fg if fg is too many
target_num_fg = int(self.cfg.RPN.BATCH_PER_IM * self.cfg.RPN.FG_RATIO)
fg_inds = filter_box_label(anchor_labels, 1, target_num_fg)
        # Keep an image even if there are no foreground anchors
# if len(fg_inds) == 0:
# raise MalformedData("No valid foreground for RPN!")
# Subsample bg labels. num_bg is not allowed to be too many
old_num_bg = np.sum(anchor_labels == 0)
if old_num_bg == 0:
# No valid bg in this image, skip.
raise MalformedData("No valid background for RPN!")
target_num_bg = self.cfg.RPN.BATCH_PER_IM - len(fg_inds)
filter_box_label(anchor_labels, 0, target_num_bg) # ignore return values
# Set anchor boxes: the best gt_box for each fg anchor
anchor_boxes = np.zeros((NA, 4), dtype="float32")
fg_boxes = gt_boxes[ious_argmax_per_anchor[fg_inds], :]
anchor_boxes[fg_inds, :] = fg_boxes
# assert len(fg_inds) + np.sum(anchor_labels == 0) == self.cfg.RPN.BATCH_PER_IM
return anchor_labels, anchor_boxes
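# A tiny worked example of the labeling rule above (illustrative only, made-up numbers):
# with one gt box and three anchors whose IoUs against it are [0.75, 0.45, 0.1],
# anchor 0 (IoU >= POSITIVE_ANCHOR_THRESH=0.7, and also the best anchor for this gt) -> fg,
# anchor 1 (0.3 <= IoU < 0.7) -> ignore, anchor 2 (IoU < NEGATIVE_ANCHOR_THRESH=0.3) -> bg.
# fg/bg anchors are then subsampled so that at most RPN.BATCH_PER_IM anchors stay labeled.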
def get_train_dataflow():
"""
Return a training dataflow. Each datapoint consists of the following:
An image: (h, w, 3),
1 or more pairs of (anchor_labels, anchor_boxes):
anchor_labels: (h', w', NA)
anchor_boxes: (h', w', NA, 4)
gt_boxes: (N, 4)
gt_labels: (N,)
If MODE_MASK, gt_masks: (N, h, w)
"""
roidbs = list(itertools.chain.from_iterable(DatasetRegistry.get(x).training_roidbs() for x in cfg.DATA.TRAIN))
print_class_histogram(roidbs)
# Filter out images that have no gt boxes, but this filter shall not be applied for testing.
# The model does support training with empty images, but it is not useful for COCO.
num = len(roidbs)
if cfg.DATA.FILTER_EMPTY_ANNOTATIONS:
roidbs = list(filter(lambda img: len(img["boxes"][img["is_crowd"] == 0]) > 0, roidbs))
logger.info(
"Filtered {} images which contain no non-crowd groudtruth boxes. Total #images for training: {}".format(
num - len(roidbs), len(roidbs)
)
)
ds = DataFromList(roidbs, shuffle=True)
preprocess = TrainingDataPreprocessor(cfg)
if cfg.DATA.NUM_WORKERS > 0:
if cfg.TRAINER == "horovod":
buffer_size = cfg.DATA.NUM_WORKERS * 10 # one dataflow for each process, therefore don't need large buffer
ds = MultiThreadMapData(ds, cfg.DATA.NUM_WORKERS, preprocess, buffer_size=buffer_size)
# MPI does not like fork()
else:
buffer_size = cfg.DATA.NUM_WORKERS * 20
ds = MultiProcessMapData(ds, cfg.DATA.NUM_WORKERS, preprocess, buffer_size=buffer_size)
else:
ds = MapData(ds, preprocess)
return ds
def get_eval_dataflow(name, shard=0, num_shards=1):
"""
Args:
name (str): name of the dataset to evaluate
shard, num_shards: to get subset of evaluation data
"""
roidbs = DatasetRegistry.get(name).inference_roidbs()
logger.info("Found {} images for inference.".format(len(roidbs)))
num_imgs = len(roidbs)
img_per_shard = num_imgs // num_shards
img_range = (shard * img_per_shard, (shard + 1) * img_per_shard if shard + 1 < num_shards else num_imgs)
    # unlike the training dataflow, no filtering is applied here
ds = DataFromListOfDict(roidbs[img_range[0]: img_range[1]], ["file_name", "image_id"])
def f(fname):
im = cv2.imread(fname, cv2.IMREAD_COLOR)
assert im is not None, fname
return im
ds = MapDataComponent(ds, f, 0)
# Evaluation itself may be multi-threaded, therefore don't add prefetch here.
return ds
if __name__ == "__main__":
import os
from tensorpack.dataflow import PrintData
from config import finalize_configs
register_coco(os.path.expanduser("~/data/coco"))
finalize_configs()
ds = get_train_dataflow()
ds = PrintData(ds, 10)
TestDataSpeed(ds, 50000).start()
for k in ds:
pass
| 16,896 | 40.111922 | 119 | py |
SyNet | SyNet-master/tensorpack/examples/FasterRCNN/config.py | # -*- coding: utf-8 -*-
# File: config.py
import numpy as np
import os
import pprint
import six
from tensorpack.utils import logger
from tensorpack.utils.gpu import get_num_gpu
__all__ = ['config', 'finalize_configs']
class AttrDict():
_freezed = False
""" Avoid accidental creation of new hierarchies. """
def __getattr__(self, name):
if self._freezed:
raise AttributeError(name)
if name.startswith('_'):
# Do not mess with internals. Otherwise copy/pickle will fail
raise AttributeError(name)
ret = AttrDict()
setattr(self, name, ret)
return ret
def __setattr__(self, name, value):
if self._freezed and name not in self.__dict__:
raise AttributeError(
"Config was freezed! Unknown config: {}".format(name))
super().__setattr__(name, value)
def __str__(self):
return pprint.pformat(self.to_dict(), indent=1, width=100, compact=True)
__repr__ = __str__
def to_dict(self):
"""Convert to a nested dict. """
return {k: v.to_dict() if isinstance(v, AttrDict) else v
for k, v in self.__dict__.items() if not k.startswith('_')}
def from_dict(self, d):
self.freeze(False)
for k, v in d.items():
self_v = getattr(self, k)
if isinstance(self_v, AttrDict):
self_v.from_dict(v)
else:
setattr(self, k, v)
def update_args(self, args):
"""Update from command line args. """
for cfg in args:
keys, v = cfg.split('=', maxsplit=1)
keylist = keys.split('.')
dic = self
for i, k in enumerate(keylist[:-1]):
assert k in dir(dic), "Unknown config key: {}".format(keys)
dic = getattr(dic, k)
key = keylist[-1]
oldv = getattr(dic, key)
if not isinstance(oldv, str):
v = eval(v)
setattr(dic, key, v)
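        # Illustrative usage: update_args(["TRAIN.BASE_LR=0.02", "MODE_MASK=False"]).
        # Keys must already exist in the config; non-string values are parsed with eval().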
def freeze(self, freezed=True):
self._freezed = freezed
for v in self.__dict__.values():
if isinstance(v, AttrDict):
v.freeze(freezed)
# avoid silent bugs
def __eq__(self, _):
raise NotImplementedError()
def __ne__(self, _):
raise NotImplementedError()
config = AttrDict()
_C = config # short alias to avoid typing "config" everywhere
# mode flags ---------------------
_C.TRAINER = 'replicated' # options: 'horovod', 'replicated'
_C.MODE_MASK = True # Faster R-CNN or Mask R-CNN
_C.MODE_FPN = True
# dataset -----------------------
_C.DATA.BASEDIR = '/path/to/your/DATA/DIR'
# All available dataset names are defined in `dataset/coco.py:register_coco`.
# All TRAIN dataset will be concatenated for training.
_C.DATA.TRAIN = ('coco_train2017',) # i.e. trainval35k
# Each VAL dataset will be evaluated separately (instead of concatenated)
_C.DATA.VAL = ('coco_val2017',) # AKA minival2014
# These two configs will be populated later inside `finalize_configs`.
_C.DATA.NUM_CATEGORY = -1 # without the background class (e.g., 80 for COCO)
_C.DATA.CLASS_NAMES = [] # NUM_CLASS (NUM_CATEGORY+1) strings, the first is "BG".
# whether the coordinates in your registered dataset are
# absolute pixel values in range [0, W or H] or relative values in [0, 1]
_C.DATA.ABSOLUTE_COORD = True
# Filter Negative Samples from dataset
_C.DATA.FILTER_EMPTY_ANNOTATIONS = True
# Number of data loading workers.
# In case of horovod training, this is the number of workers per-GPU (so you may want to use a smaller number).
# Set to 0 to disable parallel data loading
_C.DATA.NUM_WORKERS = 10
# backbone ----------------------
_C.BACKBONE.WEIGHTS = ''
# To train from scratch, set it to empty, and set FREEZE_AT to 0
# To train from ImageNet pre-trained models, use the one that matches your
# architecture from http://models.tensorpack.com under the 'FasterRCNN' section.
# To train from an existing COCO model, use the path to that file, and change
# the other configurations according to that model.
_C.BACKBONE.RESNET_NUM_BLOCKS = [3, 4, 6, 3] # for resnet50
# RESNET_NUM_BLOCKS = [3, 4, 23, 3] # for resnet101
_C.BACKBONE.FREEZE_AFFINE = False # do not train affine parameters inside norm layers
_C.BACKBONE.NORM = 'FreezeBN' # options: FreezeBN, SyncBN, GN, None
_C.BACKBONE.FREEZE_AT = 2 # options: 0, 1, 2. How many stages in backbone to freeze (not training)
# Use a base model with TF-preferred padding mode,
# which may pad more pixels on right/bottom than top/left.
# See https://github.com/tensorflow/tensorflow/issues/18213
# In tensorpack model zoo, ResNet models with TF_PAD_MODE=False are marked with "-AlignPadding".
# All other models under `ResNet/` in the model zoo are using TF_PAD_MODE=True.
# Using either one should probably give the same performance.
# We use the "AlignPadding" one just to be consistent with caffe2.
_C.BACKBONE.TF_PAD_MODE = False
_C.BACKBONE.STRIDE_1X1 = False # True for MSRA models
# schedule -----------------------
_C.TRAIN.NUM_GPUS = None # by default, will be set from code
_C.TRAIN.WEIGHT_DECAY = 1e-4
_C.TRAIN.BASE_LR = 1e-2 # defined for total batch size=8. Otherwise it will be adjusted automatically
_C.TRAIN.WARMUP = 1000 # in terms of iterations. This is not affected by #GPUs
_C.TRAIN.WARMUP_INIT_LR = 1e-5 # defined for total batch size=8. Otherwise it will be adjusted automatically
_C.TRAIN.STEPS_PER_EPOCH = 500
_C.TRAIN.STARTING_EPOCH = 1 # the first epoch to start with; useful for resuming a previous training
# LR_SCHEDULE means equivalent steps when the total batch size is 8.
# It can be either a string like "3x" that refers to standard convention, or a list of int.
# LR_SCHEDULE=3x is the same as LR_SCHEDULE=[420000, 500000, 540000], which
# means to decrease LR at steps 420k and 500k and stop training at 540k.
# When the total bs!=8, the actual iterations to decrease learning rate, and
# the base learning rate are computed from BASE_LR and LR_SCHEDULE.
# Therefore, there is *no need* to modify the config if you only change the number of GPUs.
_C.TRAIN.LR_SCHEDULE = "1x" # "1x" schedule in detectron
_C.TRAIN.EVAL_PERIOD = 50 # period (epochs) to run evaluation
_C.TRAIN.CHECKPOINT_PERIOD = 20 # period (epochs) to save model
# preprocessing --------------------
# Alternative old (worse & faster) setting: 600
_C.PREPROC.TRAIN_SHORT_EDGE_SIZE = [800, 800] # [min, max] to sample from
_C.PREPROC.TEST_SHORT_EDGE_SIZE = 800
_C.PREPROC.MAX_SIZE = 1333
# mean and std in RGB order.
# Un-scaled version: [0.485, 0.456, 0.406], [0.229, 0.224, 0.225]
_C.PREPROC.PIXEL_MEAN = [123.675, 116.28, 103.53]
_C.PREPROC.PIXEL_STD = [58.395, 57.12, 57.375]
# anchors -------------------------
_C.RPN.ANCHOR_STRIDE = 16
_C.RPN.ANCHOR_SIZES = (32, 64, 128, 256, 512) # sqrtarea of the anchor box
_C.RPN.ANCHOR_RATIOS = (0.5, 1., 2.)
_C.RPN.POSITIVE_ANCHOR_THRESH = 0.7
_C.RPN.NEGATIVE_ANCHOR_THRESH = 0.3
# rpn training -------------------------
_C.RPN.FG_RATIO = 0.5 # fg ratio among selected RPN anchors
_C.RPN.BATCH_PER_IM = 256 # total (across FPN levels) number of anchors that are marked valid
_C.RPN.MIN_SIZE = 0
_C.RPN.PROPOSAL_NMS_THRESH = 0.7
# Anchors which overlap with a crowd box (IOA larger than threshold) will be ignored.
# Setting this to a value larger than 1.0 will disable the feature.
# It is disabled by default because Detectron does not do this.
_C.RPN.CROWD_OVERLAP_THRESH = 9.99
_C.RPN.HEAD_DIM = 1024 # used in C4 only
# RPN proposal selection -------------------------------
# for C4
_C.RPN.TRAIN_PRE_NMS_TOPK = 12000
_C.RPN.TRAIN_POST_NMS_TOPK = 2000
_C.RPN.TEST_PRE_NMS_TOPK = 6000
_C.RPN.TEST_POST_NMS_TOPK = 1000 # if you encounter OOM in inference, set this to a smaller number
# for FPN, #proposals per-level and #proposals after merging are (for now) the same
# if FPN.PROPOSAL_MODE = 'Joint', these options have no effect
_C.RPN.TRAIN_PER_LEVEL_NMS_TOPK = 2000
_C.RPN.TEST_PER_LEVEL_NMS_TOPK = 1000
# fastrcnn training ---------------------
_C.FRCNN.BATCH_PER_IM = 512
_C.FRCNN.BBOX_REG_WEIGHTS = [10., 10., 5., 5.] # Slightly better setting: 20, 20, 10, 10
_C.FRCNN.FG_THRESH = 0.5
_C.FRCNN.FG_RATIO = 0.25 # fg ratio in a ROI batch
# FPN -------------------------
_C.FPN.ANCHOR_STRIDES = (4, 8, 16, 32, 64) # strides for each FPN level. Must be the same length as ANCHOR_SIZES
_C.FPN.PROPOSAL_MODE = 'Level' # 'Level', 'Joint'
_C.FPN.NUM_CHANNEL = 256
_C.FPN.NORM = 'None' # 'None', 'GN'
# The head option is only used in FPN. For C4 models, the head is C5
_C.FPN.FRCNN_HEAD_FUNC = 'fastrcnn_2fc_head'
# choices: fastrcnn_2fc_head, fastrcnn_4conv1fc_{,gn_}head
_C.FPN.FRCNN_CONV_HEAD_DIM = 256
_C.FPN.FRCNN_FC_HEAD_DIM = 1024
_C.FPN.MRCNN_HEAD_FUNC = 'maskrcnn_up4conv_head' # choices: maskrcnn_up4conv_{,gn_}head
# Mask R-CNN
_C.MRCNN.HEAD_DIM = 256
_C.MRCNN.ACCURATE_PASTE = True # slightly more aligned results, but very slow on numpy
# Cascade R-CNN, only available in FPN mode
_C.FPN.CASCADE = False
_C.CASCADE.IOUS = [0.5, 0.6, 0.7]
_C.CASCADE.BBOX_REG_WEIGHTS = [[10., 10., 5., 5.], [20., 20., 10., 10.], [30., 30., 15., 15.]]
# testing -----------------------
_C.TEST.FRCNN_NMS_THRESH = 0.5
# Smaller threshold value gives significantly better mAP. But we use 0.05 for consistency with Detectron.
# mAP with 1e-4 threshold can be found at https://github.com/tensorpack/tensorpack/commit/26321ae58120af2568bdbf2269f32aa708d425a8#diff-61085c48abee915b584027e1085e1043 # noqa
_C.TEST.RESULT_SCORE_THRESH = 0.05
_C.TEST.RESULT_SCORE_THRESH_VIS = 0.5 # only visualize confident results
_C.TEST.RESULTS_PER_IM = 100
_C.freeze() # avoid typo / wrong config keys
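# The same KEY=VALUE mechanism is exposed on the command line of train.py / predict.py
# through update_args(), e.g. something like (illustrative only):
# ./train.py --config MODE_MASK=False DATA.BASEDIR=/path/to/coco TRAIN.LR_SCHEDULE=2x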
def finalize_configs(is_training):
"""
Run some sanity checks, and populate some configs from others
"""
_C.freeze(False) # populate new keys now
if isinstance(_C.DATA.VAL, six.string_types): # support single string (the typical case) as well
_C.DATA.VAL = (_C.DATA.VAL, )
if isinstance(_C.DATA.TRAIN, six.string_types): # support single string
_C.DATA.TRAIN = (_C.DATA.TRAIN, )
# finalize dataset definitions ...
from dataset import DatasetRegistry
datasets = list(_C.DATA.TRAIN) + list(_C.DATA.VAL)
_C.DATA.CLASS_NAMES = DatasetRegistry.get_metadata(datasets[0], "class_names")
_C.DATA.NUM_CATEGORY = len(_C.DATA.CLASS_NAMES) - 1
assert _C.BACKBONE.NORM in ['FreezeBN', 'SyncBN', 'GN', 'None'], _C.BACKBONE.NORM
if _C.BACKBONE.NORM != 'FreezeBN':
assert not _C.BACKBONE.FREEZE_AFFINE
assert _C.BACKBONE.FREEZE_AT in [0, 1, 2]
_C.RPN.NUM_ANCHOR = len(_C.RPN.ANCHOR_SIZES) * len(_C.RPN.ANCHOR_RATIOS)
assert len(_C.FPN.ANCHOR_STRIDES) == len(_C.RPN.ANCHOR_SIZES)
# image size into the backbone has to be multiple of this number
_C.FPN.RESOLUTION_REQUIREMENT = _C.FPN.ANCHOR_STRIDES[3] # [3] because we build FPN with features r2,r3,r4,r5
if _C.MODE_FPN:
size_mult = _C.FPN.RESOLUTION_REQUIREMENT * 1.
_C.PREPROC.MAX_SIZE = np.ceil(_C.PREPROC.MAX_SIZE / size_mult) * size_mult
assert _C.FPN.PROPOSAL_MODE in ['Level', 'Joint']
assert _C.FPN.FRCNN_HEAD_FUNC.endswith('_head')
assert _C.FPN.MRCNN_HEAD_FUNC.endswith('_head')
assert _C.FPN.NORM in ['None', 'GN']
if _C.FPN.CASCADE:
# the first threshold is the proposal sampling threshold
assert _C.CASCADE.IOUS[0] == _C.FRCNN.FG_THRESH
assert len(_C.CASCADE.BBOX_REG_WEIGHTS) == len(_C.CASCADE.IOUS)
if is_training:
train_scales = _C.PREPROC.TRAIN_SHORT_EDGE_SIZE
if isinstance(train_scales, (list, tuple)) and train_scales[1] - train_scales[0] > 100:
# don't autotune if augmentation is on
os.environ['TF_CUDNN_USE_AUTOTUNE'] = '0'
os.environ['TF_AUTOTUNE_THRESHOLD'] = '1'
assert _C.TRAINER in ['horovod', 'replicated'], _C.TRAINER
lr = _C.TRAIN.LR_SCHEDULE
if isinstance(lr, six.string_types):
if lr.endswith("x"):
LR_SCHEDULE_KITER = {
"{}x".format(k):
[180 * k - 120, 180 * k - 40, 180 * k]
for k in range(2, 10)}
LR_SCHEDULE_KITER["1x"] = [120, 160, 180]
_C.TRAIN.LR_SCHEDULE = [x * 1000 for x in LR_SCHEDULE_KITER[lr]]
else:
_C.TRAIN.LR_SCHEDULE = eval(lr)
# setup NUM_GPUS
if _C.TRAINER == 'horovod':
import horovod.tensorflow as hvd
ngpu = hvd.size()
logger.info("Horovod Rank={}, Size={}, LocalRank={}".format(
hvd.rank(), hvd.size(), hvd.local_rank()))
else:
assert 'OMPI_COMM_WORLD_SIZE' not in os.environ
ngpu = get_num_gpu()
assert ngpu > 0, "Has to train with GPU!"
assert ngpu % 8 == 0 or 8 % ngpu == 0, "Can only train with 1,2,4 or >=8 GPUs, but found {} GPUs".format(ngpu)
else:
# autotune is too slow for inference
os.environ['TF_CUDNN_USE_AUTOTUNE'] = '0'
ngpu = get_num_gpu()
if _C.TRAIN.NUM_GPUS is None:
_C.TRAIN.NUM_GPUS = ngpu
else:
if _C.TRAINER == 'horovod':
assert _C.TRAIN.NUM_GPUS == ngpu
else:
assert _C.TRAIN.NUM_GPUS <= ngpu
_C.freeze()
logger.info("Config: ------------------------------------------\n" + str(_C))
| 13,461 | 40.678019 | 176 | py |
SyNet | SyNet-master/tensorpack/examples/FasterRCNN/common.py | # -*- coding: utf-8 -*-
# File: common.py
import numpy as np
import cv2
from tensorpack.dataflow import RNGDataFlow
from tensorpack.dataflow.imgaug import ImageAugmentor, ResizeTransform
class DataFromListOfDict(RNGDataFlow):
def __init__(self, lst, keys, shuffle=False):
self._lst = lst
self._keys = keys
self._shuffle = shuffle
self._size = len(lst)
def __len__(self):
return self._size
def __iter__(self):
if self._shuffle:
self.rng.shuffle(self._lst)
for dic in self._lst:
dp = [dic[k] for k in self._keys]
yield dp
class CustomResize(ImageAugmentor):
"""
    Try resizing the shortest edge to a certain number
    while keeping the longest edge from exceeding max_size.
"""
def __init__(self, short_edge_length, max_size, interp=cv2.INTER_LINEAR):
"""
Args:
short_edge_length ([int, int]): a [min, max] interval from which to sample the
shortest edge length.
max_size (int): maximum allowed longest edge length.
"""
super(CustomResize, self).__init__()
if isinstance(short_edge_length, int):
short_edge_length = (short_edge_length, short_edge_length)
self._init(locals())
def get_transform(self, img):
h, w = img.shape[:2]
size = self.rng.randint(
self.short_edge_length[0], self.short_edge_length[1] + 1)
scale = size * 1.0 / min(h, w)
if h < w:
newh, neww = size, scale * w
else:
newh, neww = scale * h, size
if max(newh, neww) > self.max_size:
scale = self.max_size * 1.0 / max(newh, neww)
newh = newh * scale
neww = neww * scale
neww = int(neww + 0.5)
newh = int(newh + 0.5)
return ResizeTransform(h, w, newh, neww, self.interp)
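        # Worked example (illustrative): for a 600x1200 image and a sampled size of 800,
        # scale = 800/600 gives 800x1600; since 1600 exceeds max_size (e.g. 1333), the
        # result is rescaled by 1333/1600, ending up at roughly 667x1333.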
def box_to_point4(boxes):
"""
Convert boxes to its corner points.
Args:
boxes: nx4
Returns:
(nx4)x2
"""
b = boxes[:, [0, 1, 2, 3, 0, 3, 2, 1]]
b = b.reshape((-1, 2))
return b
def point4_to_box(points):
"""
Args:
points: (nx4)x2
Returns:
nx4 boxes (x1y1x2y2)
"""
p = points.reshape((-1, 4, 2))
minxy = p.min(axis=1) # nx2
maxxy = p.max(axis=1) # nx2
return np.concatenate((minxy, maxxy), axis=1)
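    # Round-trip sketch (illustrative): box_to_point4(np.array([[10., 20., 30., 60.]]))
    # yields the corners [[10, 20], [30, 60], [10, 60], [30, 20]]; after transforming the
    # points (e.g. the flips/resizes applied during augmentation), point4_to_box takes the
    # per-box min/max again, so the identity transform gives back [[10, 20, 30, 60]].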
def polygons_to_mask(polys, height, width):
"""
Convert polygons to binary masks.
Args:
polys: a list of nx2 float array. Each array contains many (x, y) coordinates.
Returns:
a binary matrix of (height, width)
"""
polys = [p.flatten().tolist() for p in polys]
assert len(polys) > 0, "Polygons are empty!"
import pycocotools.mask as cocomask
rles = cocomask.frPyObjects(polys, height, width)
rle = cocomask.merge(rles)
return cocomask.decode(rle)
def clip_boxes(boxes, shape):
"""
Args:
boxes: (...)x4, float
shape: h, w
"""
orig_shape = boxes.shape
boxes = boxes.reshape([-1, 4])
h, w = shape
boxes[:, [0, 1]] = np.maximum(boxes[:, [0, 1]], 0)
boxes[:, 2] = np.minimum(boxes[:, 2], w)
boxes[:, 3] = np.minimum(boxes[:, 3], h)
return boxes.reshape(orig_shape)
def filter_boxes_inside_shape(boxes, shape):
"""
Args:
boxes: (nx4), float
shape: (h, w)
Returns:
indices: (k, )
selection: (kx4)
"""
assert boxes.ndim == 2, boxes.shape
assert len(shape) == 2, shape
h, w = shape
indices = np.where(
(boxes[:, 0] >= 0) &
(boxes[:, 1] >= 0) &
(boxes[:, 2] <= w) &
(boxes[:, 3] <= h))[0]
return indices, boxes[indices, :]
try:
import pycocotools.mask as cocomask
# Much faster than utils/np_box_ops
def np_iou(A, B):
def to_xywh(box):
box = box.copy()
box[:, 2] -= box[:, 0]
box[:, 3] -= box[:, 1]
return box
ret = cocomask.iou(
to_xywh(A), to_xywh(B),
np.zeros((len(B),), dtype=np.bool))
# can accelerate even more, if using float32
return ret.astype('float32')
except ImportError:
from utils.np_box_ops import iou as np_iou # noqa
| 4,285 | 24.664671 | 90 | py |
SyNet | SyNet-master/tensorpack/examples/FasterRCNN/eval.py | # -*- coding: utf-8 -*-
# File: eval.py
import itertools
import json
import numpy as np
import os
import sys
import tensorflow as tf
from collections import namedtuple
from concurrent.futures import ThreadPoolExecutor
from contextlib import ExitStack
import cv2
import pycocotools.mask as cocomask
import tqdm
from scipy import interpolate
from tensorpack.callbacks import Callback
from tensorpack.tfutils.common import get_tf_version_tuple
from tensorpack.utils import logger, get_tqdm
from common import CustomResize, clip_boxes
from config import config as cfg
from data import get_eval_dataflow
from dataset import DatasetRegistry
try:
import horovod.tensorflow as hvd
except ImportError:
pass
DetectionResult = namedtuple(
'DetectionResult',
['box', 'score', 'class_id', 'mask'])
"""
box: 4 float
score: float
class_id: int, 1~NUM_CLASS
mask: None, or a binary image of the original image shape
"""
def _scale_box(box, scale):
w_half = (box[2] - box[0]) * 0.5
h_half = (box[3] - box[1]) * 0.5
x_c = (box[2] + box[0]) * 0.5
y_c = (box[3] + box[1]) * 0.5
w_half *= scale
h_half *= scale
scaled_box = np.zeros_like(box)
scaled_box[0] = x_c - w_half
scaled_box[2] = x_c + w_half
scaled_box[1] = y_c - h_half
scaled_box[3] = y_c + h_half
return scaled_box
def _paste_mask(box, mask, shape):
"""
Args:
box: 4 float
mask: MxM floats
shape: h,w
Returns:
A uint8 binary image of hxw.
"""
assert mask.shape[0] == mask.shape[1], mask.shape
if cfg.MRCNN.ACCURATE_PASTE:
# This method is accurate but much slower.
mask = np.pad(mask, [(1, 1), (1, 1)], mode='constant')
box = _scale_box(box, float(mask.shape[0]) / (mask.shape[0] - 2))
mask_pixels = np.arange(0.0, mask.shape[0]) + 0.5
mask_continuous = interpolate.interp2d(mask_pixels, mask_pixels, mask, fill_value=0.0)
h, w = shape
ys = np.arange(0.0, h) + 0.5
xs = np.arange(0.0, w) + 0.5
ys = (ys - box[1]) / (box[3] - box[1]) * mask.shape[0]
xs = (xs - box[0]) / (box[2] - box[0]) * mask.shape[1]
# Waste a lot of compute since most indices are out-of-border
res = mask_continuous(xs, ys)
return (res >= 0.5).astype('uint8')
else:
# This method (inspired by Detectron) is less accurate but fast.
# int() is floor
# box fpcoor=0.0 -> intcoor=0.0
x0, y0 = list(map(int, box[:2] + 0.5))
# box fpcoor=h -> intcoor=h-1, inclusive
x1, y1 = list(map(int, box[2:] - 0.5)) # inclusive
x1 = max(x0, x1) # require at least 1x1
y1 = max(y0, y1)
w = x1 + 1 - x0
h = y1 + 1 - y0
# rounding errors could happen here, because masks were not originally computed for this shape.
# but it's hard to do better, because the network does not know the "original" scale
mask = (cv2.resize(mask, (w, h)) > 0.5).astype('uint8')
ret = np.zeros(shape, dtype='uint8')
ret[y0:y1 + 1, x0:x1 + 1] = mask
return ret
def predict_image(img, model_func):
"""
Run detection on one image, using the TF callable.
This function should handle the preprocessing internally.
Args:
img: an image
model_func: a callable from the TF model.
It takes image and returns (boxes, probs, labels, [masks])
Returns:
[DetectionResult]
"""
orig_shape = img.shape[:2]
resizer = CustomResize(cfg.PREPROC.TEST_SHORT_EDGE_SIZE, cfg.PREPROC.MAX_SIZE)
resized_img = resizer.augment(img)
scale = np.sqrt(resized_img.shape[0] * 1.0 / img.shape[0] * resized_img.shape[1] / img.shape[1])
boxes, probs, labels, *masks = model_func(resized_img)
# Some slow numpy postprocessing:
boxes = boxes / scale
# boxes are already clipped inside the graph, but after the floating point scaling, this may not be true any more.
boxes = clip_boxes(boxes, orig_shape)
if masks:
full_masks = [_paste_mask(box, mask, orig_shape)
for box, mask in zip(boxes, masks[0])]
masks = full_masks
else:
# fill with none
masks = [None] * len(boxes)
results = [DetectionResult(*args) for args in zip(boxes, probs, labels.tolist(), masks)]
return results
def predict_dataflow(df, model_func, tqdm_bar=None):
"""
Args:
df: a DataFlow which produces (image, image_id)
model_func: a callable from the TF model.
It takes image and returns (boxes, probs, labels, [masks])
tqdm_bar: a tqdm object to be shared among multiple evaluation instances. If None,
will create a new one.
Returns:
list of dict, in the format used by
`DatasetSplit.eval_inference_results`
"""
df.reset_state()
all_results = []
with ExitStack() as stack:
# tqdm is not quite thread-safe: https://github.com/tqdm/tqdm/issues/323
if tqdm_bar is None:
tqdm_bar = stack.enter_context(get_tqdm(total=df.size()))
for img, img_id in df:
results = predict_image(img, model_func)
for r in results:
# int()/float() to make it json-serializable
res = {
'image_id': img_id,
'category_id': int(r.class_id),
'bbox': [round(float(x), 4) for x in r.box],
'score': round(float(r.score), 4),
}
# also append segmentation to results
if r.mask is not None:
rle = cocomask.encode(
np.array(r.mask[:, :, None], order='F'))[0]
rle['counts'] = rle['counts'].decode('ascii')
res['segmentation'] = rle
all_results.append(res)
tqdm_bar.update(1)
return all_results
def multithread_predict_dataflow(dataflows, model_funcs):
"""
Running multiple `predict_dataflow` in multiple threads, and aggregate the results.
Args:
dataflows: a list of DataFlow to be used in :func:`predict_dataflow`
model_funcs: a list of callable to be used in :func:`predict_dataflow`
Returns:
list of dict, in the format used by
`DatasetSplit.eval_inference_results`
"""
num_worker = len(model_funcs)
assert len(dataflows) == num_worker
if num_worker == 1:
return predict_dataflow(dataflows[0], model_funcs[0])
kwargs = {'thread_name_prefix': 'EvalWorker'} if sys.version_info.minor >= 6 else {}
with ThreadPoolExecutor(max_workers=num_worker, **kwargs) as executor, \
tqdm.tqdm(total=sum([df.size() for df in dataflows])) as pbar:
futures = []
for dataflow, pred in zip(dataflows, model_funcs):
futures.append(executor.submit(predict_dataflow, dataflow, pred, pbar))
all_results = list(itertools.chain(*[fut.result() for fut in futures]))
return all_results
class EvalCallback(Callback):
"""
A callback that runs evaluation once a while.
It supports multi-gpu evaluation.
"""
_chief_only = False
def __init__(self, eval_dataset, in_names, out_names, output_dir):
self._eval_dataset = eval_dataset
self._in_names, self._out_names = in_names, out_names
self._output_dir = output_dir
def _setup_graph(self):
num_gpu = cfg.TRAIN.NUM_GPUS
if cfg.TRAINER == 'replicated':
# TF bug in version 1.11, 1.12: https://github.com/tensorflow/tensorflow/issues/22750
buggy_tf = get_tf_version_tuple() in [(1, 11), (1, 12)]
# Use two predictor threads per GPU to get better throughput
self.num_predictor = num_gpu if buggy_tf else num_gpu * 2
self.predictors = [self._build_predictor(k % num_gpu) for k in range(self.num_predictor)]
self.dataflows = [get_eval_dataflow(self._eval_dataset,
shard=k, num_shards=self.num_predictor)
for k in range(self.num_predictor)]
else:
# Only eval on the first machine,
# Because evaluation assumes that all horovod workers share the filesystem.
# Alternatively, can eval on all ranks and use allgather, but allgather sometimes hangs
self._horovod_run_eval = hvd.rank() == hvd.local_rank()
if self._horovod_run_eval:
self.predictor = self._build_predictor(0)
self.dataflow = get_eval_dataflow(self._eval_dataset,
shard=hvd.local_rank(), num_shards=hvd.local_size())
self.barrier = hvd.allreduce(tf.random_normal(shape=[1]))
def _build_predictor(self, idx):
return self.trainer.get_predictor(self._in_names, self._out_names, device=idx)
def _before_train(self):
eval_period = cfg.TRAIN.EVAL_PERIOD
self.epochs_to_eval = set()
for k in itertools.count(1):
if k * eval_period > self.trainer.max_epoch:
break
self.epochs_to_eval.add(k * eval_period)
self.epochs_to_eval.add(self.trainer.max_epoch)
logger.info("[EvalCallback] Will evaluate every {} epochs".format(eval_period))
def _eval(self):
logdir = self._output_dir
if cfg.TRAINER == 'replicated':
all_results = multithread_predict_dataflow(self.dataflows, self.predictors)
else:
filenames = [os.path.join(
logdir, 'outputs{}-part{}.json'.format(self.global_step, rank)
) for rank in range(hvd.local_size())]
if self._horovod_run_eval:
local_results = predict_dataflow(self.dataflow, self.predictor)
fname = filenames[hvd.local_rank()]
with open(fname, 'w') as f:
json.dump(local_results, f)
self.barrier.eval()
if hvd.rank() > 0:
return
all_results = []
for fname in filenames:
with open(fname, 'r') as f:
obj = json.load(f)
all_results.extend(obj)
os.unlink(fname)
scores = DatasetRegistry.get(self._eval_dataset).eval_inference_results(all_results)
for k, v in scores.items():
self.trainer.monitors.put_scalar(self._eval_dataset + '-' + k, v)
def _trigger_epoch(self):
if self.epoch_num in self.epochs_to_eval:
logger.info("Running evaluation ...")
self._eval()
| 10,667 | 35.409556 | 118 | py |
SyNet | SyNet-master/tensorpack/examples/FasterRCNN/viz.py | # -*- coding: utf-8 -*-
# File: viz.py
import numpy as np
from tensorpack.utils import viz
from tensorpack.utils.palette import PALETTE_RGB
from config import config as cfg
from utils.np_box_ops import area as np_area
from utils.np_box_ops import iou as np_iou
from common import polygons_to_mask
def draw_annotation(img, boxes, klass, polygons=None, is_crowd=None):
"""Will not modify img"""
labels = []
assert len(boxes) == len(klass)
if is_crowd is not None:
assert len(boxes) == len(is_crowd)
for cls, crd in zip(klass, is_crowd):
clsname = cfg.DATA.CLASS_NAMES[cls]
if crd == 1:
clsname += ';Crowd'
labels.append(clsname)
else:
for cls in klass:
labels.append(cfg.DATA.CLASS_NAMES[cls])
img = viz.draw_boxes(img, boxes, labels)
if polygons is not None:
for p in polygons:
mask = polygons_to_mask(p, img.shape[0], img.shape[1])
img = draw_mask(img, mask)
return img
def draw_proposal_recall(img, proposals, proposal_scores, gt_boxes):
"""
Draw top3 proposals for each gt.
Args:
proposals: NPx4
proposal_scores: NP
gt_boxes: NG
"""
box_ious = np_iou(gt_boxes, proposals) # ng x np
box_ious_argsort = np.argsort(-box_ious, axis=1)
good_proposals_ind = box_ious_argsort[:, :3] # for each gt, find 3 best proposals
good_proposals_ind = np.unique(good_proposals_ind.ravel())
proposals = proposals[good_proposals_ind, :]
tags = list(map(str, proposal_scores[good_proposals_ind]))
img = viz.draw_boxes(img, proposals, tags)
return img, good_proposals_ind
def draw_predictions(img, boxes, scores):
"""
Args:
boxes: kx4
scores: kxC
"""
if len(boxes) == 0:
return img
labels = scores.argmax(axis=1)
scores = scores.max(axis=1)
tags = ["{},{:.2f}".format(cfg.DATA.CLASS_NAMES[lb], score) for lb, score in zip(labels, scores)]
return viz.draw_boxes(img, boxes, tags)
def draw_final_outputs(img, results):
"""
Args:
results: [DetectionResult]
"""
if len(results) == 0:
return img
# Display in largest to smallest order to reduce occlusion
boxes = np.asarray([r.box for r in results])
areas = np_area(boxes)
sorted_inds = np.argsort(-areas)
ret = img
tags = []
for result_id in sorted_inds:
r = results[result_id]
if r.mask is not None:
ret = draw_mask(ret, r.mask)
for r in results:
tags.append(
"{},{:.2f}".format(cfg.DATA.CLASS_NAMES[r.class_id], r.score))
ret = viz.draw_boxes(ret, boxes, tags)
return ret
def draw_final_outputs_blackwhite(img, results):
"""
Args:
results: [DetectionResult]
"""
img_bw = img.mean(axis=2)
img_bw = np.stack([img_bw] * 3, axis=2)
if len(results) == 0:
return img_bw
boxes = np.asarray([r.box for r in results])
all_masks = [r.mask for r in results]
if all_masks[0] is not None:
m = all_masks[0] > 0
for m2 in all_masks[1:]:
m = m | (m2 > 0)
img_bw[m] = img[m]
tags = ["{},{:.2f}".format(cfg.DATA.CLASS_NAMES[r.class_id], r.score) for r in results]
ret = viz.draw_boxes(img_bw, boxes, tags)
return ret
def draw_mask(im, mask, alpha=0.5, color=None):
"""
Overlay a mask on top of the image.
Args:
im: a 3-channel uint8 image in BGR
mask: a binary 1-channel image of the same size
color: if None, will choose automatically
"""
if color is None:
color = PALETTE_RGB[np.random.choice(len(PALETTE_RGB))][::-1]
color = np.asarray(color, dtype=np.float32)
im = np.where(np.repeat((mask > 0)[:, :, None], 3, axis=2),
im * (1 - alpha) + color * alpha, im)
im = im.astype('uint8')
return im
| 3,923 | 27.028571 | 101 | py |
SyNet | SyNet-master/tensorpack/examples/FasterRCNN/predict.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import argparse
import itertools
import numpy as np
import os
import shutil
import tensorflow as tf
import cv2
import tqdm
import tensorpack.utils.viz as tpviz
from tensorpack.predict import MultiTowerOfflinePredictor, OfflinePredictor, PredictConfig
from tensorpack.tfutils import SmartInit, get_tf_version_tuple
from tensorpack.tfutils.export import ModelExporter
from tensorpack.utils import fs, logger
from dataset import DatasetRegistry, register_coco, register_balloon
from config import config as cfg
from config import finalize_configs
from data import get_eval_dataflow, get_train_dataflow
from eval import DetectionResult, multithread_predict_dataflow, predict_image
from modeling.generalized_rcnn import ResNetC4Model, ResNetFPNModel
from viz import (
draw_annotation, draw_final_outputs, draw_predictions,
draw_proposal_recall, draw_final_outputs_blackwhite)
def do_visualize(model, model_path, nr_visualize=100, output_dir='output'):
"""
Visualize some intermediate results (proposals, raw predictions) inside the pipeline.
"""
df = get_train_dataflow()
df.reset_state()
pred = OfflinePredictor(PredictConfig(
model=model,
session_init=SmartInit(model_path),
input_names=['image', 'gt_boxes', 'gt_labels'],
output_names=[
'generate_{}_proposals/boxes'.format('fpn' if cfg.MODE_FPN else 'rpn'),
'generate_{}_proposals/scores'.format('fpn' if cfg.MODE_FPN else 'rpn'),
'fastrcnn_all_scores',
'output/boxes',
'output/scores',
'output/labels',
]))
if os.path.isdir(output_dir):
shutil.rmtree(output_dir)
fs.mkdir_p(output_dir)
with tqdm.tqdm(total=nr_visualize) as pbar:
for idx, dp in itertools.islice(enumerate(df), nr_visualize):
img, gt_boxes, gt_labels = dp['image'], dp['gt_boxes'], dp['gt_labels']
rpn_boxes, rpn_scores, all_scores, \
final_boxes, final_scores, final_labels = pred(img, gt_boxes, gt_labels)
# draw groundtruth boxes
gt_viz = draw_annotation(img, gt_boxes, gt_labels)
# draw best proposals for each groundtruth, to show recall
proposal_viz, good_proposals_ind = draw_proposal_recall(img, rpn_boxes, rpn_scores, gt_boxes)
# draw the scores for the above proposals
score_viz = draw_predictions(img, rpn_boxes[good_proposals_ind], all_scores[good_proposals_ind])
results = [DetectionResult(*args) for args in
zip(final_boxes, final_scores, final_labels,
[None] * len(final_labels))]
final_viz = draw_final_outputs(img, results)
viz = tpviz.stack_patches([
gt_viz, proposal_viz,
score_viz, final_viz], 2, 2)
if os.environ.get('DISPLAY', None):
tpviz.interactive_imshow(viz)
cv2.imwrite("{}/{:03d}.png".format(output_dir, idx), viz)
pbar.update()
def do_evaluate(pred_config, output_file):
num_tower = max(cfg.TRAIN.NUM_GPUS, 1)
graph_funcs = MultiTowerOfflinePredictor(
pred_config, list(range(num_tower))).get_predictors()
for dataset in cfg.DATA.VAL:
logger.info("Evaluating {} ...".format(dataset))
dataflows = [
get_eval_dataflow(dataset, shard=k, num_shards=num_tower)
for k in range(num_tower)]
all_results = multithread_predict_dataflow(dataflows, graph_funcs)
output = output_file + '-' + dataset
DatasetRegistry.get(dataset).eval_inference_results(all_results, output)
def do_predict(pred_func, input_file):
img = cv2.imread(input_file, cv2.IMREAD_COLOR)
results = predict_image(img, pred_func)
if cfg.MODE_MASK:
final = draw_final_outputs_blackwhite(img, results)
else:
final = draw_final_outputs(img, results)
viz = np.concatenate((img, final), axis=1)
cv2.imwrite("output.png", viz)
logger.info("Inference output for {} written to output.png".format(input_file))
tpviz.interactive_imshow(viz)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--load', help='load a model for evaluation.', required=True)
parser.add_argument('--visualize', action='store_true', help='visualize intermediate results')
parser.add_argument('--evaluate', help="Run evaluation. "
"This argument is the path to the output json evaluation file")
parser.add_argument('--predict', help="Run prediction on a given image. "
"This argument is the path to the input image file", nargs='+')
parser.add_argument('--benchmark', action='store_true', help="Benchmark the speed of the model + postprocessing")
parser.add_argument('--config', help="A list of KEY=VALUE to overwrite those defined in config.py",
nargs='+')
parser.add_argument('--output-pb', help='Save a model to .pb')
parser.add_argument('--output-serving', help='Save a model to serving file')
args = parser.parse_args()
if args.config:
cfg.update_args(args.config)
register_coco(cfg.DATA.BASEDIR) # add COCO datasets to the registry
register_balloon(cfg.DATA.BASEDIR)
MODEL = ResNetFPNModel() if cfg.MODE_FPN else ResNetC4Model()
if not tf.test.is_gpu_available():
from tensorflow.python.framework import test_util
assert get_tf_version_tuple() >= (1, 7) and test_util.IsMklEnabled(), \
"Inference requires either GPU support or MKL support!"
assert args.load
finalize_configs(is_training=False)
if args.predict or args.visualize:
cfg.TEST.RESULT_SCORE_THRESH = cfg.TEST.RESULT_SCORE_THRESH_VIS
if args.visualize:
do_visualize(MODEL, args.load)
else:
predcfg = PredictConfig(
model=MODEL,
session_init=SmartInit(args.load),
input_names=MODEL.get_inference_tensor_names()[0],
output_names=MODEL.get_inference_tensor_names()[1])
if args.output_pb:
ModelExporter(predcfg).export_compact(args.output_pb, optimize=False)
elif args.output_serving:
ModelExporter(predcfg).export_serving(args.output_serving)
if args.predict:
predictor = OfflinePredictor(predcfg)
for image_file in args.predict:
do_predict(predictor, image_file)
elif args.evaluate:
assert args.evaluate.endswith('.json'), args.evaluate
do_evaluate(predcfg, args.evaluate)
elif args.benchmark:
df = get_eval_dataflow(cfg.DATA.VAL[0])
df.reset_state()
predictor = OfflinePredictor(predcfg)
for _, img in enumerate(tqdm.tqdm(df, total=len(df), smoothing=0.5)):
# This includes post-processing time, which is done on CPU and not optimized
# To exclude it, modify `predict_image`.
predict_image(img[0], predictor)
| 7,152 | 41.076471 | 117 | py |
SyNet | SyNet-master/tensorpack/examples/FasterRCNN/train.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# File: train.py
import argparse
from tensorpack import *
from tensorpack.tfutils import collect_env_info
from tensorpack.tfutils.common import get_tf_version_tuple
from dataset import register_coco, register_balloon
from config import config as cfg
from config import finalize_configs
from data import get_train_dataflow
from eval import EvalCallback
from modeling.generalized_rcnn import ResNetC4Model, ResNetFPNModel
try:
import horovod.tensorflow as hvd
except ImportError:
pass
if __name__ == '__main__':
# "spawn/forkserver" is safer than the default "fork" method and
# produce more deterministic behavior & memory saving
# However its limitation is you cannot pass a lambda function to subprocesses.
import multiprocessing as mp
mp.set_start_method('spawn')
parser = argparse.ArgumentParser()
parser.add_argument('--load', help='Load a model to start training from. It overwrites BACKBONE.WEIGHTS')
parser.add_argument('--logdir', help='Log directory. Will remove the old one if already exists.',
default='train_log/maskrcnn')
parser.add_argument('--config', help="A list of KEY=VALUE to overwrite those defined in config.py", nargs='+')
if get_tf_version_tuple() < (1, 6):
# https://github.com/tensorflow/tensorflow/issues/14657
logger.warn("TF<1.6 has a bug which may lead to crash in FasterRCNN if you're unlucky.")
args = parser.parse_args()
if args.config:
cfg.update_args(args.config)
register_coco(cfg.DATA.BASEDIR) # add COCO datasets to the registry
register_balloon(cfg.DATA.BASEDIR) # add the demo balloon datasets to the registry
# Setup logging ...
is_horovod = cfg.TRAINER == 'horovod'
if is_horovod:
hvd.init()
if not is_horovod or hvd.rank() == 0:
logger.set_logger_dir(args.logdir, 'd')
logger.info("Environment Information:\n" + collect_env_info())
finalize_configs(is_training=True)
# Create model
MODEL = ResNetFPNModel() if cfg.MODE_FPN else ResNetC4Model()
# Compute the training schedule from the number of GPUs ...
stepnum = cfg.TRAIN.STEPS_PER_EPOCH
# warmup is step based, lr is epoch based
init_lr = cfg.TRAIN.WARMUP_INIT_LR * min(8. / cfg.TRAIN.NUM_GPUS, 1.)
warmup_schedule = [(0, init_lr), (cfg.TRAIN.WARMUP, cfg.TRAIN.BASE_LR)]
warmup_end_epoch = cfg.TRAIN.WARMUP * 1. / stepnum
lr_schedule = [(int(warmup_end_epoch + 0.5), cfg.TRAIN.BASE_LR)]
factor = 8. / cfg.TRAIN.NUM_GPUS
for idx, steps in enumerate(cfg.TRAIN.LR_SCHEDULE[:-1]):
mult = 0.1 ** (idx + 1)
lr_schedule.append(
(steps * factor // stepnum, cfg.TRAIN.BASE_LR * mult))
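    # Worked example (illustrative): with the default "1x" schedule ([120k, 160k, 180k]
    # iterations at total batch size 8), 8 GPUs (factor=1) and STEPS_PER_EPOCH=500, the LR
    # drops 10x at epochs 240 and 320, and max_epoch becomes 180000 // 500 = 360.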
logger.info("Warm Up Schedule (steps, value): " + str(warmup_schedule))
logger.info("LR Schedule (epochs, value): " + str(lr_schedule))
train_dataflow = get_train_dataflow()
# This is what's commonly referred to as "epochs"
total_passes = cfg.TRAIN.LR_SCHEDULE[-1] * 8 / train_dataflow.size()
logger.info("Total passes of the training set is: {:.5g}".format(total_passes))
# Create callbacks ...
callbacks = [
PeriodicCallback(
ModelSaver(max_to_keep=10, keep_checkpoint_every_n_hours=1),
every_k_epochs=cfg.TRAIN.CHECKPOINT_PERIOD),
# linear warmup
ScheduledHyperParamSetter(
'learning_rate', warmup_schedule, interp='linear', step_based=True),
ScheduledHyperParamSetter('learning_rate', lr_schedule),
GPUMemoryTracker(),
HostMemoryTracker(),
ThroughputTracker(samples_per_step=cfg.TRAIN.NUM_GPUS),
EstimatedTimeLeft(median=True),
SessionRunTimeout(60000), # 1 minute timeout
GPUUtilizationTracker()
]
if cfg.TRAIN.EVAL_PERIOD > 0:
callbacks.extend([
EvalCallback(dataset, *MODEL.get_inference_tensor_names(), args.logdir)
for dataset in cfg.DATA.VAL
])
if is_horovod and hvd.rank() > 0:
session_init = None
else:
if args.load:
# ignore mismatched values, so you can `--load` a model for fine-tuning
session_init = SmartInit(args.load, ignore_mismatch=True)
else:
session_init = SmartInit(cfg.BACKBONE.WEIGHTS)
traincfg = TrainConfig(
model=MODEL,
data=QueueInput(train_dataflow),
callbacks=callbacks,
steps_per_epoch=stepnum,
max_epoch=cfg.TRAIN.LR_SCHEDULE[-1] * factor // stepnum,
session_init=session_init,
starting_epoch=cfg.TRAIN.STARTING_EPOCH
)
if is_horovod:
trainer = HorovodTrainer(average=False)
else:
# nccl mode appears faster than cpu mode
trainer = SyncMultiGPUTrainerReplicated(cfg.TRAIN.NUM_GPUS, average=False, mode='nccl')
launch_train_with_config(traincfg, trainer)
| 4,916 | 37.716535 | 114 | py |
SyNet | SyNet-master/tensorpack/examples/FasterRCNN/dataset/balloon.py | import os
import numpy as np
import json
from dataset import DatasetSplit, DatasetRegistry
__all__ = ["register_balloon"]
class BalloonDemo(DatasetSplit):
def __init__(self, base_dir, split):
assert split in ["train", "val"]
base_dir = os.path.expanduser(base_dir)
self.imgdir = os.path.join(base_dir, split)
assert os.path.isdir(self.imgdir), self.imgdir
def training_roidbs(self):
json_file = os.path.join(self.imgdir, "via_region_data.json")
with open(json_file) as f:
obj = json.load(f)
ret = []
for _, v in obj.items():
fname = v["filename"]
fname = os.path.join(self.imgdir, fname)
roidb = {"file_name": fname}
annos = v["regions"]
boxes = []
segs = []
for _, anno in annos.items():
assert not anno["region_attributes"]
anno = anno["shape_attributes"]
px = anno["all_points_x"]
py = anno["all_points_y"]
poly = np.stack((px, py), axis=1) + 0.5
maxxy = poly.max(axis=0)
minxy = poly.min(axis=0)
boxes.append([minxy[0], minxy[1], maxxy[0], maxxy[1]])
segs.append([poly])
N = len(annos)
roidb["boxes"] = np.asarray(boxes, dtype=np.float32)
roidb["segmentation"] = segs
roidb["class"] = np.ones((N, ), dtype=np.int32)
roidb["is_crowd"] = np.zeros((N, ), dtype=np.int8)
ret.append(roidb)
return ret
def register_balloon(basedir):
for split in ["train", "val"]:
name = "balloon_" + split
DatasetRegistry.register(name, lambda x=split: BalloonDemo(basedir, x))
DatasetRegistry.register_metadata(name, "class_names", ["BG", "balloon"])
if __name__ == '__main__':
basedir = '~/data/balloon'
roidbs = BalloonDemo(basedir, "train").training_roidbs()
print("#images:", len(roidbs))
from viz import draw_annotation
from tensorpack.utils.viz import interactive_imshow as imshow
import cv2
for r in roidbs:
im = cv2.imread(r["file_name"])
vis = draw_annotation(im, r["boxes"], r["class"], r["segmentation"])
imshow(vis)
| 2,297 | 31.366197 | 81 | py |
SyNet | SyNet-master/tensorpack/examples/FasterRCNN/dataset/dataset.py | # -*- coding: utf-8 -*-
from collections import defaultdict
__all__ = ['DatasetRegistry', 'DatasetSplit']
class DatasetSplit():
"""
    A class to load datasets and evaluate results for a dataset split (e.g., "coco_train_2017")
To use your own dataset that's not in COCO format, write a subclass that
implements the interfaces.
"""
def training_roidbs(self):
"""
Returns:
roidbs (list[dict]):
Produce "roidbs" as a list of dict, each dict corresponds to one image with k>=0 instances.
and the following keys are expected for training:
file_name: str, full path to the image
boxes: numpy array of kx4 floats, each row is [x1, y1, x2, y2]
class: numpy array of k integers, in the range of [1, #categories], NOT [0, #categories)
is_crowd: k booleans. Use k False if you don't know what it means.
segmentation: k lists of numpy arrays.
Each list of numpy arrays corresponds to the mask for one instance.
Each numpy array in the list is a polygon of shape Nx2,
because one mask can be represented by N polygons.
Each row in the Nx2 array is a (x, y) coordinate.
If your segmentation annotations are originally masks rather than polygons,
either convert it, or the augmentation will need to be changed or skipped accordingly.
Include this field only if training Mask R-CNN.
Coordinates in boxes & polygons are absolute coordinates in unit of pixels, unless
cfg.DATA.ABSOLUTE_COORD is False.
"""
raise NotImplementedError()
def inference_roidbs(self):
"""
Returns:
roidbs (list[dict]):
Each dict corresponds to one image to run inference on. The
following keys in the dict are expected:
file_name (str): full path to the image
image_id (str): an id for the image. The inference results will be stored with this id.
"""
raise NotImplementedError()
def eval_inference_results(self, results, output=None):
"""
Args:
results (list[dict]): the inference results as dicts.
Each dict corresponds to one __instance__. It contains the following keys:
image_id (str): the id that matches `inference_roidbs`.
category_id (int): the category prediction, in range [1, #category]
bbox (list[float]): x1, y1, x2, y2
score (float):
segmentation: the segmentation mask in COCO's rle format.
output (str): the output file or directory to optionally save the results to.
Returns:
dict: the evaluation results.
"""
raise NotImplementedError()
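# A minimal sketch of one training roidb in the format documented above. It is
# illustrative only: the file path and values are hypothetical, and nothing in
# the library calls this helper.
def _example_training_roidb():
    import numpy as np
    return {
        "file_name": "/path/to/image.jpg",  # hypothetical path
        "boxes": np.asarray([[10., 20., 100., 150.]], dtype=np.float32),  # [x1, y1, x2, y2]
        "class": np.asarray([1], dtype=np.int32),  # labels start at 1, not 0
        "is_crowd": np.asarray([0], dtype=np.int8),
        # one instance whose mask is a single polygon of three (x, y) points
        "segmentation": [[np.asarray([[10., 20.], [100., 20.], [55., 150.]], dtype=np.float32)]],
    }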
class DatasetRegistry():
_registry = {}
_metadata_registry = defaultdict(dict)
@staticmethod
def register(name, func):
"""
Args:
name (str): the name of the dataset split, e.g. "coco_train2017"
func: a function which returns an instance of `DatasetSplit`
"""
assert name not in DatasetRegistry._registry, "Dataset {} was registered already!".format(name)
DatasetRegistry._registry[name] = func
@staticmethod
def get(name):
"""
Args:
name (str): the name of the dataset split, e.g. "coco_train2017"
Returns:
DatasetSplit
"""
assert name in DatasetRegistry._registry, "Dataset {} was not registered!".format(name)
return DatasetRegistry._registry[name]()
@staticmethod
def register_metadata(name, key, value):
"""
Args:
name (str): the name of the dataset split, e.g. "coco_train2017"
key: the key of the metadata, e.g., "class_names"
value: the value of the metadata
"""
DatasetRegistry._metadata_registry[name][key] = value
@staticmethod
def get_metadata(name, key):
"""
Args:
name (str): the name of the dataset split, e.g. "coco_train2017"
key: the key of the metadata, e.g., "class_names"
Returns:
value
"""
return DatasetRegistry._metadata_registry[name][key]
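# A minimal usage sketch of the registry; "my_train" and MySplit are hypothetical
# names, shown only to illustrate how register/get and the metadata calls fit together.
def _registry_usage_sketch():
    class MySplit(DatasetSplit):
        def training_roidbs(self):
            return []  # a real split would return dicts like _example_training_roidb()
    DatasetRegistry.register("my_train", lambda: MySplit())
    DatasetRegistry.register_metadata("my_train", "class_names", ["BG", "thing"])
    split = DatasetRegistry.get("my_train")
    return split.training_roidbs(), DatasetRegistry.get_metadata("my_train", "class_names")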
| 4,312 | 34.352459 | 103 | py |
SyNet | SyNet-master/tensorpack/examples/FasterRCNN/dataset/__init__.py | from .dataset import *
from .coco import *
from .balloon import *
| 66 | 15.75 | 22 | py |
SyNet | SyNet-master/tensorpack/examples/FasterRCNN/dataset/coco.py | # -*- coding: utf-8 -*-
import json
import numpy as np
import os
import tqdm
from tensorpack.utils import logger
from tensorpack.utils.timer import timed_operation
from config import config as cfg
from dataset import DatasetRegistry, DatasetSplit
__all__ = ['register_coco']
class COCODetection(DatasetSplit):
# handle a few special splits whose names do not match the directory names
_INSTANCE_TO_BASEDIR = {
'valminusminival2014': 'val2014',
'minival2014': 'val2014',
'val2017_100': 'val2017',
}
"""
    Mapping from the non-contiguous COCO category id to an id in [1, #category].
    For your own coco-format dataset, change this to an **empty dict**.
"""
COCO_id_to_category_id = {13: 12, 14: 13, 15: 14, 16: 15, 17: 16, 18: 17, 19: 18, 20: 19, 21: 20, 22: 21, 23: 22, 24: 23, 25: 24, 27: 25, 28: 26, 31: 27, 32: 28, 33: 29, 34: 30, 35: 31, 36: 32, 37: 33, 38: 34, 39: 35, 40: 36, 41: 37, 42: 38, 43: 39, 44: 40, 46: 41, 47: 42, 48: 43, 49: 44, 50: 45, 51: 46, 52: 47, 53: 48, 54: 49, 55: 50, 56: 51, 57: 52, 58: 53, 59: 54, 60: 55, 61: 56, 62: 57, 63: 58, 64: 59, 65: 60, 67: 61, 70: 62, 72: 63, 73: 64, 74: 65, 75: 66, 76: 67, 77: 68, 78: 69, 79: 70, 80: 71, 81: 72, 82: 73, 84: 74, 85: 75, 86: 76, 87: 77, 88: 78, 89: 79, 90: 80} # noqa
def __init__(self, basedir, split):
"""
Args:
basedir (str): root of the dataset which contains the subdirectories for each split and annotations
split (str): the name of the split, e.g. "train2017".
The split has to match an annotation file in "annotations/" and a directory of images.
Examples:
For a directory of this structure:
DIR/
annotations/
instances_XX.json
instances_YY.json
XX/
YY/
use `COCODetection(DIR, 'XX')` and `COCODetection(DIR, 'YY')`
"""
basedir = os.path.expanduser(basedir)
self._imgdir = os.path.realpath(os.path.join(
basedir, self._INSTANCE_TO_BASEDIR.get(split, split)))
assert os.path.isdir(self._imgdir), "{} is not a directory!".format(self._imgdir)
annotation_file = os.path.join(
basedir, 'annotations/instances_{}.json'.format(split))
assert os.path.isfile(annotation_file), annotation_file
from pycocotools.coco import COCO
self.coco = COCO(annotation_file)
self.annotation_file = annotation_file
logger.info("Instances loaded from {}.".format(annotation_file))
# https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocoEvalDemo.ipynb
def print_coco_metrics(self, results):
"""
Args:
results(list[dict]): results in coco format
Returns:
dict: the evaluation metrics
"""
from pycocotools.cocoeval import COCOeval
ret = {}
has_mask = "segmentation" in results[0] # results will be modified by loadRes
cocoDt = self.coco.loadRes(results)
cocoEval = COCOeval(self.coco, cocoDt, 'bbox')
cocoEval.evaluate()
cocoEval.accumulate()
cocoEval.summarize()
fields = ['IoU=0.5:0.95', 'IoU=0.5', 'IoU=0.75', 'small', 'medium', 'large']
for k in range(6):
ret['mAP(bbox)/' + fields[k]] = cocoEval.stats[k]
if len(results) > 0 and has_mask:
cocoEval = COCOeval(self.coco, cocoDt, 'segm')
cocoEval.evaluate()
cocoEval.accumulate()
cocoEval.summarize()
for k in range(6):
ret['mAP(segm)/' + fields[k]] = cocoEval.stats[k]
return ret
def load(self, add_gt=True, add_mask=False):
"""
Args:
add_gt: whether to add ground truth bounding box annotations to the dicts
add_mask: whether to also add ground truth mask
Returns:
a list of dict, each has keys including:
'image_id', 'file_name',
and (if add_gt is True) 'boxes', 'class', 'is_crowd', and optionally
'segmentation'.
"""
with timed_operation('Load annotations for {}'.format(
os.path.basename(self.annotation_file))):
img_ids = self.coco.getImgIds()
img_ids.sort()
# list of dict, each has keys: height,width,id,file_name
imgs = self.coco.loadImgs(img_ids)
for idx, img in enumerate(tqdm.tqdm(imgs)):
img['image_id'] = img.pop('id')
img['file_name'] = os.path.join(self._imgdir, img['file_name'])
if idx == 0:
# make sure the directories are correctly set
assert os.path.isfile(img["file_name"]), img["file_name"]
if add_gt:
self._add_detection_gt(img, add_mask)
return imgs
def _add_detection_gt(self, img, add_mask):
"""
Add 'boxes', 'class', 'is_crowd' of this image to the dict, used by detection.
If add_mask is True, also add 'segmentation' in coco poly format.
"""
# ann_ids = self.coco.getAnnIds(imgIds=img['image_id'])
# objs = self.coco.loadAnns(ann_ids)
objs = self.coco.imgToAnns[img['image_id']] # equivalent but faster than the above two lines
if 'minival' not in self.annotation_file:
# TODO better to check across the entire json, rather than per-image
ann_ids = [ann["id"] for ann in objs]
assert len(set(ann_ids)) == len(ann_ids), \
"Annotation ids in '{}' are not unique!".format(self.annotation_file)
# clean-up boxes
width = img.pop('width')
height = img.pop('height')
all_boxes = []
all_segm = []
all_cls = []
all_iscrowd = []
for objid, obj in enumerate(objs):
if obj.get('ignore', 0) == 1:
continue
x1, y1, w, h = list(map(float, obj['bbox']))
# bbox is originally in float
# x1/y1 means upper-left corner and w/h means true w/h. This can be verified by segmentation pixels.
# But we do make an assumption here that (0.0, 0.0) is upper-left corner of the first pixel
x2, y2 = x1 + w, y1 + h
# np.clip would be quite slow here
x1 = min(max(x1, 0), width)
x2 = min(max(x2, 0), width)
y1 = min(max(y1, 0), height)
y2 = min(max(y2, 0), height)
w, h = x2 - x1, y2 - y1
# Require non-zero seg area and more than 1x1 box size
if obj['area'] > 1 and w > 0 and h > 0:
all_boxes.append([x1, y1, x2, y2])
all_cls.append(self.COCO_id_to_category_id.get(obj['category_id'], obj['category_id']))
iscrowd = obj.get("iscrowd", 0)
all_iscrowd.append(iscrowd)
if add_mask:
segs = obj['segmentation']
if not isinstance(segs, list):
assert iscrowd == 1
all_segm.append(None)
else:
valid_segs = [np.asarray(p).reshape(-1, 2).astype('float32') for p in segs if len(p) >= 6]
if len(valid_segs) == 0:
logger.error("Object {} in image {} has no valid polygons!".format(objid, img['file_name']))
elif len(valid_segs) < len(segs):
logger.warn("Object {} in image {} has invalid polygons!".format(objid, img['file_name']))
all_segm.append(valid_segs)
# all geometrically-valid boxes are returned
if len(all_boxes):
img['boxes'] = np.asarray(all_boxes, dtype='float32') # (n, 4)
else:
img['boxes'] = np.zeros((0, 4), dtype='float32')
cls = np.asarray(all_cls, dtype='int32') # (n,)
if len(cls):
assert cls.min() > 0, "Category id in COCO format must > 0!"
img['class'] = cls # n, always >0
img['is_crowd'] = np.asarray(all_iscrowd, dtype='int8') # n,
if add_mask:
# also required to be float32
img['segmentation'] = all_segm
def training_roidbs(self):
return self.load(add_gt=True, add_mask=cfg.MODE_MASK)
def inference_roidbs(self):
return self.load(add_gt=False)
def eval_inference_results(self, results, output=None):
continuous_id_to_COCO_id = {v: k for k, v in self.COCO_id_to_category_id.items()}
for res in results:
            # convert back to COCO's non-contiguous category id
if res['category_id'] in continuous_id_to_COCO_id:
res['category_id'] = continuous_id_to_COCO_id[res['category_id']]
# COCO expects results in xywh format
box = res['bbox']
box[2] -= box[0]
box[3] -= box[1]
res['bbox'] = [round(float(x), 3) for x in box]
if output is not None:
with open(output, 'w') as f:
json.dump(results, f)
if len(results):
# sometimes may crash if the results are empty?
return self.print_coco_metrics(results)
else:
return {}
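# eval_inference_results above receives boxes as [x1, y1, x2, y2] and rewrites them
# in place to COCO's [x, y, w, h]. A small sketch of that conversion on a
# hypothetical box:
def _sketch_xyxy_to_xywh(box=(10.0, 20.0, 110.0, 220.0)):
    x1, y1, x2, y2 = box
    return [round(float(v), 3) for v in (x1, y1, x2 - x1, y2 - y1)]  # [10.0, 20.0, 100.0, 200.0]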
def register_coco(basedir):
"""
Add COCO datasets like "coco_train201x" to the registry,
so you can refer to them with names in `cfg.DATA.TRAIN/VAL`.
Note that train2017==trainval35k==train2014+val2014-minival2014, and val2017==minival2014.
"""
# 80 names for COCO
# For your own coco-format dataset, change this.
class_names = [
"person", "bicycle", "car", "motorcycle", "airplane", "bus", "train", "truck", "boat", "traffic light", "fire hydrant", "stop sign", "parking meter", "bench", "bird", "cat", "dog", "horse", "sheep", "cow", "elephant", "bear", "zebra", "giraffe", "backpack", "umbrella", "handbag", "tie", "suitcase", "frisbee", "skis", "snowboard", "sports ball", "kite", "baseball bat", "baseball glove", "skateboard", "surfboard", "tennis racket", "bottle", "wine glass", "cup", "fork", "knife", "spoon", "bowl", "banana", "apple", "sandwich", "orange", "broccoli", "carrot", "hot dog", "pizza", "donut", "cake", "chair", "couch", "potted plant", "bed", "dining table", "toilet", "tv", "laptop", "mouse", "remote", "keyboard", "cell phone", "microwave", "oven", "toaster", "sink", "refrigerator", "book", "clock", "vase", "scissors", "teddy bear", "hair drier", "toothbrush"] # noqa
class_names = ["BG"] + class_names
for split in ["train2017", "val2017", "train2014", "val2014",
"valminusminival2014", "minival2014", "val2017_100"]:
name = "coco_" + split
DatasetRegistry.register(name, lambda x=split: COCODetection(basedir, x))
DatasetRegistry.register_metadata(name, 'class_names', class_names)
if __name__ == '__main__':
basedir = '~/data/coco'
c = COCODetection(basedir, 'train2014')
roidb = c.load(add_gt=True, add_mask=True)
print("#Images:", len(roidb))
| 11,153 | 44.157895 | 876 | py |
SyNet | SyNet-master/tensorpack/examples/FasterRCNN/utils/box_ops.py | # -*- coding: utf-8 -*-
# File: box_ops.py
import tensorflow as tf
from tensorpack.tfutils.scope_utils import under_name_scope
"""
This file is modified from
https://github.com/tensorflow/models/blob/master/object_detection/core/box_list_ops.py
"""
@under_name_scope()
def area(boxes):
"""
Args:
boxes: nx4 floatbox
Returns:
n
"""
x_min, y_min, x_max, y_max = tf.split(boxes, 4, axis=1)
return tf.squeeze((y_max - y_min) * (x_max - x_min), [1])
@under_name_scope()
def pairwise_intersection(boxlist1, boxlist2):
"""Compute pairwise intersection areas between boxes.
Args:
boxlist1: Nx4 floatbox
boxlist2: Mx4
Returns:
a tensor with shape [N, M] representing pairwise intersections
"""
x_min1, y_min1, x_max1, y_max1 = tf.split(boxlist1, 4, axis=1)
x_min2, y_min2, x_max2, y_max2 = tf.split(boxlist2, 4, axis=1)
all_pairs_min_ymax = tf.minimum(y_max1, tf.transpose(y_max2))
all_pairs_max_ymin = tf.maximum(y_min1, tf.transpose(y_min2))
intersect_heights = tf.maximum(0.0, all_pairs_min_ymax - all_pairs_max_ymin)
all_pairs_min_xmax = tf.minimum(x_max1, tf.transpose(x_max2))
all_pairs_max_xmin = tf.maximum(x_min1, tf.transpose(x_min2))
intersect_widths = tf.maximum(0.0, all_pairs_min_xmax - all_pairs_max_xmin)
return intersect_heights * intersect_widths
@under_name_scope()
def pairwise_iou(boxlist1, boxlist2):
"""Computes pairwise intersection-over-union between box collections.
Args:
boxlist1: Nx4 floatbox
boxlist2: Mx4
Returns:
a tensor with shape [N, M] representing pairwise iou scores.
"""
intersections = pairwise_intersection(boxlist1, boxlist2)
areas1 = area(boxlist1)
areas2 = area(boxlist2)
unions = (
tf.expand_dims(areas1, 1) + tf.expand_dims(areas2, 0) - intersections)
return tf.where(
tf.equal(intersections, 0.0),
tf.zeros_like(intersections), tf.truediv(intersections, unions))
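def _sketch_pairwise_iou():
    """
    A small sketch of how pairwise_iou maps an Nx4 and an Mx4 box tensor to an
    NxM IoU matrix. The boxes are hypothetical; this helper is illustrative only.
    """
    boxes1 = tf.constant([[0., 0., 10., 10.], [5., 5., 15., 15.]])  # N=2
    boxes2 = tf.constant([[0., 0., 10., 10.]])  # M=1
    return pairwise_iou(boxes1, boxes2)  # shape [2, 1]; the first entry is 1.0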
| 2,000 | 28 | 86 | py |
SyNet | SyNet-master/tensorpack/examples/FasterRCNN/utils/np_box_ops.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Operations for [N, 4] numpy arrays representing bounding boxes.
Example box operations that are supported:
* Areas: compute bounding box areas
* IOU: pairwise intersection-over-union scores
"""
import numpy as np
def area(boxes):
"""Computes area of boxes.
Args:
boxes: Numpy array with shape [N, 4] holding N boxes
Returns:
a numpy array with shape [N*1] representing box areas
"""
return (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])
def intersection(boxes1, boxes2):
"""Compute pairwise intersection areas between boxes.
Args:
boxes1: a numpy array with shape [N, 4] holding N boxes
boxes2: a numpy array with shape [M, 4] holding M boxes
Returns:
a numpy array with shape [N*M] representing pairwise intersection area
"""
[y_min1, x_min1, y_max1, x_max1] = np.split(boxes1, 4, axis=1)
[y_min2, x_min2, y_max2, x_max2] = np.split(boxes2, 4, axis=1)
all_pairs_min_ymax = np.minimum(y_max1, np.transpose(y_max2))
all_pairs_max_ymin = np.maximum(y_min1, np.transpose(y_min2))
intersect_heights = np.maximum(
np.zeros(all_pairs_max_ymin.shape, dtype='f4'),
all_pairs_min_ymax - all_pairs_max_ymin)
all_pairs_min_xmax = np.minimum(x_max1, np.transpose(x_max2))
all_pairs_max_xmin = np.maximum(x_min1, np.transpose(x_min2))
intersect_widths = np.maximum(
np.zeros(all_pairs_max_xmin.shape, dtype='f4'),
all_pairs_min_xmax - all_pairs_max_xmin)
return intersect_heights * intersect_widths
def iou(boxes1, boxes2):
"""Computes pairwise intersection-over-union between box collections.
Args:
boxes1: a numpy array with shape [N, 4] holding N boxes.
boxes2: a numpy array with shape [M, 4] holding M boxes.
Returns:
a numpy array with shape [N, M] representing pairwise iou scores.
"""
intersect = intersection(boxes1, boxes2)
area1 = area(boxes1)
area2 = area(boxes2)
union = np.expand_dims(area1, axis=1) + np.expand_dims(
area2, axis=0) - intersect
return intersect / union
def ioa(boxes1, boxes2):
"""Computes pairwise intersection-over-area between box collections.
Intersection-over-area (ioa) between two boxes box1 and box2 is defined as
their intersection area over box2's area. Note that ioa is not symmetric,
that is, IOA(box1, box2) != IOA(box2, box1).
Args:
boxes1: a numpy array with shape [N, 4] holding N boxes.
    boxes2: a numpy array with shape [M, 4] holding M boxes.
Returns:
a numpy array with shape [N, M] representing pairwise ioa scores.
"""
intersect = intersection(boxes1, boxes2)
inv_areas = np.expand_dims(1.0 / area(boxes2), axis=0)
return intersect * inv_areas
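def _sketch_iou():
    """
    A small, illustrative-only sketch of iou() on hypothetical boxes. Note that the
    helpers in this file split coordinates as [y_min, x_min, y_max, x_max].
    """
    b1 = np.array([[0., 0., 10., 10.]], dtype='float32')
    b2 = np.array([[0., 0., 10., 10.], [5., 5., 15., 15.]], dtype='float32')
    return iou(b1, b2)  # shape (1, 2); the first entry is 1.0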
| 3,358 | 33.27551 | 80 | py |
SyNet | SyNet-master/tensorpack/examples/FasterRCNN/modeling/model_mrcnn.py | # -*- coding: utf-8 -*-
import tensorflow as tf
from tensorpack.models import Conv2D, Conv2DTranspose, layer_register
from tensorpack.tfutils.argscope import argscope
from tensorpack.tfutils.common import get_tf_version_tuple
from tensorpack.tfutils.scope_utils import under_name_scope
from tensorpack.tfutils.summary import add_moving_summary
from .backbone import GroupNorm
from config import config as cfg
@under_name_scope()
def maskrcnn_loss(mask_logits, fg_labels, fg_target_masks):
"""
Args:
mask_logits: #fg x #category xhxw
fg_labels: #fg, in 1~#class, int64
fg_target_masks: #fgxhxw, float32
"""
if get_tf_version_tuple() >= (1, 14):
mask_logits = tf.gather(
mask_logits, tf.reshape(fg_labels - 1, [-1, 1]), batch_dims=1)
mask_logits = tf.squeeze(mask_logits, axis=1)
else:
indices = tf.stack([tf.range(tf.size(fg_labels, out_type=tf.int64)),
fg_labels - 1], axis=1) # #fgx2
mask_logits = tf.gather_nd(mask_logits, indices) # #fg x h x w
mask_probs = tf.sigmoid(mask_logits)
# add some training visualizations to tensorboard
with tf.name_scope('mask_viz'):
viz = tf.concat([fg_target_masks, mask_probs], axis=1)
viz = tf.expand_dims(viz, 3)
viz = tf.cast(viz * 255, tf.uint8, name='viz')
tf.summary.image('mask_truth|pred', viz, max_outputs=10)
loss = tf.nn.sigmoid_cross_entropy_with_logits(
labels=fg_target_masks, logits=mask_logits)
loss = tf.reduce_mean(loss, name='maskrcnn_loss')
pred_label = mask_probs > 0.5
truth_label = fg_target_masks > 0.5
accuracy = tf.reduce_mean(
tf.cast(tf.equal(pred_label, truth_label), tf.float32),
name='accuracy')
pos_accuracy = tf.logical_and(
tf.equal(pred_label, truth_label),
tf.equal(truth_label, True))
pos_accuracy = tf.reduce_mean(tf.cast(pos_accuracy, tf.float32), name='pos_accuracy')
fg_pixel_ratio = tf.reduce_mean(tf.cast(truth_label, tf.float32), name='fg_pixel_ratio')
add_moving_summary(loss, accuracy, fg_pixel_ratio, pos_accuracy)
return loss
@layer_register(log_shape=True)
def maskrcnn_upXconv_head(feature, num_category, num_convs, norm=None):
"""
Args:
        feature (N x C x s x s): the RoI feature map; s is 7 in C4 models and 14 in FPN models.
num_category(int):
num_convs (int): number of convolution layers
norm (str or None): either None or 'GN'
Returns:
mask_logits (N x num_category x 2s x 2s):
"""
assert norm in [None, 'GN'], norm
l = feature
with argscope([Conv2D, Conv2DTranspose], data_format='channels_first',
kernel_initializer=tf.variance_scaling_initializer(
scale=2.0, mode='fan_out',
distribution='untruncated_normal' if get_tf_version_tuple() >= (1, 12) else 'normal')):
# c2's MSRAFill is fan_out
for k in range(num_convs):
l = Conv2D('fcn{}'.format(k), l, cfg.MRCNN.HEAD_DIM, 3, activation=tf.nn.relu)
if norm is not None:
l = GroupNorm('gn{}'.format(k), l)
l = Conv2DTranspose('deconv', l, cfg.MRCNN.HEAD_DIM, 2, strides=2, activation=tf.nn.relu)
l = Conv2D('conv', l, num_category, 1, kernel_initializer=tf.random_normal_initializer(stddev=0.001))
return l
def maskrcnn_up4conv_head(*args, **kwargs):
return maskrcnn_upXconv_head(*args, num_convs=4, **kwargs)
def maskrcnn_up4conv_gn_head(*args, **kwargs):
return maskrcnn_upXconv_head(*args, num_convs=4, norm='GN', **kwargs)
def unpackbits_masks(masks):
"""
Args:
masks (Tensor): uint8 Tensor of shape N, H, W. The last dimension is packed bits.
Returns:
masks (Tensor): bool Tensor of shape N, H, 8*W.
This is a reverse operation of `np.packbits`
"""
assert masks.dtype == tf.uint8, masks
bits = tf.constant((128, 64, 32, 16, 8, 4, 2, 1), dtype=tf.uint8)
unpacked = tf.bitwise.bitwise_and(tf.expand_dims(masks, -1), bits) > 0
unpacked = tf.reshape(
unpacked,
tf.concat([tf.shape(masks)[:-1], [8 * tf.shape(masks)[-1]]], axis=0))
return unpacked
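def _sketch_unpackbits_roundtrip():
    """
    An illustrative-only sketch of the np.packbits <-> unpackbits_masks round trip
    used for ground-truth masks; the shapes below are hypothetical.
    """
    import numpy as np
    masks = np.random.rand(2, 5, 16) > 0.5  # 2 boolean masks of 5x16
    packed = np.packbits(masks, axis=-1)  # (2, 5, 2) uint8, what the dataflow feeds
    unpacked = unpackbits_masks(tf.constant(packed))  # (2, 5, 16) bool tensor again
    return masks, unpacked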
| 4,215 | 36.309735 | 109 | py |
SyNet | SyNet-master/tensorpack/examples/FasterRCNN/modeling/model_frcnn.py | # -*- coding: utf-8 -*-
# File: model_frcnn.py
import tensorflow as tf
from tensorpack.models import Conv2D, FullyConnected, layer_register
from tensorpack.tfutils.argscope import argscope
from tensorpack.tfutils.common import get_tf_version_tuple
from tensorpack.tfutils.scope_utils import under_name_scope
from tensorpack.tfutils.summary import add_moving_summary
from tensorpack.utils.argtools import memoized_method
from config import config as cfg
from utils.box_ops import pairwise_iou
from .model_box import decode_bbox_target, encode_bbox_target
from .backbone import GroupNorm
@under_name_scope()
def proposal_metrics(iou):
"""
Add summaries for RPN proposals.
Args:
iou: nxm, #proposal x #gt
"""
# find best roi for each gt, for summary only
best_iou = tf.reduce_max(iou, axis=0)
mean_best_iou = tf.reduce_mean(best_iou, name='best_iou_per_gt')
summaries = [mean_best_iou]
with tf.device('/cpu:0'):
for th in [0.3, 0.5]:
recall = tf.truediv(
tf.count_nonzero(best_iou >= th),
tf.size(best_iou, out_type=tf.int64),
name='recall_iou{}'.format(th))
summaries.append(recall)
add_moving_summary(*summaries)
@under_name_scope()
def sample_fast_rcnn_targets(boxes, gt_boxes, gt_labels):
"""
Sample some boxes from all proposals for training.
#fg is guaranteed to be > 0, because ground truth boxes will be added as proposals.
Args:
boxes: nx4 region proposals, floatbox
gt_boxes: mx4, floatbox
gt_labels: m, int32
Returns:
A BoxProposals instance, with:
sampled_boxes: tx4 floatbox, the rois
sampled_labels: t int64 labels, in [0, #class). Positive means foreground.
fg_inds_wrt_gt: #fg indices, each in range [0, m-1].
It contains the matching GT of each foreground roi.
"""
iou = pairwise_iou(boxes, gt_boxes) # nxm
proposal_metrics(iou)
# add ground truth as proposals as well
boxes = tf.concat([boxes, gt_boxes], axis=0) # (n+m) x 4
iou = tf.concat([iou, tf.eye(tf.shape(gt_boxes)[0])], axis=0) # (n+m) x m
# #proposal=n+m from now on
def sample_fg_bg(iou):
fg_mask = tf.cond(tf.shape(iou)[1] > 0,
lambda: tf.reduce_max(iou, axis=1) >= cfg.FRCNN.FG_THRESH,
lambda: tf.zeros([tf.shape(iou)[0]], dtype=tf.bool))
fg_inds = tf.reshape(tf.where(fg_mask), [-1])
num_fg = tf.minimum(int(
cfg.FRCNN.BATCH_PER_IM * cfg.FRCNN.FG_RATIO),
tf.size(fg_inds), name='num_fg')
fg_inds = tf.random_shuffle(fg_inds)[:num_fg]
bg_inds = tf.reshape(tf.where(tf.logical_not(fg_mask)), [-1])
num_bg = tf.minimum(
cfg.FRCNN.BATCH_PER_IM - num_fg,
tf.size(bg_inds), name='num_bg')
bg_inds = tf.random_shuffle(bg_inds)[:num_bg]
add_moving_summary(num_fg, num_bg)
return fg_inds, bg_inds
fg_inds, bg_inds = sample_fg_bg(iou)
# fg,bg indices w.r.t proposals
best_iou_ind = tf.cond(tf.shape(iou)[1] > 0,
lambda: tf.argmax(iou, axis=1), # #proposal, each in 0~m-1
lambda: tf.zeros([tf.shape(iou)[0]], dtype=tf.int64))
fg_inds_wrt_gt = tf.gather(best_iou_ind, fg_inds) # num_fg
all_indices = tf.concat([fg_inds, bg_inds], axis=0) # indices w.r.t all n+m proposal boxes
ret_boxes = tf.gather(boxes, all_indices)
ret_labels = tf.concat(
[tf.gather(gt_labels, fg_inds_wrt_gt),
tf.zeros_like(bg_inds, dtype=tf.int64)], axis=0)
# stop the gradient -- they are meant to be training targets
return BoxProposals(
tf.stop_gradient(ret_boxes, name='sampled_proposal_boxes'),
tf.stop_gradient(ret_labels, name='sampled_labels'),
tf.stop_gradient(fg_inds_wrt_gt))
@layer_register(log_shape=True)
def fastrcnn_outputs(feature, num_categories, class_agnostic_regression=False):
"""
Args:
feature (any shape):
num_categories (int):
class_agnostic_regression (bool): if True, regression to N x 1 x 4
Returns:
cls_logits: N x num_class classification logits
reg_logits: N x num_classx4 or Nx1x4 if class agnostic
"""
num_classes = num_categories + 1
classification = FullyConnected(
'class', feature, num_classes,
kernel_initializer=tf.random_normal_initializer(stddev=0.01))
num_classes_for_box = 1 if class_agnostic_regression else num_classes
box_regression = FullyConnected(
'box', feature, num_classes_for_box * 4,
kernel_initializer=tf.random_normal_initializer(stddev=0.001))
box_regression = tf.reshape(box_regression, (-1, num_classes_for_box, 4), name='output_box')
return classification, box_regression
@under_name_scope()
def fastrcnn_losses(labels, label_logits, fg_boxes, fg_box_logits):
"""
Args:
labels: n,
label_logits: nxC
fg_boxes: nfgx4, encoded
fg_box_logits: nfgxCx4 or nfgx1x4 if class agnostic
Returns:
label_loss, box_loss
"""
label_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=labels, logits=label_logits)
label_loss = tf.reduce_mean(label_loss, name='label_loss')
fg_inds = tf.where(labels > 0)[:, 0]
fg_labels = tf.gather(labels, fg_inds)
num_fg = tf.size(fg_inds, out_type=tf.int64)
empty_fg = tf.equal(num_fg, 0)
if int(fg_box_logits.shape[1]) > 1:
if get_tf_version_tuple() >= (1, 14):
fg_labels = tf.expand_dims(fg_labels, axis=1) # nfg x 1
fg_box_logits = tf.gather(fg_box_logits, fg_labels, batch_dims=1)
else:
indices = tf.stack([tf.range(num_fg), fg_labels], axis=1) # nfgx2
fg_box_logits = tf.gather_nd(fg_box_logits, indices)
fg_box_logits = tf.reshape(fg_box_logits, [-1, 4]) # nfg x 4
with tf.name_scope('label_metrics'), tf.device('/cpu:0'):
prediction = tf.argmax(label_logits, axis=1, name='label_prediction')
correct = tf.cast(tf.equal(prediction, labels), tf.float32) # boolean/integer gather is unavailable on GPU
accuracy = tf.reduce_mean(correct, name='accuracy')
fg_label_pred = tf.argmax(tf.gather(label_logits, fg_inds), axis=1)
num_zero = tf.reduce_sum(tf.cast(tf.equal(fg_label_pred, 0), tf.int64), name='num_zero')
false_negative = tf.where(
empty_fg, 0., tf.cast(tf.truediv(num_zero, num_fg), tf.float32), name='false_negative')
fg_accuracy = tf.where(
empty_fg, 0., tf.reduce_mean(tf.gather(correct, fg_inds)), name='fg_accuracy')
box_loss = tf.reduce_sum(tf.abs(fg_boxes - fg_box_logits))
box_loss = tf.truediv(
box_loss, tf.cast(tf.shape(labels)[0], tf.float32), name='box_loss')
add_moving_summary(label_loss, box_loss, accuracy,
fg_accuracy, false_negative, tf.cast(num_fg, tf.float32, name='num_fg_label'))
return [label_loss, box_loss]
@under_name_scope()
def fastrcnn_predictions(boxes, scores):
"""
Generate final results from predictions of all proposals.
Args:
boxes: n#classx4 floatbox in float32
scores: nx#class
Returns:
boxes: Kx4
scores: K
labels: K
"""
assert boxes.shape[1] == scores.shape[1]
boxes = tf.transpose(boxes, [1, 0, 2])[1:, :, :] # #catxnx4
scores = tf.transpose(scores[:, 1:], [1, 0]) # #catxn
max_coord = tf.reduce_max(boxes)
filtered_ids = tf.where(scores > cfg.TEST.RESULT_SCORE_THRESH) # Fx2
filtered_boxes = tf.gather_nd(boxes, filtered_ids) # Fx4
filtered_scores = tf.gather_nd(scores, filtered_ids) # F,
cls_per_box = tf.slice(filtered_ids, [0, 0], [-1, 1])
offsets = tf.cast(cls_per_box, tf.float32) * (max_coord + 1) # F,1
nms_boxes = filtered_boxes + offsets
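    # Shifting each box by class_id * (max_coord + 1) moves boxes of different classes
    # into disjoint coordinate ranges, so the single NMS call below acts like an
    # independent per-class NMS.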
selection = tf.image.non_max_suppression(
nms_boxes,
filtered_scores,
cfg.TEST.RESULTS_PER_IM,
cfg.TEST.FRCNN_NMS_THRESH)
final_scores = tf.gather(filtered_scores, selection, name='scores')
final_labels = tf.add(tf.gather(cls_per_box[:, 0], selection), 1, name='labels')
final_boxes = tf.gather(filtered_boxes, selection, name='boxes')
return final_boxes, final_scores, final_labels
"""
FastRCNN heads for FPN:
"""
@layer_register(log_shape=True)
def fastrcnn_2fc_head(feature):
"""
Args:
feature (any shape):
Returns:
2D head feature
"""
dim = cfg.FPN.FRCNN_FC_HEAD_DIM
init = tf.variance_scaling_initializer()
hidden = FullyConnected('fc6', feature, dim, kernel_initializer=init, activation=tf.nn.relu)
hidden = FullyConnected('fc7', hidden, dim, kernel_initializer=init, activation=tf.nn.relu)
return hidden
@layer_register(log_shape=True)
def fastrcnn_Xconv1fc_head(feature, num_convs, norm=None):
"""
Args:
feature (NCHW):
num_convs (int): number of conv layers
norm (str or None): either None or 'GN'
Returns:
2D head feature
"""
assert norm in [None, 'GN'], norm
l = feature
with argscope(Conv2D, data_format='channels_first',
kernel_initializer=tf.variance_scaling_initializer(
scale=2.0, mode='fan_out',
distribution='untruncated_normal' if get_tf_version_tuple() >= (1, 12) else 'normal')):
for k in range(num_convs):
l = Conv2D('conv{}'.format(k), l, cfg.FPN.FRCNN_CONV_HEAD_DIM, 3, activation=tf.nn.relu)
if norm is not None:
l = GroupNorm('gn{}'.format(k), l)
l = FullyConnected('fc', l, cfg.FPN.FRCNN_FC_HEAD_DIM,
kernel_initializer=tf.variance_scaling_initializer(), activation=tf.nn.relu)
return l
def fastrcnn_4conv1fc_head(*args, **kwargs):
return fastrcnn_Xconv1fc_head(*args, num_convs=4, **kwargs)
def fastrcnn_4conv1fc_gn_head(*args, **kwargs):
return fastrcnn_Xconv1fc_head(*args, num_convs=4, norm='GN', **kwargs)
class BoxProposals(object):
"""
A structure to manage box proposals and their relations with ground truth.
"""
def __init__(self, boxes, labels=None, fg_inds_wrt_gt=None):
"""
Args:
boxes: Nx4
labels: N, each in [0, #class), the true label for each input box
fg_inds_wrt_gt: #fg, each in [0, M)
        The last two arguments could be None when not training.
"""
for k, v in locals().items():
if k != 'self' and v is not None:
setattr(self, k, v)
@memoized_method
def fg_inds(self):
""" Returns: #fg indices in [0, N-1] """
return tf.reshape(tf.where(self.labels > 0), [-1], name='fg_inds')
@memoized_method
def fg_boxes(self):
""" Returns: #fg x4"""
return tf.gather(self.boxes, self.fg_inds(), name='fg_boxes')
@memoized_method
def fg_labels(self):
""" Returns: #fg"""
return tf.gather(self.labels, self.fg_inds(), name='fg_labels')
class FastRCNNHead(object):
"""
A class to process & decode inputs/outputs of a fastrcnn classification+regression head.
"""
def __init__(self, proposals, box_logits, label_logits, gt_boxes, bbox_regression_weights):
"""
Args:
proposals: BoxProposals
box_logits: Nx#classx4 or Nx1x4, the output of the head
label_logits: Nx#class, the output of the head
gt_boxes: Mx4
bbox_regression_weights: a 4 element tensor
"""
for k, v in locals().items():
if k != 'self' and v is not None:
setattr(self, k, v)
self._bbox_class_agnostic = int(box_logits.shape[1]) == 1
self._num_classes = box_logits.shape[1]
@memoized_method
def fg_box_logits(self):
""" Returns: #fg x ? x 4 """
return tf.gather(self.box_logits, self.proposals.fg_inds(), name='fg_box_logits')
@memoized_method
def losses(self):
encoded_fg_gt_boxes = encode_bbox_target(
tf.gather(self.gt_boxes, self.proposals.fg_inds_wrt_gt),
self.proposals.fg_boxes()) * self.bbox_regression_weights
return fastrcnn_losses(
self.proposals.labels, self.label_logits,
encoded_fg_gt_boxes, self.fg_box_logits()
)
@memoized_method
def decoded_output_boxes(self):
""" Returns: N x #class x 4 """
anchors = tf.tile(tf.expand_dims(self.proposals.boxes, 1),
[1, self._num_classes, 1]) # N x #class x 4
decoded_boxes = decode_bbox_target(
self.box_logits / self.bbox_regression_weights,
anchors
)
return decoded_boxes
@memoized_method
def decoded_output_boxes_for_true_label(self):
""" Returns: Nx4 decoded boxes """
return self._decoded_output_boxes_for_label(self.proposals.labels)
@memoized_method
def decoded_output_boxes_for_predicted_label(self):
""" Returns: Nx4 decoded boxes """
return self._decoded_output_boxes_for_label(self.predicted_labels())
@memoized_method
    def _decoded_output_boxes_for_label(self, labels):
assert not self._bbox_class_agnostic
indices = tf.stack([
tf.range(tf.size(labels, out_type=tf.int64)),
labels
])
needed_logits = tf.gather_nd(self.box_logits, indices)
decoded = decode_bbox_target(
needed_logits / self.bbox_regression_weights,
self.proposals.boxes
)
return decoded
@memoized_method
def decoded_output_boxes_class_agnostic(self):
""" Returns: Nx4 """
assert self._bbox_class_agnostic
box_logits = tf.reshape(self.box_logits, [-1, 4])
decoded = decode_bbox_target(
box_logits / self.bbox_regression_weights,
self.proposals.boxes
)
return decoded
@memoized_method
def output_scores(self, name=None):
""" Returns: N x #class scores, summed to one for each box."""
return tf.nn.softmax(self.label_logits, name=name)
@memoized_method
def predicted_labels(self):
""" Returns: N ints """
return tf.argmax(self.label_logits, axis=1, name='predicted_labels')
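# An illustrative-only sketch of the head's output shapes; the 256 RoIs, 1024-d
# features and 80 categories are hypothetical numbers, not values from the config.
def _sketch_fastrcnn_outputs():
    feature = tf.zeros([256, 1024])
    label_logits, box_logits = fastrcnn_outputs('sketch_fastrcnn', feature, 80)
    return label_logits, box_logits  # shapes [256, 81] and [256, 81, 4]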
| 14,480 | 35.568182 | 115 | py |
SyNet | SyNet-master/tensorpack/examples/FasterRCNN/modeling/model_rpn.py | # -*- coding: utf-8 -*-
import tensorflow as tf
import numpy as np
from tensorpack.models import Conv2D, layer_register
from tensorpack.tfutils.argscope import argscope
from tensorpack.tfutils.scope_utils import auto_reuse_variable_scope, under_name_scope
from tensorpack.tfutils.summary import add_moving_summary
from tensorpack.utils.argtools import memoized
from config import config as cfg
from .model_box import clip_boxes
@layer_register(log_shape=True)
@auto_reuse_variable_scope
def rpn_head(featuremap, channel, num_anchors):
"""
Returns:
label_logits: fHxfWxNA
box_logits: fHxfWxNAx4
"""
with argscope(Conv2D, data_format='channels_first',
kernel_initializer=tf.random_normal_initializer(stddev=0.01)):
hidden = Conv2D('conv0', featuremap, channel, 3, activation=tf.nn.relu)
label_logits = Conv2D('class', hidden, num_anchors, 1)
box_logits = Conv2D('box', hidden, 4 * num_anchors, 1)
# 1, NA(*4), im/16, im/16 (NCHW)
label_logits = tf.transpose(label_logits, [0, 2, 3, 1]) # 1xfHxfWxNA
label_logits = tf.squeeze(label_logits, 0) # fHxfWxNA
shp = tf.shape(box_logits) # 1x(NAx4)xfHxfW
box_logits = tf.transpose(box_logits, [0, 2, 3, 1]) # 1xfHxfWx(NAx4)
box_logits = tf.reshape(box_logits, tf.stack([shp[2], shp[3], num_anchors, 4])) # fHxfWxNAx4
return label_logits, box_logits
@under_name_scope()
def rpn_losses(anchor_labels, anchor_boxes, label_logits, box_logits):
"""
Args:
anchor_labels: fHxfWxNA
anchor_boxes: fHxfWxNAx4, encoded
label_logits: fHxfWxNA
box_logits: fHxfWxNAx4
Returns:
label_loss, box_loss
"""
with tf.device('/cpu:0'):
valid_mask = tf.stop_gradient(tf.not_equal(anchor_labels, -1))
pos_mask = tf.stop_gradient(tf.equal(anchor_labels, 1))
nr_valid = tf.stop_gradient(tf.count_nonzero(valid_mask, dtype=tf.int32), name='num_valid_anchor')
nr_pos = tf.identity(tf.count_nonzero(pos_mask, dtype=tf.int32), name='num_pos_anchor')
    # nr_pos is guaranteed >0 in C4. But in FPN, even nr_valid could be 0.
valid_anchor_labels = tf.boolean_mask(anchor_labels, valid_mask)
valid_label_logits = tf.boolean_mask(label_logits, valid_mask)
with tf.name_scope('label_metrics'):
valid_label_prob = tf.nn.sigmoid(valid_label_logits)
summaries = []
with tf.device('/cpu:0'):
for th in [0.5, 0.2, 0.1]:
valid_prediction = tf.cast(valid_label_prob > th, tf.int32)
nr_pos_prediction = tf.reduce_sum(valid_prediction, name='num_pos_prediction')
pos_prediction_corr = tf.count_nonzero(
tf.logical_and(
valid_label_prob > th,
tf.equal(valid_prediction, valid_anchor_labels)),
dtype=tf.int32)
placeholder = 0.5 # A small value will make summaries appear lower.
recall = tf.cast(tf.truediv(pos_prediction_corr, nr_pos), tf.float32)
recall = tf.where(tf.equal(nr_pos, 0), placeholder, recall, name='recall_th{}'.format(th))
precision = tf.cast(tf.truediv(pos_prediction_corr, nr_pos_prediction), tf.float32)
precision = tf.where(tf.equal(nr_pos_prediction, 0),
placeholder, precision, name='precision_th{}'.format(th))
summaries.extend([precision, recall])
add_moving_summary(*summaries)
# Per-level loss summaries in FPN may appear lower due to the use of a small placeholder.
# But the total RPN loss will be fine. TODO make the summary op smarter
placeholder = 0.
label_loss = tf.nn.sigmoid_cross_entropy_with_logits(
labels=tf.cast(valid_anchor_labels, tf.float32), logits=valid_label_logits)
label_loss = tf.reduce_sum(label_loss) * (1. / cfg.RPN.BATCH_PER_IM)
label_loss = tf.where(tf.equal(nr_valid, 0), placeholder, label_loss, name='label_loss')
pos_anchor_boxes = tf.boolean_mask(anchor_boxes, pos_mask)
pos_box_logits = tf.boolean_mask(box_logits, pos_mask)
delta = 1.0 / 9
box_loss = tf.losses.huber_loss(
pos_anchor_boxes, pos_box_logits, delta=delta,
reduction=tf.losses.Reduction.SUM) / delta
box_loss = box_loss * (1. / cfg.RPN.BATCH_PER_IM)
box_loss = tf.where(tf.equal(nr_pos, 0), placeholder, box_loss, name='box_loss')
add_moving_summary(label_loss, box_loss, nr_valid, nr_pos)
return [label_loss, box_loss]
@under_name_scope()
def generate_rpn_proposals(boxes, scores, img_shape,
pre_nms_topk, post_nms_topk=None):
"""
Sample RPN proposals by the following steps:
1. Pick top k1 by scores
2. NMS them
3. Pick top k2 by scores. Default k2 == k1, i.e. does not filter the NMS output.
Args:
boxes: nx4 float dtype, the proposal boxes. Decoded to floatbox already
scores: n float, the logits
img_shape: [h, w]
pre_nms_topk, post_nms_topk (int): See above.
Returns:
boxes: kx4 float
scores: k logits
"""
assert boxes.shape.ndims == 2, boxes.shape
if post_nms_topk is None:
post_nms_topk = pre_nms_topk
topk = tf.minimum(pre_nms_topk, tf.size(scores))
topk_scores, topk_indices = tf.nn.top_k(scores, k=topk, sorted=False)
topk_boxes = tf.gather(boxes, topk_indices)
topk_boxes = clip_boxes(topk_boxes, img_shape)
if cfg.RPN.MIN_SIZE > 0:
topk_boxes_x1y1x2y2 = tf.reshape(topk_boxes, (-1, 2, 2))
topk_boxes_x1y1, topk_boxes_x2y2 = tf.split(topk_boxes_x1y1x2y2, 2, axis=1)
# nx1x2 each
wbhb = tf.squeeze(topk_boxes_x2y2 - topk_boxes_x1y1, axis=1)
valid = tf.reduce_all(wbhb > cfg.RPN.MIN_SIZE, axis=1) # n,
topk_valid_boxes = tf.boolean_mask(topk_boxes, valid)
topk_valid_scores = tf.boolean_mask(topk_scores, valid)
else:
topk_valid_boxes = topk_boxes
topk_valid_scores = topk_scores
nms_indices = tf.image.non_max_suppression(
topk_valid_boxes,
topk_valid_scores,
max_output_size=post_nms_topk,
iou_threshold=cfg.RPN.PROPOSAL_NMS_THRESH)
proposal_boxes = tf.gather(topk_valid_boxes, nms_indices)
proposal_scores = tf.gather(topk_valid_scores, nms_indices)
tf.sigmoid(proposal_scores, name='probs') # for visualization
return tf.stop_gradient(proposal_boxes, name='boxes'), tf.stop_gradient(proposal_scores, name='scores')
@memoized
def get_all_anchors(*, stride, sizes, ratios, max_size):
"""
Get all anchors in the largest possible image, shifted, floatbox
Args:
stride (int): the stride of anchors.
sizes (tuple[int]): the sizes (sqrt area) of anchors
ratios (tuple[int]): the aspect ratios of anchors
max_size (int): maximum size of input image
Returns:
anchors: SxSxNUM_ANCHORx4, where S == ceil(MAX_SIZE/STRIDE), floatbox
The layout in the NUM_ANCHOR dim is NUM_RATIO x NUM_SIZE.
"""
# Generates a NAx4 matrix of anchor boxes in (x1, y1, x2, y2) format. Anchors
# are centered on 0, have sqrt areas equal to the specified sizes, and aspect ratios as given.
anchors = []
for sz in sizes:
for ratio in ratios:
w = np.sqrt(sz * sz / ratio)
h = ratio * w
anchors.append([-w, -h, w, h])
cell_anchors = np.asarray(anchors) * 0.5
field_size = int(np.ceil(max_size / stride))
shifts = (np.arange(0, field_size) * stride).astype("float32")
shift_x, shift_y = np.meshgrid(shifts, shifts)
shift_x = shift_x.flatten()
shift_y = shift_y.flatten()
shifts = np.vstack((shift_x, shift_y, shift_x, shift_y)).transpose()
# Kx4, K = field_size * field_size
K = shifts.shape[0]
A = cell_anchors.shape[0]
field_of_anchors = cell_anchors.reshape((1, A, 4)) + shifts.reshape((1, K, 4)).transpose((1, 0, 2))
field_of_anchors = field_of_anchors.reshape((field_size, field_size, A, 4))
# FSxFSxAx4
# Many rounding happens inside the anchor code anyway
# assert np.all(field_of_anchors == field_of_anchors.astype('int32'))
field_of_anchors = field_of_anchors.astype("float32")
return field_of_anchors
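# An illustrative-only sketch of the anchor grid shape; stride, sizes, ratios and
# max_size below are hypothetical values, not the configured ones.
def _sketch_get_all_anchors():
    anchors = get_all_anchors(stride=16, sizes=(32, 64, 128), ratios=(0.5, 1., 2.), max_size=1333)
    # ceil(1333 / 16) == 84 grid cells per side, 3 sizes x 3 ratios == 9 anchors per cell
    return anchors.shape  # (84, 84, 9, 4)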
| 8,343 | 40.929648 | 107 | py |
SyNet | SyNet-master/tensorpack/examples/FasterRCNN/modeling/generalized_rcnn.py | # -*- coding: utf-8 -*-
# File:
import tensorflow as tf
from tensorpack import ModelDesc
from tensorpack.models import GlobalAvgPooling, l2_regularizer, regularize_cost
from tensorpack.tfutils import optimizer
from tensorpack.tfutils.summary import add_moving_summary
from config import config as cfg
from data import get_all_anchors, get_all_anchors_fpn
from utils.box_ops import area as tf_area
from . import model_frcnn
from . import model_mrcnn
from .backbone import image_preprocess, resnet_c4_backbone, resnet_conv5, resnet_fpn_backbone
from .model_box import RPNAnchors, clip_boxes, crop_and_resize, roi_align
from .model_cascade import CascadeRCNNHead
from .model_fpn import fpn_model, generate_fpn_proposals, multilevel_roi_align, multilevel_rpn_losses
from .model_frcnn import (
BoxProposals, FastRCNNHead, fastrcnn_outputs, fastrcnn_predictions, sample_fast_rcnn_targets)
from .model_mrcnn import maskrcnn_loss, maskrcnn_upXconv_head, unpackbits_masks
from .model_rpn import generate_rpn_proposals, rpn_head, rpn_losses
class GeneralizedRCNN(ModelDesc):
def preprocess(self, image):
image = tf.expand_dims(image, 0)
image = image_preprocess(image, bgr=True)
return tf.transpose(image, [0, 3, 1, 2])
def optimizer(self):
lr = tf.get_variable('learning_rate', initializer=0., trainable=False)
tf.summary.scalar('learning_rate-summary', lr)
# The learning rate in the config is set for 8 GPUs, and we use trainers with average=False.
lr = lr / 8.
opt = tf.train.MomentumOptimizer(lr, 0.9)
if cfg.TRAIN.NUM_GPUS < 8:
opt = optimizer.AccumGradOptimizer(opt, 8 // cfg.TRAIN.NUM_GPUS)
return opt
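    # For example (hypothetical 2-GPU run): the per-step LR above becomes 1/8 of the
    # configured value and AccumGradOptimizer only applies an update every 8 // 2 = 4
    # steps, so the effective schedule roughly matches the 8-GPU setup the config assumes.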
def get_inference_tensor_names(self):
"""
Returns two lists of tensor names to be used to create an inference callable.
`build_graph` must create tensors of these names when called under inference context.
Returns:
[str]: input names
[str]: output names
"""
out = ['output/boxes', 'output/scores', 'output/labels']
if cfg.MODE_MASK:
out.append('output/masks')
return ['image'], out
def build_graph(self, *inputs):
inputs = dict(zip(self.input_names, inputs))
if "gt_masks_packed" in inputs:
gt_masks = tf.cast(unpackbits_masks(inputs.pop("gt_masks_packed")), tf.uint8, name="gt_masks")
inputs["gt_masks"] = gt_masks
image = self.preprocess(inputs['image']) # 1CHW
features = self.backbone(image)
anchor_inputs = {k: v for k, v in inputs.items() if k.startswith('anchor_')}
proposals, rpn_losses = self.rpn(image, features, anchor_inputs) # inputs?
targets = [inputs[k] for k in ['gt_boxes', 'gt_labels', 'gt_masks'] if k in inputs]
gt_boxes_area = tf.reduce_mean(tf_area(inputs["gt_boxes"]), name='mean_gt_box_area')
add_moving_summary(gt_boxes_area)
head_losses = self.roi_heads(image, features, proposals, targets)
if self.training:
wd_cost = regularize_cost(
'.*/W', l2_regularizer(cfg.TRAIN.WEIGHT_DECAY), name='wd_cost')
total_cost = tf.add_n(
rpn_losses + head_losses + [wd_cost], 'total_cost')
add_moving_summary(total_cost, wd_cost)
return total_cost
else:
# Check that the model defines the tensors it declares for inference
# For existing models, they are defined in "fastrcnn_predictions(name_scope='output')"
G = tf.get_default_graph()
ns = G.get_name_scope()
for name in self.get_inference_tensor_names()[1]:
try:
name = '/'.join([ns, name]) if ns else name
G.get_tensor_by_name(name + ':0')
except KeyError:
raise KeyError("Your model does not define the tensor '{}' in inference context.".format(name))
class ResNetC4Model(GeneralizedRCNN):
def inputs(self):
ret = [
tf.TensorSpec((None, None, 3), tf.float32, 'image'),
tf.TensorSpec((None, None, cfg.RPN.NUM_ANCHOR), tf.int32, 'anchor_labels'),
tf.TensorSpec((None, None, cfg.RPN.NUM_ANCHOR, 4), tf.float32, 'anchor_boxes'),
tf.TensorSpec((None, 4), tf.float32, 'gt_boxes'),
tf.TensorSpec((None,), tf.int64, 'gt_labels')] # all > 0
if cfg.MODE_MASK:
ret.append(
tf.TensorSpec((None, None, None), tf.uint8, 'gt_masks_packed')
) # NR_GT x height x ceil(width/8), packed groundtruth masks
return ret
def backbone(self, image):
return [resnet_c4_backbone(image, cfg.BACKBONE.RESNET_NUM_BLOCKS[:3])]
def rpn(self, image, features, inputs):
featuremap = features[0]
rpn_label_logits, rpn_box_logits = rpn_head('rpn', featuremap, cfg.RPN.HEAD_DIM, cfg.RPN.NUM_ANCHOR)
anchors = RPNAnchors(
get_all_anchors(
stride=cfg.RPN.ANCHOR_STRIDE, sizes=cfg.RPN.ANCHOR_SIZES,
ratios=cfg.RPN.ANCHOR_RATIOS, max_size=cfg.PREPROC.MAX_SIZE),
inputs['anchor_labels'], inputs['anchor_boxes'])
anchors = anchors.narrow_to(featuremap)
image_shape2d = tf.shape(image)[2:] # h,w
pred_boxes_decoded = anchors.decode_logits(rpn_box_logits) # fHxfWxNAx4, floatbox
proposal_boxes, proposal_scores = generate_rpn_proposals(
tf.reshape(pred_boxes_decoded, [-1, 4]),
tf.reshape(rpn_label_logits, [-1]),
image_shape2d,
cfg.RPN.TRAIN_PRE_NMS_TOPK if self.training else cfg.RPN.TEST_PRE_NMS_TOPK,
cfg.RPN.TRAIN_POST_NMS_TOPK if self.training else cfg.RPN.TEST_POST_NMS_TOPK)
if self.training:
losses = rpn_losses(
anchors.gt_labels, anchors.encoded_gt_boxes(), rpn_label_logits, rpn_box_logits)
else:
losses = []
return BoxProposals(proposal_boxes), losses
def roi_heads(self, image, features, proposals, targets):
image_shape2d = tf.shape(image)[2:] # h,w
featuremap = features[0]
gt_boxes, gt_labels, *_ = targets
if self.training:
# sample proposal boxes in training
proposals = sample_fast_rcnn_targets(proposals.boxes, gt_boxes, gt_labels)
# The boxes to be used to crop RoIs.
# Use all proposal boxes in inference
boxes_on_featuremap = proposals.boxes * (1.0 / cfg.RPN.ANCHOR_STRIDE)
roi_resized = roi_align(featuremap, boxes_on_featuremap, 14)
feature_fastrcnn = resnet_conv5(roi_resized, cfg.BACKBONE.RESNET_NUM_BLOCKS[-1]) # nxcx7x7
# Keep C5 feature to be shared with mask branch
feature_gap = GlobalAvgPooling('gap', feature_fastrcnn, data_format='channels_first')
fastrcnn_label_logits, fastrcnn_box_logits = fastrcnn_outputs('fastrcnn', feature_gap, cfg.DATA.NUM_CATEGORY)
fastrcnn_head = FastRCNNHead(proposals, fastrcnn_box_logits, fastrcnn_label_logits, gt_boxes,
tf.constant(cfg.FRCNN.BBOX_REG_WEIGHTS, dtype=tf.float32))
if self.training:
all_losses = fastrcnn_head.losses()
if cfg.MODE_MASK:
gt_masks = targets[2]
# maskrcnn loss
# In training, mask branch shares the same C5 feature.
fg_feature = tf.gather(feature_fastrcnn, proposals.fg_inds())
mask_logits = maskrcnn_upXconv_head(
'maskrcnn', fg_feature, cfg.DATA.NUM_CATEGORY, num_convs=0) # #fg x #cat x 14x14
target_masks_for_fg = crop_and_resize(
tf.expand_dims(gt_masks, 1),
proposals.fg_boxes(),
proposals.fg_inds_wrt_gt, 14,
pad_border=False) # nfg x 1x14x14
target_masks_for_fg = tf.squeeze(target_masks_for_fg, 1, 'sampled_fg_mask_targets')
all_losses.append(maskrcnn_loss(mask_logits, proposals.fg_labels(), target_masks_for_fg))
return all_losses
else:
decoded_boxes = fastrcnn_head.decoded_output_boxes()
decoded_boxes = clip_boxes(decoded_boxes, image_shape2d, name='fastrcnn_all_boxes')
label_scores = fastrcnn_head.output_scores(name='fastrcnn_all_scores')
final_boxes, final_scores, final_labels = fastrcnn_predictions(
decoded_boxes, label_scores, name_scope='output')
if cfg.MODE_MASK:
roi_resized = roi_align(featuremap, final_boxes * (1.0 / cfg.RPN.ANCHOR_STRIDE), 14)
feature_maskrcnn = resnet_conv5(roi_resized, cfg.BACKBONE.RESNET_NUM_BLOCKS[-1])
mask_logits = maskrcnn_upXconv_head(
'maskrcnn', feature_maskrcnn, cfg.DATA.NUM_CATEGORY, 0) # #result x #cat x 14x14
indices = tf.stack([tf.range(tf.size(final_labels)), tf.cast(final_labels, tf.int32) - 1], axis=1)
final_mask_logits = tf.gather_nd(mask_logits, indices) # #resultx14x14
tf.sigmoid(final_mask_logits, name='output/masks')
return []
class ResNetFPNModel(GeneralizedRCNN):
def inputs(self):
ret = [
tf.TensorSpec((None, None, 3), tf.float32, 'image')]
num_anchors = len(cfg.RPN.ANCHOR_RATIOS)
for k in range(len(cfg.FPN.ANCHOR_STRIDES)):
ret.extend([
tf.TensorSpec((None, None, num_anchors), tf.int32,
'anchor_labels_lvl{}'.format(k + 2)),
tf.TensorSpec((None, None, num_anchors, 4), tf.float32,
'anchor_boxes_lvl{}'.format(k + 2))])
ret.extend([
tf.TensorSpec((None, 4), tf.float32, 'gt_boxes'),
tf.TensorSpec((None,), tf.int64, 'gt_labels')]) # all > 0
if cfg.MODE_MASK:
ret.append(
tf.TensorSpec((None, None, None), tf.uint8, 'gt_masks_packed')
)
return ret
def slice_feature_and_anchors(self, p23456, anchors):
for i, stride in enumerate(cfg.FPN.ANCHOR_STRIDES):
with tf.name_scope('FPN_slice_lvl{}'.format(i)):
anchors[i] = anchors[i].narrow_to(p23456[i])
def backbone(self, image):
c2345 = resnet_fpn_backbone(image, cfg.BACKBONE.RESNET_NUM_BLOCKS)
p23456 = fpn_model('fpn', c2345)
return p23456
def rpn(self, image, features, inputs):
assert len(cfg.RPN.ANCHOR_SIZES) == len(cfg.FPN.ANCHOR_STRIDES)
image_shape2d = tf.shape(image)[2:] # h,w
all_anchors_fpn = get_all_anchors_fpn(
strides=cfg.FPN.ANCHOR_STRIDES,
sizes=cfg.RPN.ANCHOR_SIZES,
ratios=cfg.RPN.ANCHOR_RATIOS,
max_size=cfg.PREPROC.MAX_SIZE)
multilevel_anchors = [RPNAnchors(
all_anchors_fpn[i],
inputs['anchor_labels_lvl{}'.format(i + 2)],
inputs['anchor_boxes_lvl{}'.format(i + 2)]) for i in range(len(all_anchors_fpn))]
self.slice_feature_and_anchors(features, multilevel_anchors)
# Multi-Level RPN Proposals
rpn_outputs = [rpn_head('rpn', pi, cfg.FPN.NUM_CHANNEL, len(cfg.RPN.ANCHOR_RATIOS))
for pi in features]
multilevel_label_logits = [k[0] for k in rpn_outputs]
multilevel_box_logits = [k[1] for k in rpn_outputs]
multilevel_pred_boxes = [anchor.decode_logits(logits)
for anchor, logits in zip(multilevel_anchors, multilevel_box_logits)]
proposal_boxes, proposal_scores = generate_fpn_proposals(
multilevel_pred_boxes, multilevel_label_logits, image_shape2d)
if self.training:
losses = multilevel_rpn_losses(
multilevel_anchors, multilevel_label_logits, multilevel_box_logits)
else:
losses = []
return BoxProposals(proposal_boxes), losses
def roi_heads(self, image, features, proposals, targets):
image_shape2d = tf.shape(image)[2:] # h,w
assert len(features) == 5, "Features have to be P23456!"
gt_boxes, gt_labels, *_ = targets
if self.training:
proposals = sample_fast_rcnn_targets(proposals.boxes, gt_boxes, gt_labels)
fastrcnn_head_func = getattr(model_frcnn, cfg.FPN.FRCNN_HEAD_FUNC)
if not cfg.FPN.CASCADE:
roi_feature_fastrcnn = multilevel_roi_align(features[:4], proposals.boxes, 7)
head_feature = fastrcnn_head_func('fastrcnn', roi_feature_fastrcnn)
fastrcnn_label_logits, fastrcnn_box_logits = fastrcnn_outputs(
'fastrcnn/outputs', head_feature, cfg.DATA.NUM_CATEGORY)
fastrcnn_head = FastRCNNHead(proposals, fastrcnn_box_logits, fastrcnn_label_logits,
gt_boxes, tf.constant(cfg.FRCNN.BBOX_REG_WEIGHTS, dtype=tf.float32))
else:
def roi_func(boxes):
return multilevel_roi_align(features[:4], boxes, 7)
fastrcnn_head = CascadeRCNNHead(
proposals, roi_func, fastrcnn_head_func,
(gt_boxes, gt_labels), image_shape2d, cfg.DATA.NUM_CATEGORY)
if self.training:
all_losses = fastrcnn_head.losses()
if cfg.MODE_MASK:
gt_masks = targets[2]
# maskrcnn loss
roi_feature_maskrcnn = multilevel_roi_align(
features[:4], proposals.fg_boxes(), 14,
name_scope='multilevel_roi_align_mask')
maskrcnn_head_func = getattr(model_mrcnn, cfg.FPN.MRCNN_HEAD_FUNC)
mask_logits = maskrcnn_head_func(
'maskrcnn', roi_feature_maskrcnn, cfg.DATA.NUM_CATEGORY) # #fg x #cat x 28 x 28
target_masks_for_fg = crop_and_resize(
tf.expand_dims(gt_masks, 1),
proposals.fg_boxes(),
proposals.fg_inds_wrt_gt, 28,
pad_border=False) # fg x 1x28x28
target_masks_for_fg = tf.squeeze(target_masks_for_fg, 1, 'sampled_fg_mask_targets')
all_losses.append(maskrcnn_loss(mask_logits, proposals.fg_labels(), target_masks_for_fg))
return all_losses
else:
decoded_boxes = fastrcnn_head.decoded_output_boxes()
decoded_boxes = clip_boxes(decoded_boxes, image_shape2d, name='fastrcnn_all_boxes')
label_scores = fastrcnn_head.output_scores(name='fastrcnn_all_scores')
final_boxes, final_scores, final_labels = fastrcnn_predictions(
decoded_boxes, label_scores, name_scope='output')
if cfg.MODE_MASK:
# Cascade inference needs roi transform with refined boxes.
roi_feature_maskrcnn = multilevel_roi_align(features[:4], final_boxes, 14)
maskrcnn_head_func = getattr(model_mrcnn, cfg.FPN.MRCNN_HEAD_FUNC)
mask_logits = maskrcnn_head_func(
'maskrcnn', roi_feature_maskrcnn, cfg.DATA.NUM_CATEGORY) # #fg x #cat x 28 x 28
indices = tf.stack([tf.range(tf.size(final_labels)), tf.cast(final_labels, tf.int32) - 1], axis=1)
final_mask_logits = tf.gather_nd(mask_logits, indices) # #resultx28x28
tf.sigmoid(final_mask_logits, name='output/masks')
return []
| 15,549 | 46.553517 | 117 | py |
SyNet | SyNet-master/tensorpack/examples/FasterRCNN/modeling/backbone.py | # -*- coding: utf-8 -*-
# File: backbone.py
import numpy as np
import tensorflow as tf
from contextlib import ExitStack, contextmanager
from tensorpack.models import BatchNorm, Conv2D, MaxPooling, layer_register
from tensorpack.tfutils import argscope
from tensorpack.tfutils.scope_utils import auto_reuse_variable_scope
from tensorpack.tfutils.varreplace import custom_getter_scope, freeze_variables
from config import config as cfg
@layer_register(log_shape=True)
def GroupNorm(x, group=32, gamma_initializer=tf.constant_initializer(1.)):
"""
More code that reproduces the paper can be found at https://github.com/ppwwyyxx/GroupNorm-reproduce/.
"""
shape = x.get_shape().as_list()
ndims = len(shape)
assert ndims == 4, shape
chan = shape[1]
assert chan % group == 0, chan
group_size = chan // group
orig_shape = tf.shape(x)
h, w = orig_shape[2], orig_shape[3]
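    # split the channels into groups and normalize each group over (group_size, H, W)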
x = tf.reshape(x, tf.stack([-1, group, group_size, h, w]))
mean, var = tf.nn.moments(x, [2, 3, 4], keep_dims=True)
new_shape = [1, group, group_size, 1, 1]
beta = tf.get_variable('beta', [chan], initializer=tf.constant_initializer())
beta = tf.reshape(beta, new_shape)
gamma = tf.get_variable('gamma', [chan], initializer=gamma_initializer)
gamma = tf.reshape(gamma, new_shape)
out = tf.nn.batch_normalization(x, mean, var, beta, gamma, 1e-5, name='output')
return tf.reshape(out, orig_shape, name='output')
def freeze_affine_getter(getter, *args, **kwargs):
# custom getter to freeze affine params inside bn
name = args[0] if len(args) else kwargs.get('name')
if name.endswith('/gamma') or name.endswith('/beta'):
kwargs['trainable'] = False
ret = getter(*args, **kwargs)
tf.add_to_collection(tf.GraphKeys.MODEL_VARIABLES, ret)
else:
ret = getter(*args, **kwargs)
return ret
def maybe_reverse_pad(topleft, bottomright):
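    """
    Helper for the asymmetric padding in front of stride-2 convolutions.
    With BACKBONE.TF_PAD_MODE the extra pixel goes to the bottom/right (TF's convention);
    otherwise the pair is reversed (extra pixel on the top/left), which is what models
    converted from Caffe2/Detectron typically expect.
    """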
if cfg.BACKBONE.TF_PAD_MODE:
return [topleft, bottomright]
return [bottomright, topleft]
@contextmanager
def backbone_scope(freeze):
"""
Args:
freeze (bool): whether to freeze all the variables under the scope
"""
def nonlin(x):
x = get_norm()(x)
return tf.nn.relu(x)
with argscope([Conv2D, MaxPooling, BatchNorm], data_format='channels_first'), \
argscope(Conv2D, use_bias=False, activation=nonlin,
kernel_initializer=tf.variance_scaling_initializer(
scale=2.0, mode='fan_out')), \
ExitStack() as stack:
if cfg.BACKBONE.NORM in ['FreezeBN', 'SyncBN']:
if freeze or cfg.BACKBONE.NORM == 'FreezeBN':
stack.enter_context(argscope(BatchNorm, training=False))
else:
stack.enter_context(argscope(
BatchNorm, sync_statistics='nccl' if cfg.TRAINER == 'replicated' else 'horovod'))
if freeze:
stack.enter_context(freeze_variables(stop_gradient=False, skip_collection=True))
else:
            # the layers are not completely frozen, but we may still want to freeze only the affine parameters
if cfg.BACKBONE.FREEZE_AFFINE:
stack.enter_context(custom_getter_scope(freeze_affine_getter))
yield
def image_preprocess(image, bgr=True):
with tf.name_scope('image_preprocess'):
if image.dtype.base_dtype != tf.float32:
image = tf.cast(image, tf.float32)
mean = cfg.PREPROC.PIXEL_MEAN
std = np.asarray(cfg.PREPROC.PIXEL_STD)
if bgr:
mean = mean[::-1]
std = std[::-1]
image_mean = tf.constant(mean, dtype=tf.float32)
image_invstd = tf.constant(1.0 / std, dtype=tf.float32)
image = (image - image_mean) * image_invstd
return image
def get_norm(zero_init=False):
if cfg.BACKBONE.NORM == 'None':
return lambda x: x
if cfg.BACKBONE.NORM == 'GN':
Norm = GroupNorm
layer_name = 'gn'
else:
Norm = BatchNorm
layer_name = 'bn'
return lambda x: Norm(layer_name, x, gamma_initializer=tf.zeros_initializer() if zero_init else None)
def resnet_shortcut(l, n_out, stride, activation=tf.identity):
n_in = l.shape[1]
if n_in != n_out: # change dimension when channel is not the same
        # TF's SAME mode outputs ceil(x/stride), which is NOT what we want when x is odd and stride is 2
# In FPN mode, the images are pre-padded already.
if not cfg.MODE_FPN and stride == 2:
l = l[:, :, :-1, :-1]
return Conv2D('convshortcut', l, n_out, 1,
strides=stride, activation=activation)
else:
return l
def resnet_bottleneck(l, ch_out, stride):
shortcut = l
if cfg.BACKBONE.STRIDE_1X1:
if stride == 2:
l = l[:, :, :-1, :-1]
l = Conv2D('conv1', l, ch_out, 1, strides=stride)
l = Conv2D('conv2', l, ch_out, 3, strides=1)
else:
l = Conv2D('conv1', l, ch_out, 1, strides=1)
if stride == 2:
l = tf.pad(l, [[0, 0], [0, 0], maybe_reverse_pad(0, 1), maybe_reverse_pad(0, 1)])
l = Conv2D('conv2', l, ch_out, 3, strides=2, padding='VALID')
else:
l = Conv2D('conv2', l, ch_out, 3, strides=stride)
if cfg.BACKBONE.NORM != 'None':
l = Conv2D('conv3', l, ch_out * 4, 1, activation=get_norm(zero_init=True))
else:
l = Conv2D('conv3', l, ch_out * 4, 1, activation=tf.identity,
kernel_initializer=tf.constant_initializer())
ret = l + resnet_shortcut(shortcut, ch_out * 4, stride, activation=get_norm(zero_init=False))
return tf.nn.relu(ret, name='output')
def resnet_group(name, l, block_func, features, count, stride):
with tf.variable_scope(name):
for i in range(0, count):
with tf.variable_scope('block{}'.format(i)):
l = block_func(l, features, stride if i == 0 else 1)
return l
def resnet_c4_backbone(image, num_blocks):
assert len(num_blocks) == 3
freeze_at = cfg.BACKBONE.FREEZE_AT
with backbone_scope(freeze=freeze_at > 0):
l = tf.pad(image, [[0, 0], [0, 0], maybe_reverse_pad(2, 3), maybe_reverse_pad(2, 3)])
l = Conv2D('conv0', l, 64, 7, strides=2, padding='VALID')
l = tf.pad(l, [[0, 0], [0, 0], maybe_reverse_pad(0, 1), maybe_reverse_pad(0, 1)])
l = MaxPooling('pool0', l, 3, strides=2, padding='VALID')
with backbone_scope(freeze=freeze_at > 1):
c2 = resnet_group('group0', l, resnet_bottleneck, 64, num_blocks[0], 1)
with backbone_scope(freeze=False):
c3 = resnet_group('group1', c2, resnet_bottleneck, 128, num_blocks[1], 2)
c4 = resnet_group('group2', c3, resnet_bottleneck, 256, num_blocks[2], 2)
# 16x downsampling up to now
return c4
@auto_reuse_variable_scope
def resnet_conv5(image, num_block):
with backbone_scope(freeze=False):
l = resnet_group('group3', image, resnet_bottleneck, 512, num_block, 2)
return l
def resnet_fpn_backbone(image, num_blocks):
freeze_at = cfg.BACKBONE.FREEZE_AT
shape2d = tf.shape(image)[2:]
mult = float(cfg.FPN.RESOLUTION_REQUIREMENT)
new_shape2d = tf.cast(tf.ceil(tf.cast(shape2d, tf.float32) / mult) * mult, tf.int32)
pad_shape2d = new_shape2d - shape2d
assert len(num_blocks) == 4, num_blocks
with backbone_scope(freeze=freeze_at > 0):
chan = image.shape[1]
pad_base = maybe_reverse_pad(2, 3)
l = tf.pad(image, tf.stack(
[[0, 0], [0, 0],
[pad_base[0], pad_base[1] + pad_shape2d[0]],
[pad_base[0], pad_base[1] + pad_shape2d[1]]]))
l.set_shape([None, chan, None, None])
l = Conv2D('conv0', l, 64, 7, strides=2, padding='VALID')
l = tf.pad(l, [[0, 0], [0, 0], maybe_reverse_pad(0, 1), maybe_reverse_pad(0, 1)])
l = MaxPooling('pool0', l, 3, strides=2, padding='VALID')
with backbone_scope(freeze=freeze_at > 1):
c2 = resnet_group('group0', l, resnet_bottleneck, 64, num_blocks[0], 1)
with backbone_scope(freeze=False):
c3 = resnet_group('group1', c2, resnet_bottleneck, 128, num_blocks[1], 2)
c4 = resnet_group('group2', c3, resnet_bottleneck, 256, num_blocks[2], 2)
c5 = resnet_group('group3', c4, resnet_bottleneck, 512, num_blocks[3], 2)
# 32x downsampling up to now
# size of c5: ceil(input/32)
return c2, c3, c4, c5
| 8,447 | 37.4 | 105 | py |
SyNet | SyNet-master/tensorpack/examples/FasterRCNN/modeling/model_cascade.py | import tensorflow as tf
from tensorpack.tfutils import get_current_tower_context
from config import config as cfg
from utils.box_ops import pairwise_iou, area as tf_area
from .model_box import clip_boxes
from .model_frcnn import BoxProposals, FastRCNNHead, fastrcnn_outputs
class CascadeRCNNHead(object):
def __init__(self, proposals,
roi_func, fastrcnn_head_func, gt_targets, image_shape2d,
num_categories):
"""
Args:
proposals: BoxProposals
roi_func (boxes -> features): a function to crop features with rois
fastrcnn_head_func (features -> features): the fastrcnn head to apply on the cropped features
gt_targets (gt_boxes, gt_labels):
"""
for k, v in locals().items():
if k != 'self':
setattr(self, k, v)
self.gt_boxes, self.gt_labels = gt_targets
del self.gt_targets
self.num_cascade_stages = len(cfg.CASCADE.IOUS)
self.training = get_current_tower_context().is_training
if self.training:
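            # forward pass is identity; the backward pass scales the gradient each stage sends into the shared features by 1/num_cascade_stages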
@tf.custom_gradient
def scale_gradient(x):
return x, lambda dy: dy * (1.0 / self.num_cascade_stages)
self.scale_gradient = scale_gradient
else:
self.scale_gradient = tf.identity
ious = cfg.CASCADE.IOUS
# It's unclear how to do >3 stages, so it does not make sense to implement them
assert self.num_cascade_stages == 3, "Only 3-stage cascade was implemented!"
with tf.variable_scope('cascade_rcnn_stage1'):
H1, B1 = self.run_head(self.proposals, 0)
with tf.variable_scope('cascade_rcnn_stage2'):
B1_proposal = self.match_box_with_gt(B1, ious[1])
H2, B2 = self.run_head(B1_proposal, 1)
with tf.variable_scope('cascade_rcnn_stage3'):
B2_proposal = self.match_box_with_gt(B2, ious[2])
H3, B3 = self.run_head(B2_proposal, 2)
self._cascade_boxes = [B1, B2, B3]
self._heads = [H1, H2, H3]
def run_head(self, proposals, stage):
"""
Args:
proposals: BoxProposals
stage: 0, 1, 2
Returns:
FastRCNNHead
Nx4, updated boxes
"""
reg_weights = tf.constant(cfg.CASCADE.BBOX_REG_WEIGHTS[stage], dtype=tf.float32)
pooled_feature = self.roi_func(proposals.boxes) # N,C,S,S
pooled_feature = self.scale_gradient(pooled_feature)
head_feature = self.fastrcnn_head_func('head', pooled_feature)
label_logits, box_logits = fastrcnn_outputs(
'outputs', head_feature, self.num_categories, class_agnostic_regression=True)
head = FastRCNNHead(proposals, box_logits, label_logits, self.gt_boxes, reg_weights)
refined_boxes = head.decoded_output_boxes_class_agnostic()
refined_boxes = clip_boxes(refined_boxes, self.image_shape2d)
if self.training:
refined_boxes = tf.boolean_mask(refined_boxes, tf_area(refined_boxes) > 0)
return head, tf.stop_gradient(refined_boxes, name='output_boxes')
def match_box_with_gt(self, boxes, iou_threshold):
"""
Args:
boxes: Nx4
Returns:
BoxProposals
"""
if self.training:
with tf.name_scope('match_box_with_gt_{}'.format(iou_threshold)):
iou = pairwise_iou(boxes, self.gt_boxes) # NxM
max_iou_per_box = tf.reduce_max(iou, axis=1) # N
best_iou_ind = tf.cond(tf.shape(iou)[1] > 0,
lambda: tf.argmax(iou, axis=1), # #proposal, each in 0~m-1
lambda: tf.zeros([tf.shape(iou)[0]], dtype=tf.int64))
labels_per_box = tf.gather(self.gt_labels, best_iou_ind)
fg_mask = max_iou_per_box >= iou_threshold
fg_inds_wrt_gt = tf.boolean_mask(best_iou_ind, fg_mask)
labels_per_box = tf.stop_gradient(labels_per_box * tf.cast(fg_mask, tf.int64))
return BoxProposals(boxes, labels_per_box, fg_inds_wrt_gt)
else:
return BoxProposals(boxes)
def losses(self):
ret = []
for idx, head in enumerate(self._heads):
with tf.name_scope('cascade_loss_stage{}'.format(idx + 1)):
ret.extend(head.losses())
return ret
def decoded_output_boxes(self):
"""
Returns:
Nx#classx4
"""
ret = self._cascade_boxes[-1]
ret = tf.expand_dims(ret, 1) # class-agnostic
return tf.tile(ret, [1, self.num_categories + 1, 1])
def output_scores(self, name=None):
"""
Returns:
Nx#class
"""
scores = [head.output_scores('cascade_scores_stage{}'.format(idx + 1))
for idx, head in enumerate(self._heads)]
return tf.multiply(tf.add_n(scores), (1.0 / self.num_cascade_stages), name=name)
| 5,018 | 39.152 | 105 | py |
SyNet | SyNet-master/tensorpack/examples/FasterRCNN/modeling/model_fpn.py | # -*- coding: utf-8 -*-
import itertools
import numpy as np
import tensorflow as tf
from tensorpack.models import Conv2D, FixedUnPooling, MaxPooling, layer_register
from tensorpack.tfutils.argscope import argscope
from tensorpack.tfutils.scope_utils import under_name_scope
from tensorpack.tfutils.summary import add_moving_summary
from tensorpack.tfutils.tower import get_current_tower_context
from tensorpack.utils.argtools import memoized
from config import config as cfg
from utils.box_ops import area as tf_area
from .backbone import GroupNorm
from .model_box import roi_align
from .model_rpn import generate_rpn_proposals, rpn_losses, get_all_anchors
@layer_register(log_shape=True)
def fpn_model(features):
"""
Args:
features ([tf.Tensor]): ResNet features c2-c5
Returns:
[tf.Tensor]: FPN features p2-p6
"""
assert len(features) == 4, features
num_channel = cfg.FPN.NUM_CHANNEL
use_gn = cfg.FPN.NORM == 'GN'
def upsample2x(name, x):
try:
resize = tf.compat.v2.image.resize_images
with tf.name_scope(name):
shp2d = tf.shape(x)[2:]
x = tf.transpose(x, [0, 2, 3, 1])
x = resize(x, shp2d * 2, 'nearest')
x = tf.transpose(x, [0, 3, 1, 2])
return x
except AttributeError:
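            # TF without tf.compat.v2: fall back to FixedUnPooling; a 2x2 matrix of ones repeats
            # every pixel into a 2x2 block, i.e. nearest-neighbor 2x upsampling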
return FixedUnPooling(
name, x, 2, unpool_mat=np.ones((2, 2), dtype='float32'),
data_format='channels_first')
with argscope(Conv2D, data_format='channels_first',
activation=tf.identity, use_bias=True,
kernel_initializer=tf.variance_scaling_initializer(scale=1.)):
lat_2345 = [Conv2D('lateral_1x1_c{}'.format(i + 2), c, num_channel, 1)
for i, c in enumerate(features)]
if use_gn:
lat_2345 = [GroupNorm('gn_c{}'.format(i + 2), c) for i, c in enumerate(lat_2345)]
lat_sum_5432 = []
for idx, lat in enumerate(lat_2345[::-1]):
if idx == 0:
lat_sum_5432.append(lat)
else:
lat = lat + upsample2x('upsample_lat{}'.format(6 - idx), lat_sum_5432[-1])
lat_sum_5432.append(lat)
p2345 = [Conv2D('posthoc_3x3_p{}'.format(i + 2), c, num_channel, 3)
for i, c in enumerate(lat_sum_5432[::-1])]
if use_gn:
p2345 = [GroupNorm('gn_p{}'.format(i + 2), c) for i, c in enumerate(p2345)]
p6 = MaxPooling('maxpool_p6', p2345[-1], pool_size=1, strides=2, data_format='channels_first', padding='VALID')
return p2345 + [p6]
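# Illustrative wiring (as used by the FPN models in this example):
#   c2345 = resnet_fpn_backbone(image, cfg.BACKBONE.RESNET_NUM_BLOCKS)   # backbone.py
#   p23456 = fpn_model('fpn', c2345)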
@under_name_scope()
def fpn_map_rois_to_levels(boxes):
"""
Assign boxes to level 2~5.
Args:
boxes (nx4):
Returns:
[tf.Tensor]: 4 tensors for level 2-5. Each tensor is a vector of indices of boxes in its level.
[tf.Tensor]: 4 tensors, the gathered boxes in each level.
Be careful that the returned tensor could be empty.
"""
sqrtarea = tf.sqrt(tf_area(boxes))
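    # Eq. (1) of the FPN paper: k = floor(k0 + log2(sqrt(area) / 224)) with k0 = 4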
level = tf.cast(tf.floor(
4 + tf.log(sqrtarea * (1. / 224) + 1e-6) * (1.0 / np.log(2))), tf.int32)
# RoI levels range from 2~5 (not 6)
level_ids = [
tf.where(level <= 2),
tf.where(tf.equal(level, 3)), # == is not supported
tf.where(tf.equal(level, 4)),
tf.where(level >= 5)]
level_ids = [tf.reshape(x, [-1], name='roi_level{}_id'.format(i + 2))
for i, x in enumerate(level_ids)]
num_in_levels = [tf.size(x, name='num_roi_level{}'.format(i + 2))
for i, x in enumerate(level_ids)]
add_moving_summary(*num_in_levels)
level_boxes = [tf.gather(boxes, ids) for ids in level_ids]
return level_ids, level_boxes
@under_name_scope()
def multilevel_roi_align(features, rcnn_boxes, resolution):
"""
Args:
features ([tf.Tensor]): 4 FPN feature level 2-5
rcnn_boxes (tf.Tensor): nx4 boxes
resolution (int): output spatial resolution
Returns:
NxC x res x res
"""
assert len(features) == 4, features
# Reassign rcnn_boxes to levels
level_ids, level_boxes = fpn_map_rois_to_levels(rcnn_boxes)
all_rois = []
# Crop patches from corresponding levels
for i, boxes, featuremap in zip(itertools.count(), level_boxes, features):
with tf.name_scope('roi_level{}'.format(i + 2)):
boxes_on_featuremap = boxes * (1.0 / cfg.FPN.ANCHOR_STRIDES[i])
all_rois.append(roi_align(featuremap, boxes_on_featuremap, resolution))
# this can fail if using TF<=1.8 with MKL build
all_rois = tf.concat(all_rois, axis=0) # NCHW
# Unshuffle to the original order, to match the original samples
level_id_perm = tf.concat(level_ids, axis=0) # A permutation of 1~N
level_id_invert_perm = tf.invert_permutation(level_id_perm)
all_rois = tf.gather(all_rois, level_id_invert_perm, name="output")
return all_rois
def multilevel_rpn_losses(
multilevel_anchors, multilevel_label_logits, multilevel_box_logits):
"""
Args:
multilevel_anchors: #lvl RPNAnchors
multilevel_label_logits: #lvl tensors of shape HxWxA
multilevel_box_logits: #lvl tensors of shape HxWxAx4
Returns:
label_loss, box_loss
"""
num_lvl = len(cfg.FPN.ANCHOR_STRIDES)
assert len(multilevel_anchors) == num_lvl
assert len(multilevel_label_logits) == num_lvl
assert len(multilevel_box_logits) == num_lvl
losses = []
with tf.name_scope('rpn_losses'):
for lvl in range(num_lvl):
anchors = multilevel_anchors[lvl]
label_loss, box_loss = rpn_losses(
anchors.gt_labels, anchors.encoded_gt_boxes(),
multilevel_label_logits[lvl], multilevel_box_logits[lvl],
name_scope='level{}'.format(lvl + 2))
losses.extend([label_loss, box_loss])
total_label_loss = tf.add_n(losses[::2], name='label_loss')
total_box_loss = tf.add_n(losses[1::2], name='box_loss')
add_moving_summary(total_label_loss, total_box_loss)
return [total_label_loss, total_box_loss]
@under_name_scope()
def generate_fpn_proposals(
multilevel_pred_boxes, multilevel_label_logits, image_shape2d):
"""
Args:
multilevel_pred_boxes: #lvl HxWxAx4 boxes
multilevel_label_logits: #lvl tensors of shape HxWxA
Returns:
boxes: kx4 float
scores: k logits
"""
num_lvl = len(cfg.FPN.ANCHOR_STRIDES)
assert len(multilevel_pred_boxes) == num_lvl
assert len(multilevel_label_logits) == num_lvl
training = get_current_tower_context().is_training
all_boxes = []
all_scores = []
if cfg.FPN.PROPOSAL_MODE == 'Level':
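        # 'Level' mode: run NMS within each level separately, then keep the top-k proposals across all levels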
fpn_nms_topk = cfg.RPN.TRAIN_PER_LEVEL_NMS_TOPK if training else cfg.RPN.TEST_PER_LEVEL_NMS_TOPK
for lvl in range(num_lvl):
with tf.name_scope('Lvl{}'.format(lvl + 2)):
pred_boxes_decoded = multilevel_pred_boxes[lvl]
proposal_boxes, proposal_scores = generate_rpn_proposals(
tf.reshape(pred_boxes_decoded, [-1, 4]),
tf.reshape(multilevel_label_logits[lvl], [-1]),
image_shape2d, fpn_nms_topk)
all_boxes.append(proposal_boxes)
all_scores.append(proposal_scores)
proposal_boxes = tf.concat(all_boxes, axis=0) # nx4
proposal_scores = tf.concat(all_scores, axis=0) # n
# Here we are different from Detectron.
# Detectron picks top-k within the batch, rather than within an image. However we do not have a batch.
proposal_topk = tf.minimum(tf.size(proposal_scores), fpn_nms_topk)
proposal_scores, topk_indices = tf.nn.top_k(proposal_scores, k=proposal_topk, sorted=False)
proposal_boxes = tf.gather(proposal_boxes, topk_indices, name="all_proposals")
else:
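        # otherwise: merge boxes and scores from all levels first, then run a single NMS over the union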
for lvl in range(num_lvl):
with tf.name_scope('Lvl{}'.format(lvl + 2)):
pred_boxes_decoded = multilevel_pred_boxes[lvl]
all_boxes.append(tf.reshape(pred_boxes_decoded, [-1, 4]))
all_scores.append(tf.reshape(multilevel_label_logits[lvl], [-1]))
all_boxes = tf.concat(all_boxes, axis=0)
all_scores = tf.concat(all_scores, axis=0)
proposal_boxes, proposal_scores = generate_rpn_proposals(
all_boxes, all_scores, image_shape2d,
cfg.RPN.TRAIN_PRE_NMS_TOPK if training else cfg.RPN.TEST_PRE_NMS_TOPK,
cfg.RPN.TRAIN_POST_NMS_TOPK if training else cfg.RPN.TEST_POST_NMS_TOPK)
tf.sigmoid(proposal_scores, name='probs') # for visualization
return tf.stop_gradient(proposal_boxes, name='boxes'), \
tf.stop_gradient(proposal_scores, name='scores')
@memoized
def get_all_anchors_fpn(*, strides, sizes, ratios, max_size):
"""
Returns:
[anchors]: each anchors is a SxSx NUM_ANCHOR_RATIOS x4 array.
"""
assert len(strides) == len(sizes)
foas = []
for stride, size in zip(strides, sizes):
foa = get_all_anchors(stride=stride, sizes=(size,), ratios=ratios, max_size=max_size)
foas.append(foa)
return foas
| 9,207 | 38.016949 | 119 | py |
SyNet | SyNet-master/tensorpack/examples/FasterRCNN/modeling/model_box.py | # -*- coding: utf-8 -*-
# File: model_box.py
import numpy as np
import tensorflow as tf
from collections import namedtuple
from tensorpack.tfutils.scope_utils import under_name_scope
from config import config
@under_name_scope()
def clip_boxes(boxes, window, name=None):
"""
Args:
boxes: nx4, xyxy
window: [h, w]
"""
boxes = tf.maximum(boxes, 0.0)
m = tf.tile(tf.reverse(window, [0]), [2]) # (4,)
boxes = tf.minimum(boxes, tf.cast(m, tf.float32), name=name)
return boxes
@under_name_scope()
def decode_bbox_target(box_predictions, anchors):
"""
Args:
box_predictions: (..., 4), logits
anchors: (..., 4), floatbox. Must have the same shape
Returns:
box_decoded: (..., 4), float32. With the same shape.
"""
orig_shape = tf.shape(anchors)
box_pred_txtytwth = tf.reshape(box_predictions, (-1, 2, 2))
box_pred_txty, box_pred_twth = tf.split(box_pred_txtytwth, 2, axis=1)
# each is (...)x1x2
anchors_x1y1x2y2 = tf.reshape(anchors, (-1, 2, 2))
anchors_x1y1, anchors_x2y2 = tf.split(anchors_x1y1x2y2, 2, axis=1)
waha = anchors_x2y2 - anchors_x1y1
xaya = (anchors_x2y2 + anchors_x1y1) * 0.5
clip = np.log(config.PREPROC.MAX_SIZE / 16.)
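    # clamp the predicted log-scale: a box can grow to at most MAX_SIZE/16 times its anchor, which also keeps exp() from overflowing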
wbhb = tf.exp(tf.minimum(box_pred_twth, clip)) * waha
xbyb = box_pred_txty * waha + xaya
x1y1 = xbyb - wbhb * 0.5
x2y2 = xbyb + wbhb * 0.5 # (...)x1x2
out = tf.concat([x1y1, x2y2], axis=-2)
return tf.reshape(out, orig_shape)
@under_name_scope()
def encode_bbox_target(boxes, anchors):
"""
Args:
boxes: (..., 4), float32
anchors: (..., 4), float32
Returns:
box_encoded: (..., 4), float32 with the same shape.
"""
anchors_x1y1x2y2 = tf.reshape(anchors, (-1, 2, 2))
anchors_x1y1, anchors_x2y2 = tf.split(anchors_x1y1x2y2, 2, axis=1)
waha = anchors_x2y2 - anchors_x1y1
xaya = (anchors_x2y2 + anchors_x1y1) * 0.5
boxes_x1y1x2y2 = tf.reshape(boxes, (-1, 2, 2))
boxes_x1y1, boxes_x2y2 = tf.split(boxes_x1y1x2y2, 2, axis=1)
wbhb = boxes_x2y2 - boxes_x1y1
xbyb = (boxes_x2y2 + boxes_x1y1) * 0.5
# Note that here not all boxes are valid. Some may be zero
txty = (xbyb - xaya) / waha
twth = tf.log(wbhb / waha) # may contain -inf for invalid boxes
encoded = tf.concat([txty, twth], axis=1) # (-1x2x2)
return tf.reshape(encoded, tf.shape(boxes))
@under_name_scope()
def crop_and_resize(image, boxes, box_ind, crop_size, pad_border=True):
"""
Aligned version of tf.image.crop_and_resize, following our definition of floating point boxes.
Args:
image: NCHW
boxes: nx4, x1y1x2y2
box_ind: (n,)
crop_size (int):
Returns:
n,C,size,size
"""
assert isinstance(crop_size, int), crop_size
boxes = tf.stop_gradient(boxes)
# TF's crop_and_resize produces zeros on border
if pad_border:
# this can be quite slow
image = tf.pad(image, [[0, 0], [0, 0], [1, 1], [1, 1]], mode='SYMMETRIC')
boxes = boxes + 1
@under_name_scope()
def transform_fpcoor_for_tf(boxes, image_shape, crop_shape):
"""
The way tf.image.crop_and_resize works (with normalized box):
Initial point (the value of output[0]): x0_box * (W_img - 1)
Spacing: w_box * (W_img - 1) / (W_crop - 1)
Use the above grid to bilinear sample.
However, what we want is (with fpcoor box):
Spacing: w_box / W_crop
Initial point: x0_box + spacing/2 - 0.5
(-0.5 because bilinear sample (in my definition) assumes floating point coordinate
(0.0, 0.0) is the same as pixel value (0, 0))
This function transform fpcoor boxes to a format to be used by tf.image.crop_and_resize
Returns:
y1x1y2x2
"""
x0, y0, x1, y1 = tf.split(boxes, 4, axis=1)
spacing_w = (x1 - x0) / tf.cast(crop_shape[1], tf.float32)
spacing_h = (y1 - y0) / tf.cast(crop_shape[0], tf.float32)
imshape = [tf.cast(image_shape[0] - 1, tf.float32), tf.cast(image_shape[1] - 1, tf.float32)]
nx0 = (x0 + spacing_w / 2 - 0.5) / imshape[1]
ny0 = (y0 + spacing_h / 2 - 0.5) / imshape[0]
nw = spacing_w * tf.cast(crop_shape[1] - 1, tf.float32) / imshape[1]
nh = spacing_h * tf.cast(crop_shape[0] - 1, tf.float32) / imshape[0]
return tf.concat([ny0, nx0, ny0 + nh, nx0 + nw], axis=1)
image_shape = tf.shape(image)[2:]
boxes = transform_fpcoor_for_tf(boxes, image_shape, [crop_size, crop_size])
image = tf.transpose(image, [0, 2, 3, 1]) # nhwc
ret = tf.image.crop_and_resize(
image, boxes, tf.cast(box_ind, tf.int32),
crop_size=[crop_size, crop_size])
ret = tf.transpose(ret, [0, 3, 1, 2]) # ncss
return ret
@under_name_scope()
def roi_align(featuremap, boxes, resolution):
"""
Args:
featuremap: 1xCxHxW
boxes: Nx4 floatbox
resolution: output spatial resolution
Returns:
NxCx res x res
"""
# sample 4 locations per roi bin
ret = crop_and_resize(
featuremap, boxes,
tf.zeros([tf.shape(boxes)[0]], dtype=tf.int32),
resolution * 2)
try:
avgpool = tf.nn.avg_pool2d
except AttributeError:
avgpool = tf.nn.avg_pool
ret = avgpool(ret, [1, 1, 2, 2], [1, 1, 2, 2], padding='SAME', data_format='NCHW')
return ret
class RPNAnchors(namedtuple('_RPNAnchors', ['boxes', 'gt_labels', 'gt_boxes'])):
"""
boxes (FS x FS x NA x 4): The anchor boxes.
gt_labels (FS x FS x NA):
gt_boxes (FS x FS x NA x 4): Groundtruth boxes corresponding to each anchor.
"""
def encoded_gt_boxes(self):
return encode_bbox_target(self.gt_boxes, self.boxes)
def decode_logits(self, logits):
return decode_bbox_target(logits, self.boxes)
@under_name_scope()
def narrow_to(self, featuremap):
"""
Slice anchors to the spatial size of this featuremap.
"""
shape2d = tf.shape(featuremap)[2:] # h,w
slice3d = tf.concat([shape2d, [-1]], axis=0)
slice4d = tf.concat([shape2d, [-1, -1]], axis=0)
boxes = tf.slice(self.boxes, [0, 0, 0, 0], slice4d)
gt_labels = tf.slice(self.gt_labels, [0, 0, 0], slice3d)
gt_boxes = tf.slice(self.gt_boxes, [0, 0, 0, 0], slice4d)
return RPNAnchors(boxes, gt_labels, gt_boxes)
if __name__ == '__main__':
"""
Demonstrate what's wrong with tf.image.crop_and_resize.
Also reported at https://github.com/tensorflow/tensorflow/issues/26278
"""
import tensorflow.contrib.eager as tfe
tfe.enable_eager_execution()
# want to crop 2x2 out of a 5x5 image, and resize to 4x4
image = np.arange(25).astype('float32').reshape(5, 5)
boxes = np.asarray([[1, 1, 3, 3]], dtype='float32')
target = 4
print(crop_and_resize(
image[None, None, :, :], boxes, [0], target)[0][0])
"""
Expected values:
4.5 5 5.5 6
7 7.5 8 8.5
9.5 10 10.5 11
12 12.5 13 13.5
You cannot easily get the above results with tf.image.crop_and_resize.
Try out yourself here:
"""
print(tf.image.crop_and_resize(
image[None, :, :, None],
np.asarray([[1, 1, 2, 2]]) / 4.0, [0], [target, target])[0][:, :, 0])
| 7,322 | 31.402655 | 100 | py |
SyNet | SyNet-master/tensorpack/examples/FasterRCNN/convert_d2/convert_d2.py | #!/usr/bin/env python
import argparse
import numpy as np
import pickle
from detectron2.config import get_cfg
def convert_config(cfg):
ret = []
ret.append(("MODE_MASK", cfg.MODEL.MASK_ON))
has_fpn = "fpn" in cfg.MODEL.BACKBONE.NAME
ret.append(("MODE_FPN", has_fpn))
if not has_fpn:
# we only support C4 and FPN
assert cfg.MODEL.ROI_HEADS.NAME == "Res5ROIHeads"
else:
ret.append(("FPN.CASCADE", cfg.MODEL.ROI_HEADS.NAME == "CascadeROIHeads"))
assert len(cfg.MODEL.ROI_BOX_CASCADE_HEAD.IOUS) == 3
depth = cfg.MODEL.RESNETS.DEPTH
assert depth in [50, 101], depth
if depth == 101:
ret.append(("BACKBONE.RESNET_NUM_BLOCKS", [3, 4, 23, 3]))
ret.append(("BACKBONE.STRIDE_1X1", cfg.MODEL.RESNETS.STRIDE_IN_1X1))
ret.append(("PREPROC.PIXEL_MEAN", cfg.MODEL.PIXEL_MEAN[::-1]))
ret.append(("PREPROC.PIXEL_STD", cfg.MODEL.PIXEL_STD[::-1]))
assert cfg.MODEL.ROI_MASK_HEAD.POOLER_TYPE == "ROIAlignV2"
assert cfg.MODEL.ROI_BOX_HEAD.POOLER_TYPE == "ROIAlignV2"
return ret
def convert_weights(d, cfg):
has_fpn = "fpn" in cfg.MODEL.BACKBONE.NAME
ret = {}
def _convert_conv(src, dst):
src_w = d.pop(src + ".weight").transpose(2, 3, 1, 0)
ret[dst + "/W"] = src_w
if src + ".norm.weight" in d: # has norm
ret[dst + "/bn/gamma"] = d.pop(src + ".norm.weight")
ret[dst + "/bn/beta"] = d.pop(src + ".norm.bias")
ret[dst + "/bn/variance/EMA"] = d.pop(src + ".norm.running_var")
ret[dst + "/bn/mean/EMA"] = d.pop(src + ".norm.running_mean")
if src + ".bias" in d:
ret[dst + "/b"] = d.pop(src + ".bias")
def _convert_fc(src, dst):
ret[dst + "/W"] = d.pop(src + ".weight").transpose()
ret[dst + "/b"] = d.pop(src + ".bias")
if has_fpn:
backbone_prefix = "backbone.bottom_up."
else:
backbone_prefix = "backbone."
_convert_conv(backbone_prefix + "stem.conv1", "conv0")
for grpid in range(4):
if not has_fpn and grpid == 3:
backbone_prefix = "roi_heads."
for blkid in range([3, 4, 6 if cfg.MODEL.RESNETS.DEPTH == 50 else 23, 3][grpid]):
_convert_conv(backbone_prefix + f"res{grpid + 2}.{blkid}.conv1",
f"group{grpid}/block{blkid}/conv1")
_convert_conv(backbone_prefix + f"res{grpid + 2}.{blkid}.conv2",
f"group{grpid}/block{blkid}/conv2")
_convert_conv(backbone_prefix + f"res{grpid + 2}.{blkid}.conv3",
f"group{grpid}/block{blkid}/conv3")
if blkid == 0:
_convert_conv(backbone_prefix + f"res{grpid + 2}.{blkid}.shortcut",
f"group{grpid}/block{blkid}/convshortcut")
if has_fpn:
for lvl in range(2, 6):
_convert_conv(f"backbone.fpn_lateral{lvl}", f"fpn/lateral_1x1_c{lvl}")
_convert_conv(f"backbone.fpn_output{lvl}", f"fpn/posthoc_3x3_p{lvl}")
# RPN:
_convert_conv("proposal_generator.rpn_head.conv", "rpn/conv0")
_convert_conv("proposal_generator.rpn_head.objectness_logits", "rpn/class")
_convert_conv("proposal_generator.rpn_head.anchor_deltas", "rpn/box")
def _convert_box_predictor(src, dst):
if cfg.MODEL.ROI_BOX_HEAD.CLS_AGNOSTIC_BBOX_REG:
_convert_fc(src + ".bbox_pred", dst + "/box")
else:
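            # tensorpack keeps a (never used) background row in the box-regression weights;
            # d2 has none, so duplicate the first class's row as a placeholder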
v = d.pop(src + ".bbox_pred.bias")
ret[dst + "/box/b"] = np.concatenate((v[:4], v))
v = d.pop(src + ".bbox_pred.weight")
ret[dst + "/box/W"] = np.concatenate((v[:4, :], v), axis=0).transpose()
_convert_fc(src + ".cls_score", dst + "/class")
num_class = ret[dst + "/class/W"].shape[1] - 1
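        # detectron2 puts the background class last, tensorpack expects it first: rotate the class columns accordingly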
idxs = np.concatenate(((num_class, ), np.arange(num_class)))
ret[dst + "/class/W"] = ret[dst + "/class/W"][:, idxs]
ret[dst + "/class/b"] = ret[dst + "/class/b"][idxs]
# Fast R-CNN: box head
has_cascade = cfg.MODEL.ROI_HEADS.NAME == "CascadeROIHeads"
if has_cascade:
assert cfg.MODEL.ROI_BOX_HEAD.CLS_AGNOSTIC_BBOX_REG
for k in range(3):
for i in range(cfg.MODEL.ROI_BOX_HEAD.NUM_FC):
_convert_fc(f"roi_heads.box_head.{k}.fc{i+1}", f"cascade_rcnn_stage{k+1}/head/fc{i+6}")
_convert_box_predictor(f"roi_heads.box_predictor.{k}", f"cascade_rcnn_stage{k+1}/outputs")
else:
for i in range(cfg.MODEL.ROI_BOX_HEAD.NUM_FC):
_convert_fc(f"roi_heads.box_head.fc{i+1}", f"fastrcnn/fc{i+6}")
_convert_box_predictor("roi_heads.box_predictor", "fastrcnn/outputs" if has_fpn else "fastrcnn")
# mask head
if cfg.MODEL.MASK_ON:
for fcn in range(cfg.MODEL.ROI_MASK_HEAD.NUM_CONV):
_convert_conv(f"roi_heads.mask_head.mask_fcn{fcn+1}", f"maskrcnn/fcn{fcn}")
_convert_conv("roi_heads.mask_head.deconv", "maskrcnn/deconv")
_convert_conv("roi_heads.mask_head.predictor", "maskrcnn/conv")
for k in list(d.keys()):
if "cell_anchors" in k:
d.pop(k)
assert len(d) == 0, d.keys()
return ret
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--d2-config")
parser.add_argument("--d2-pkl")
parser.add_argument("--output")
args = parser.parse_args()
cfg = get_cfg()
cfg.merge_from_file(args.d2_config)
tp_cfg = convert_config(cfg)
for k, v in tp_cfg:
print("'{}={}'".format(k, v).replace(' ', ''), end=' ')
with open(args.d2_pkl, "rb") as f:
d2_dict = pickle.load(f)["model"]
tp_dict = convert_weights(d2_dict, cfg)
np.savez_compressed(args.output, **tp_dict)
| 5,712 | 38.673611 | 104 | py |
SyNet | SyNet-master/tensorpack/sotabench/sotabench.py | # -*- coding: utf-8 -*-
import os
import sys
import tqdm
from contextlib import contextmanager
from tensorpack.predict import OfflinePredictor, PredictConfig
from tensorpack.tfutils import SmartInit
from tensorpack.utils.fs import download
from sotabencheval.utils import is_server
from sotabencheval.object_detection import COCOEvaluator
# import faster rcnn example
sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..", "examples", "FasterRCNN"))
from config import finalize_configs, config as cfg # noqa
from eval import predict_image # noqa
from dataset import register_coco # noqa
from dataset.coco import COCODetection # noqa
from data import get_eval_dataflow # noqa
from modeling.generalized_rcnn import ResNetFPNModel, ResNetC4Model # noqa
if is_server():
DATA_ROOT = "./.data/vision/"
else: # local settings
DATA_ROOT = os.path.expanduser("~/data/")
COCO_ROOT = os.path.join(DATA_ROOT, "coco")
register_coco(COCO_ROOT)
@contextmanager
def backup_cfg():
orig_config = cfg.to_dict()
yield
cfg.from_dict(orig_config)
def evaluate_rcnn(model_name, paper_arxiv_id, cfg_list, model_file):
evaluator = COCOEvaluator(
root=COCO_ROOT, model_name=model_name, paper_arxiv_id=paper_arxiv_id
)
category_id_to_coco_id = {
v: k for k, v in COCODetection.COCO_id_to_category_id.items()
}
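    # training remaps the sparse COCO category ids to contiguous 1..NUM_CATEGORY; invert that here
    # (ids that were never remapped fall through via the .get default)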
cfg.update_args(cfg_list) # TODO backup/restore config
finalize_configs(False)
MODEL = ResNetFPNModel() if cfg.MODE_FPN else ResNetC4Model()
predcfg = PredictConfig(
model=MODEL,
session_init=SmartInit(model_file),
input_names=MODEL.get_inference_tensor_names()[0],
output_names=MODEL.get_inference_tensor_names()[1],
)
predictor = OfflinePredictor(predcfg)
def xyxy_to_xywh(box):
box[2] -= box[0]
box[3] -= box[1]
return box
df = get_eval_dataflow("coco_val2017")
df.reset_state()
for img, img_id in tqdm.tqdm(df, total=len(df)):
results = predict_image(img, predictor)
res = [
{
"image_id": img_id,
"category_id": category_id_to_coco_id.get(
int(r.class_id), int(r.class_id)
),
"bbox": xyxy_to_xywh([round(float(x), 4) for x in r.box]),
"score": round(float(r.score), 3),
}
for r in results
]
evaluator.add(res)
if evaluator.cache_exists:
break
evaluator.save()
download(
"http://models.tensorpack.com/FasterRCNN/COCO-MaskRCNN-R50FPN2x.npz",
"./",
expect_size=165362754)
with backup_cfg():
evaluate_rcnn(
"Mask R-CNN (ResNet-50-FPN, 2x)", "1703.06870", [],
"COCO-MaskRCNN-R50FPN2x.npz",
)
download(
"http://models.tensorpack.com/FasterRCNN/COCO-MaskRCNN-R50FPN2xGN.npz",
"./",
expect_size=167363872)
with backup_cfg():
evaluate_rcnn(
"Mask R-CNN (ResNet-50-FPN, GroupNorm)", "1803.08494",
"""FPN.NORM=GN BACKBONE.NORM=GN
FPN.FRCNN_HEAD_FUNC=fastrcnn_4conv1fc_gn_head
FPN.MRCNN_HEAD_FUNC=maskrcnn_up4conv_gn_head""".split(),
"COCO-MaskRCNN-R50FPN2xGN.npz",
)
download(
"http://models.tensorpack.com/FasterRCNN/COCO-MaskRCNN-R101FPN9xGNCasAugScratch.npz",
"./",
expect_size=355680386)
with backup_cfg():
evaluate_rcnn(
"Mask R-CNN (ResNet-101-FPN, GN, Cascade)", "1811.08883",
"""
FPN.CASCADE=True BACKBONE.RESNET_NUM_BLOCKS=[3,4,23,3] FPN.NORM=GN
BACKBONE.NORM=GN FPN.FRCNN_HEAD_FUNC=fastrcnn_4conv1fc_gn_head
FPN.MRCNN_HEAD_FUNC=maskrcnn_up4conv_gn_head""".split(),
"COCO-MaskRCNN-R101FPN9xGNCasAugScratch.npz",
)
| 3,715 | 28.492063 | 91 | py |
SyNet | SyNet-master/tensorpack/scripts/checkpoint-manipulate.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# File: checkpoint-manipulate.py
import argparse
import numpy as np
from tensorpack.tfutils.varmanip import load_chkpt_vars
from tensorpack.utils import logger
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('model')
parser.add_argument('--dump', help='dump to an npz file')
parser.add_argument('--shell', action='store_true', help='start a shell with the params')
args = parser.parse_args()
if args.model.endswith('.npy'):
params = np.load(args.model, encoding='latin1').item()
elif args.model.endswith('.npz'):
params = dict(np.load(args.model))
else:
params = load_chkpt_vars(args.model)
logger.info("Variables in the model:")
logger.info(str(params.keys()))
if args.dump:
assert args.dump.endswith('.npz'), args.dump
np.savez(args.dump, **params)
if args.shell:
# params is a dict. play with it
import IPython as IP
IP.embed(config=IP.terminal.ipapp.load_default_config())
| 1,076 | 28.916667 | 93 | py |
SyNet | SyNet-master/tensorpack/scripts/checkpoint-prof.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# File: checkpoint-prof.py
import argparse
import numpy as np
import tensorflow as tf
from tensorpack import get_default_sess_config, get_op_tensor_name
from tensorpack.tfutils.sessinit import SmartInit
from tensorpack.utils import logger
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--model', help='model file')
parser.add_argument('--meta', help='metagraph proto file. Will be used to load the graph', required=True)
parser.add_argument('-i', '--input', nargs='+', help='list of input tensors with their shapes.')
parser.add_argument('-o', '--output', nargs='+', help='list of output tensors')
parser.add_argument('--warmup', help='warmup iterations', type=int, default=5)
parser.add_argument('--print-flops', action='store_true')
parser.add_argument('--print-params', action='store_true')
parser.add_argument('--print-timing', action='store_true')
args = parser.parse_args()
tf.train.import_meta_graph(args.meta, clear_devices=True)
G = tf.get_default_graph()
with tf.Session(config=get_default_sess_config()) as sess:
init = SmartInit(args.model)
init.init(sess)
feed = {}
for inp in args.input:
inp = inp.split('=')
name = get_op_tensor_name(inp[0].strip())[1]
shape = list(map(int, inp[1].strip().split(',')))
tensor = G.get_tensor_by_name(name)
logger.info("Feeding shape ({}) to tensor {}".format(','.join(map(str, shape)), name))
feed[tensor] = np.random.rand(*shape)
fetches = []
for name in args.output:
name = get_op_tensor_name(name)[1]
fetches.append(G.get_tensor_by_name(name))
logger.info("Fetching tensors: {}".format(', '.join([k.name for k in fetches])))
for _ in range(args.warmup):
sess.run(fetches, feed_dict=feed)
opt = tf.RunOptions()
opt.trace_level = tf.RunOptions.FULL_TRACE
meta = tf.RunMetadata()
sess.run(fetches, feed_dict=feed, options=opt, run_metadata=meta)
if args.print_flops:
tf.profiler.profile(
G,
run_meta=meta,
cmd='op',
options=tf.profiler.ProfileOptionBuilder.float_operation())
if args.print_params:
tf.profiler.profile(
G,
run_meta=meta,
options=tf.profiler.ProfileOptionBuilder.trainable_variables_parameter())
if args.print_timing:
tf.profiler.profile(
G,
run_meta=meta,
options=tf.profiler.ProfileOptionBuilder.time_and_memory())
| 2,749 | 37.194444 | 109 | py |
SyNet | SyNet-master/tensorpack/scripts/ls-checkpoint.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# File: ls-checkpoint.py
import numpy as np
import pprint
import sys
import six
import tensorflow as tf
from tensorpack.tfutils.varmanip import get_checkpoint_path
if __name__ == '__main__':
fpath = sys.argv[1]
if fpath.endswith('.npy'):
params = np.load(fpath, encoding='latin1').item()
dic = {k: v.shape for k, v in six.iteritems(params)}
elif fpath.endswith('.npz'):
params = dict(np.load(fpath))
dic = {k: v.shape for k, v in six.iteritems(params)}
else:
path = get_checkpoint_path(fpath)
reader = tf.train.NewCheckpointReader(path)
dic = reader.get_variable_to_shape_map()
pprint.pprint(dic)
| 723 | 25.814815 | 60 | py |
SyNet | SyNet-master/tensorpack/scripts/dump-model-params.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# File: dump-model-params.py
import argparse
import numpy as np
import os
import six
import tensorflow as tf
from tensorpack import logger
from tensorpack.tfutils import varmanip
from tensorpack.tfutils.common import get_op_tensor_name, get_tf_version_tuple
TF_version = get_tf_version_tuple()
def _import_external_ops(message):
if "horovod" in message.lower():
logger.info("Importing horovod ...")
import horovod.tensorflow # noqa
return
if "MaxBytesInUse" in message:
logger.info("Importing memory_stats ...")
from tensorflow.contrib.memory_stats import MaxBytesInUse # noqa
return
if 'Nccl' in message:
logger.info("Importing nccl ...")
if TF_version <= (1, 12):
try:
from tensorflow.contrib.nccl.python.ops.nccl_ops import _validate_and_load_nccl_so
except Exception:
pass
else:
_validate_and_load_nccl_so()
from tensorflow.contrib.nccl.ops import gen_nccl_ops # noqa
else:
from tensorflow.python.ops import gen_nccl_ops # noqa
return
if 'ZMQConnection' in message:
import zmq_ops # noqa
return
logger.error("Unhandled error: " + message)
def guess_inputs(input_dir):
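    """
    Given a train_log directory, pick the newest graph-*.meta file and the
    model-* checkpoint with the largest step number.
    """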
meta_candidates = []
model_candidates = []
for path in os.listdir(input_dir):
if path.startswith('graph-') and path.endswith('.meta'):
meta_candidates.append(path)
if path.startswith('model-') and path.endswith('.index'):
modelid = int(path[len('model-'):-len('.index')])
model_candidates.append((path, modelid))
assert len(meta_candidates)
meta = sorted(meta_candidates)[-1]
if len(meta_candidates) > 1:
logger.info("Choosing {} from {} as graph file.".format(meta, meta_candidates))
else:
logger.info("Choosing {} as graph file.".format(meta))
assert len(model_candidates)
model = sorted(model_candidates, key=lambda x: x[1])[-1][0]
if len(model_candidates) > 1:
logger.info("Choosing {} from {} as model file.".format(model, [x[0] for x in model_candidates]))
else:
logger.info("Choosing {} as model file.".format(model))
return os.path.join(input_dir, model), os.path.join(input_dir, meta)
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='Keep only TRAINABLE and MODEL variables in a checkpoint.')
parser.add_argument('--meta', help='metagraph file')
parser.add_argument(dest='input', help='input model file, has to be a TF checkpoint')
parser.add_argument(dest='output', help='output model file, can be npz or TF checkpoint')
args = parser.parse_args()
if os.path.isdir(args.input):
input, meta = guess_inputs(args.input)
else:
meta = args.meta
input = args.input
# this script does not need GPU
os.environ['CUDA_VISIBLE_DEVICES'] = ''
if args.meta is not None:
while True:
try:
tf.reset_default_graph()
tf.train.import_meta_graph(meta, clear_devices=True)
except KeyError as e:
op_name = e.args[0]
_import_external_ops(op_name)
except tf.errors.NotFoundError as e:
_import_external_ops(str(e))
else:
break
# loading...
if input.endswith('.npz'):
dic = np.load(input)
else:
dic = varmanip.load_chkpt_vars(input)
dic = {get_op_tensor_name(k)[1]: v for k, v in six.iteritems(dic)}
if args.meta is not None:
# save variables that are GLOBAL, and either TRAINABLE or MODEL
var_to_dump = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
var_to_dump.extend(tf.get_collection(tf.GraphKeys.MODEL_VARIABLES))
if len(set(var_to_dump)) != len(var_to_dump):
logger.warn("TRAINABLE and MODEL variables have duplication!")
var_to_dump = list(set(var_to_dump))
globvarname = {k.name for k in tf.global_variables()}
var_to_dump = {k.name for k in var_to_dump if k.name in globvarname}
for name in var_to_dump:
assert name in dic, "Variable {} not found in the model!".format(name)
else:
var_to_dump = set(dic.keys())
dic_to_dump = {k: v for k, v in six.iteritems(dic) if k in var_to_dump}
varmanip.save_chkpt_vars(dic_to_dump, args.output)
| 4,521 | 35.176 | 105 | py |
SyNet | SyNet-master/tensorpack/tests/test_resnet.py | from case_script import TestPythonScript # noqa
# this test occasionally fails (memory issue on travis?)
# class ResnetTest(TestPythonScript):
# @property
# def script(self):
# return '../examples/ResNet/imagenet-resnet.py'
#
# def test(self):
# self.assertSurvive(
# self.script,
# args=['--fake', '--data_format NHWC'], timeout=20)
| 391 | 25.133333 | 64 | py |
SyNet | SyNet-master/tensorpack/tests/benchmark-serializer.py | #!/usr/bin/env python3
import numpy as np
import argparse
import pyarrow as pa
from tabulate import tabulate
import operator
from tensorpack.utils import logger
from tensorpack.utils.serialize import (
MsgpackSerializer,
PyarrowSerializer,
PickleSerializer,
ForkingPickler,
)
from tensorpack.utils.timer import Timer
def benchmark_serializer(dumps, loads, data, num):
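    """
    Run dumps/loads on `data` `num` times and return the average
    (encode_seconds, decode_seconds) per call.
    """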
buf = dumps(data)
enc_timer = Timer()
dec_timer = Timer()
enc_timer.pause()
dec_timer.pause()
for k in range(num):
enc_timer.resume()
buf = dumps(data)
enc_timer.pause()
dec_timer.resume()
loads(buf)
dec_timer.pause()
dumps_time = enc_timer.seconds() / num
loads_time = dec_timer.seconds() / num
return dumps_time, loads_time
def display_results(name, results):
logger.info("Encoding benchmark for {}:".format(name))
data = sorted(((x, y[0]) for x, y in results), key=operator.itemgetter(1))
print(tabulate(data, floatfmt='.5f'))
logger.info("Decoding benchmark for {}:".format(name))
data = sorted(((x, y[1]) for x, y in results), key=operator.itemgetter(1))
print(tabulate(data, floatfmt='.5f'))
def benchmark_all(name, serializers, data, num=30):
logger.info("Benchmarking {} ...".format(name))
results = []
for serializer_name, dumps, loads in serializers:
results.append((serializer_name, benchmark_serializer(dumps, loads, data, num=num)))
display_results(name, results)
def fake_json_data():
return {
'words': """
Lorem ipsum dolor sit amet, consectetur adipiscing
elit. Mauris adipiscing adipiscing placerat.
Vestibulum augue augue,
pellentesque quis sollicitudin id, adipiscing.
""" * 100,
'list': list(range(100)) * 500,
'dict': {str(i): 'a' for i in range(50000)},
'dict2': {i: 'a' for i in range(50000)},
'int': 3000,
'float': 100.123456
}
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("task")
args = parser.parse_args()
serializers = [
("msgpack", MsgpackSerializer.dumps, MsgpackSerializer.loads),
("pyarrow-buf", PyarrowSerializer.dumps, PyarrowSerializer.loads),
("pyarrow-bytes", PyarrowSerializer.dumps_bytes, PyarrowSerializer.loads),
("pickle", PickleSerializer.dumps, PickleSerializer.loads),
("forking-pickle", ForkingPickler.dumps, ForkingPickler.loads),
]
if args.task == "numpy":
numpy_data = [np.random.rand(64, 224, 224, 3).astype("float32"), np.random.rand(64).astype('int32')]
benchmark_all("numpy data", serializers, numpy_data)
elif args.task == "json":
benchmark_all("json data", serializers, fake_json_data(), num=50)
elif args.task == "torch":
import torch
from pyarrow.lib import _default_serialization_context
pa.register_torch_serialization_handlers(_default_serialization_context)
torch_data = [torch.rand(64, 224, 224, 3), torch.rand(64).to(dtype=torch.int32)]
benchmark_all("torch data", serializers[1:], torch_data)
| 3,180 | 31.131313 | 108 | py |
SyNet | SyNet-master/tensorpack/tests/test_infogan.py | from case_script import TestPythonScript
from tensorpack.tfutils.common import get_tf_version_tuple
class InfoGANTest(TestPythonScript):
@property
def script(self):
return '../examples/GAN/InfoGAN-mnist.py'
def test(self):
return True # https://github.com/tensorflow/tensorflow/issues/24517
if get_tf_version_tuple() < (1, 4):
return True # requires leaky_relu
self.assertSurvive(self.script, args=None)
| 470 | 26.705882 | 76 | py |
SyNet | SyNet-master/tensorpack/tests/test_mnist.py | from case_script import TestPythonScript
class MnistTest(TestPythonScript):
@property
def script(self):
return '../examples/basics/mnist-convnet.py'
def test(self):
self.assertSurvive(self.script, args=None)
| 240 | 19.083333 | 52 | py |
SyNet | SyNet-master/tensorpack/tests/case_script.py | from abc import abstractproperty
import unittest
import subprocess
import shlex
import sys
import threading
import os
import shutil
class PythonScript(threading.Thread):
"""A wrapper to start a python script with timeout.
    To test the actual models even without GPUs, we simply start them and
    check whether they survive for a certain amount of time ("timeout"). This allows
    testing whether all imports are correct and the computation graph can be built,
    without running the entire model on the CPU.
Attributes:
cmd (str): command to execute the example with all flags (including python)
p: process handle
timeout (int): timeout in seconds
"""
def __init__(self, cmd, timeout):
"""Prepare a python script
Args:
cmd (str): command to execute the example with all flags (including python)
timeout (int): time in seconds the script has to survive
"""
threading.Thread.__init__(self)
self.cmd = cmd
self.timeout = timeout
def run(self):
self.p = subprocess.Popen(shlex.split(self.cmd), stderr=subprocess.PIPE, stdout=subprocess.PIPE)
self.out, self.err = self.p.communicate()
def execute(self):
"""Execute python script in other process.
Raises:
SurviveException: contains the error message of the script if it terminated before timeout
"""
self.start()
self.join(self.timeout)
if self.is_alive():
self.p.terminate()
self.p.kill() # kill -9
self.join()
else:
            # something unexpected happened here; this script was supposed to survive at least the timeout
if len(self.err) > 0:
output = u"STDOUT: \n\n\n" + self.out.decode('utf-8')
output += u"\n\n\n STDERR: \n\n\n" + self.err.decode('utf-8')
raise AssertionError(output)
class TestPythonScript(unittest.TestCase):
@abstractproperty
def script(self):
pass
@staticmethod
def clear_trainlog(script):
script = os.path.basename(script)
script = script[:-3]
if os.path.isdir(os.path.join("train_log", script)):
shutil.rmtree(os.path.join("train_log", script))
def assertSurvive(self, script, args=None, timeout=20): # noqa
cmd = "python{} {}".format(sys.version_info.major, script)
if args:
cmd += " " + " ".join(args)
PythonScript(cmd, timeout=timeout).execute()
def setUp(self):
TestPythonScript.clear_trainlog(self.script)
def tearDown(self):
TestPythonScript.clear_trainlog(self.script)
| 2,680 | 30.916667 | 105 | py |
SyNet | SyNet-master/tensorpack/tests/test_char_rnn.py | import os
from case_script import TestPythonScript
def random_content():
return ('Lorem ipsum dolor sit amet\n'
'consetetur sadipscing elitr\n'
'sed diam nonumy eirmod tempor invidunt ut labore\n')
class CharRNNTest(TestPythonScript):
@property
def script(self):
return '../examples/Char-RNN/char-rnn.py'
def setUp(self):
super(CharRNNTest, self).setUp()
with open('input.txt', 'w') as f:
f.write(random_content())
def test(self):
self.assertSurvive(self.script, args=['train'])
def tearDown(self):
super(CharRNNTest, self).tearDown()
os.remove('input.txt')
| 675 | 22.310345 | 65 | py |
SyNet | SyNet-master/tensorpack/tests/test_mnist_similarity.py | from case_script import TestPythonScript
class SimilarityLearningTest(TestPythonScript):
@property
def script(self):
return '../examples/SimilarityLearning/mnist-embeddings.py'
def test(self):
self.assertSurvive(self.script, args=['--algorithm triplet'], timeout=10)
| 299 | 24 | 81 | py |
SyNet | SyNet-master/tensorpack/docs/conf.py | # -*- coding: utf-8 -*-
# flake8: noqa
# tensorpack documentation build configuration file, created by
# sphinx-quickstart on Sun Mar 27 01:41:24 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os, re
import mock
import inspect
from sphinx.domains import Domain
class GithubURLDomain(Domain):
"""
Resolve certain links in markdown files to github source.
"""
name = "githuburl"
ROOT = "https://github.com/tensorpack/tensorpack/blob/master/"
def resolve_any_xref(self, env, fromdocname, builder, target, node, contnode):
github_url = None
if ".html" not in target:
if target.startswith("../../") and not target.startswith("../../modules"):
url = target.replace("../", "")
github_url = url
if github_url is not None:
if github_url.endswith("README"):
# bug of recommonmark.
# https://github.com/readthedocs/recommonmark/blob/ddd56e7717e9745f11300059e4268e204138a6b1/recommonmark/parser.py#L152-L155
github_url += ".md"
print("Ref {} resolved to github:{}".format(target, github_url))
contnode["refuri"] = self.ROOT + github_url
return [("githuburl:any", contnode)]
else:
return []
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../'))
os.environ['DOC_BUILDING'] = '1'
ON_RTD = (os.environ.get('READTHEDOCS') == 'True')
MOCK_MODULES = ['tabulate', 'h5py',
'cv2', 'zmq', 'lmdb',
'msgpack', 'msgpack_numpy', 'pyarrow',
'sklearn', 'sklearn.datasets',
'scipy', 'scipy.misc', 'scipy.io',
'tornado', 'tornado.concurrent',
'horovod', 'horovod.tensorflow',
'subprocess32', 'functools32', 'psutil']
# it's better to have tensorflow installed (for some docs to show)
# but it's OK to mock it as well
try:
import tensorflow
except ImportError:
mod = sys.modules['tensorflow'] = mock.Mock(name='tensorflow')
mod.__version__ = mod.VERSION = '1.12'
MOCK_MODULES.extend(['tensorflow.python.training.monitored_session'])
MOCK_MODULES.extend(['tensorflow.python.training'])
MOCK_MODULES.extend(['tensorflow.python.client'])
MOCK_MODULES.extend(['tensorflow.python.framework'])
MOCK_MODULES.extend(['tensorflow.python.platform'])
MOCK_MODULES.extend(['tensorflow.python.tools'])
MOCK_MODULES.extend(['tensorflow.contrib.graph_editor'])
for mod_name in MOCK_MODULES:
sys.modules[mod_name] = mock.Mock(name=mod_name)
sys.modules['cv2'].__version__ = '3.2.1' # fake version
sys.modules['msgpack'].version = (0, 5, 2)
import tensorpack
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = '3.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'recommonmark',
'sphinx.ext.autodoc',
'sphinx.ext.todo',
'sphinx.ext.napoleon',
#'sphinx.ext.autosectionlabel',
#'sphinx.ext.coverage',
'sphinx.ext.mathjax',
'sphinx.ext.intersphinx',
'sphinx.ext.viewcode',
]
# -- Configurations for plugins ------------
napoleon_google_docstring = True
napoleon_include_init_with_doc = True
napoleon_include_special_with_doc = True
napoleon_numpy_docstring = False
napoleon_use_rtype = False
if ON_RTD:
intersphinx_timeout = 10
else:
# skip this when building locally
intersphinx_timeout = 0.1
intersphinx_mapping = {
'python': ('https://docs.python.org/3.6', None),
'numpy': ('https://docs.scipy.org/doc/numpy/', None),
}
# -------------------------
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
source_suffix = ['.rst', '.md']
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'tensorpack'
copyright = u'2015 - 2020, Yuxin Wu, et al.'
author = u'Yuxin Wu, et al.'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = tensorpack.__version__
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['build', 'README.md']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = True
# 'tensorpack.' prefix was removed by js
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
modindex_common_prefix = ['tensorpack.']
# If true, keep warnings as "system message" paragraphs in the built documents.
keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
import sphinx_rtd_theme
html_theme = "sphinx_rtd_theme"
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {}
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = '_static/favicon.ico'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
html_domain_indices = True
# If false, no index is generated.
html_use_index = True
# If true, the index is split into individual pages for each letter.
html_split_index = False
# If true, links to the reST sources are added to the pages.
html_show_sourcelink = False
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
html_show_sphinx = False
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# avoid <li> fonts being larger
# TODO: but <li> index fonts are still larger
html_compact_lists = False
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'tensorpackdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'tensorpack.tex', u'tensorpack documentation',
author, 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'tensorpack', u'tensorpack documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'tensorpack', u'tensorpack documentation',
author, 'tensorpack', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
suppress_warnings = ['image.nonlocal_uri']
#autodoc_member_order = 'bysource'
def process_signature(app, what, name, obj, options, signature,
return_annotation):
if signature:
# replace Mock function names
        signature = re.sub(r"<Mock name='([^']+)'.*>", r"\g<1>", signature)
signature = re.sub('tensorflow', 'tf', signature)
# add scope name to layer signatures:
if hasattr(obj, 'use_scope'):
if obj.use_scope:
signature = signature[0] + 'variable_scope_name, ' + signature[1:]
elif obj.use_scope is None:
signature = signature[0] + '[variable_scope_name,] ' + signature[1:]
# signature: arg list
return signature, return_annotation
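# A rough illustration of what process_signature() does (the argument names below
# are hypothetical): for a layer registered with use_scope=True, a signature like
#     (inputs, filters, kernel_size)
# is rendered in the docs as
#     (variable_scope_name, inputs, filters, kernel_size)
# and with use_scope=None the scope name is shown as optional:
#     ([variable_scope_name,] inputs, filters, kernel_size)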
_DEPRECATED_NAMES = set([
# deprecated stuff:
'QueueInputTrainer',
'dump_dataflow_to_process_queue',
'DistributedTrainerReplicated',
'DistributedTrainerParameterServer',
'Augmentor',
"get_model_loader",
# renamed items that should not appear in docs
'DumpTensor',
'DumpParamAsImage',
'get_nr_gpu',
'TrainingMonitor',
'PeakMemoryTracker',
'TowerFuncWrapper',
'PrefetchData',
'MultiProcessPrefetchData',
'PrefetchDataZMQ',
'MultiThreadPrefetchData',
# deprecated or renamed symbolic code
'Deconv2D',
# shouldn't appear in doc:
'l2_regularizer', 'l1_regularizer',
# internal only
'execute_only_once',
'humanize_time_delta',
'SessionUpdate',
'get_checkpoint_path',
'IterSpeedCounter'
])
def autodoc_skip_member(app, what, name, obj, skip, options):
# we hide something deliberately
if getattr(obj, '__HIDE_SPHINX_DOC__', False):
return True
if name == '__init__':
if obj.__doc__ and skip:
# include_init_with_doc doesn't work well for decorated init
# https://github.com/sphinx-doc/sphinx/issues/4258
return False
# Hide some names that are deprecated or not intended to be used
if name in _DEPRECATED_NAMES:
return True
if name in ['__iter__', '__len__', 'reset_state', 'get_data', 'size']:
# skip these methods with empty docstring
if not obj.__doc__ and inspect.isfunction(obj):
# https://stackoverflow.com/questions/3589311/get-defining-class-of-unbound-method-object-in-python-3
cls = getattr(inspect.getmodule(obj),
obj.__qualname__.split('.<locals>', 1)[0].rsplit('.', 1)[0])
if issubclass(cls, tensorpack.DataFlow):
return True
return None
def setup(app):
from recommonmark.transform import AutoStructify
app.add_domain(GithubURLDomain)
app.connect('autodoc-process-signature', process_signature)
app.connect('autodoc-skip-member', autodoc_skip_member)
app.add_config_value(
'recommonmark_config',
{'auto_toc_tree_section': 'Contents',
'enable_math': True,
'enable_inline_math': True,
'enable_eval_rst': True
}, True)
app.add_transform(AutoStructify)
| 15,709 | 32.283898 | 140 | py |
SyNet | SyNet-master/tensorpack/tensorpack/libinfo.py |
import os
# issue#7378 may happen with custom opencv. It doesn't hurt to disable opencl
os.environ['OPENCV_OPENCL_RUNTIME'] = 'disabled' # https://github.com/opencv/opencv/pull/10155
try:
# issue#1924 may happen on old systems
import cv2 # noqa
# cv2.setNumThreads(0)
if int(cv2.__version__.split('.')[0]) >= 3:
cv2.ocl.setUseOpenCL(False)
# check if cv is built with cuda or openmp
info = cv2.getBuildInformation().split('\n')
for line in info:
splits = line.split()
if not len(splits):
continue
answer = splits[-1].lower()
if answer in ['yes', 'no']:
if 'cuda' in line.lower() and answer == 'yes':
# issue#1197
print("OpenCV is built with CUDA support. "
"This may cause slow initialization or sometimes segfault with TensorFlow.")
if answer == 'openmp':
print("OpenCV is built with OpenMP support. This usually results in poor performance. For details, see "
"https://github.com/tensorpack/benchmarks/blob/master/ImageNet/benchmark-opencv-resize.py")
except (ImportError, TypeError):
pass
os.environ['TF_ENABLE_WINOGRAD_NONFUSED'] = '1' # issue#9339
os.environ['TF_AUTOTUNE_THRESHOLD'] = '2' # use more warm-up
# Since 1.3, this is not needed
os.environ['TF_AVGPOOL_USE_CUDNN'] = '1' # issue#8566
# TF1.5 features
os.environ['TF_SYNC_ON_FINISH'] = '0' # will become default
os.environ['TF_GPU_THREAD_MODE'] = 'gpu_private'
os.environ['TF_GPU_THREAD_COUNT'] = '2'
# Available in TF1.6+ & cudnn7. Haven't seen different performance on R50.
# NOTE we disable it because:
# this mode may use scaled atomic integer reduction that may cause a numerical
# overflow for certain input data range.
os.environ['TF_USE_CUDNN_BATCHNORM_SPATIAL_PERSISTENT'] = '0'
# Available since 1.12. issue#15874
# But they're sometimes buggy. We leave this decision to users.
# os.environ['TF_ENABLE_WHILE_V2'] = '1'
# os.environ['TF_ENABLE_COND_V2'] = '1'
try:
import tensorflow as tf # noqa
_version = tf.__version__.split('.')
assert (int(_version[0]), int(_version[1])) >= (1, 3), "TF>=1.3 is required!"
_HAS_TF = True
except ImportError:
print("Failed to import tensorflow.")
_HAS_TF = False
else:
# Install stacktrace handler
try:
from tensorflow.python.framework import test_util
test_util.InstallStackTraceHandler()
except Exception:
pass
# silence the massive deprecation warnings in TF 1.13+
if (int(_version[0]), int(_version[1])) >= (1, 13):
try:
from tensorflow.python.util.deprecation import silence
except Exception:
pass
else:
silence().__enter__()
try:
from tensorflow.python.util import deprecation_wrapper
deprecation_wrapper._PER_MODULE_WARNING_LIMIT = 0
except Exception:
pass
# Monkey-patch tf.test.is_gpu_available to avoid side effects:
# https://github.com/tensorflow/tensorflow/issues/26460
try:
list_dev = tf.config.experimental.list_physical_devices
except AttributeError:
pass
else:
old_is_gpu_available = tf.test.is_gpu_available
def is_gpu_available(*args, **kwargs):
if len(args) == 0 and len(kwargs) == 0:
return len(list_dev('GPU')) > 0
return old_is_gpu_available(*args, **kwargs)
tf.test.is_gpu_available = is_gpu_available
# These lines will be programmatically read/written by setup.py
# Don't touch them.
__version__ = '0.10.1'
__git_version__ = __version__
| 3,666 | 34.601942 | 116 | py |
SyNet | SyNet-master/tensorpack/tensorpack/__init__.py | # -*- coding: utf-8 -*-
# File: __init__.py
from tensorpack.libinfo import __version__, __git_version__, _HAS_TF
from tensorpack.utils import *
from tensorpack.dataflow import *
# dataflow can be used alone without installing tensorflow
# https://github.com/celery/kombu/blob/7d13f9b95d0b50c94393b962e6def928511bfda6/kombu/__init__.py#L34-L36
STATICA_HACK = True
globals()['kcah_acitats'[::-1].upper()] = _HAS_TF
if STATICA_HACK:
from tensorpack.models import *
from tensorpack.callbacks import *
from tensorpack.tfutils import *
from tensorpack.train import *
from tensorpack.input_source import *
from tensorpack.predict import *
| 663 | 26.666667 | 105 | py |
SyNet | SyNet-master/tensorpack/tensorpack/callbacks/inference.py | # -*- coding: utf-8 -*-
# File: inference.py
import numpy as np
from abc import ABCMeta
import six
from ..tfutils.common import get_op_tensor_name
from ..utils import logger
from ..utils.stats import BinaryStatistics, RatioCounter
from .base import Callback
__all__ = ['ScalarStats', 'Inferencer',
'ClassificationError', 'BinaryClassificationStats']
@six.add_metaclass(ABCMeta)
class Inferencer(Callback):
""" Base class of Inferencer.
Inferencer is a special kind of callback that should be called by :class:`InferenceRunner`.
It has the methods ``_get_fetches`` and ``_on_fetches`` which are like
:class:`SessionRunHooks`, except that they will be used only by :class:`InferenceRunner`.
.. document private functions
.. automethod:: _before_inference
.. automethod:: _after_inference
.. automethod:: _get_fetches
.. automethod:: _on_fetches
"""
def _before_epoch(self):
self._before_inference()
def _before_inference(self):
"""
Called before a new round of inference starts.
"""
pass
def _trigger_epoch(self):
ret = self._after_inference()
if ret is None:
return
for k, v in six.iteritems(ret):
try:
v = float(v)
except ValueError:
logger.warn("{} returns a non-scalar statistics!".format(type(self).__name__))
continue
else:
self.trainer.monitors.put_scalar(k, v)
def _after_inference(self):
"""
Called after a round of inference ends.
Returns a dict of scalar statistics which will be logged to monitors.
"""
pass
def get_fetches(self):
"""
Return a list of tensor names (guaranteed not op name) this inferencer needs.
"""
ret = self._get_fetches()
return [get_op_tensor_name(n)[1] for n in ret]
def _get_fetches(self):
"""
To be implemented by subclasses
"""
raise NotImplementedError()
def on_fetches(self, results):
"""
Called after each new datapoint finished the forward inference.
Args:
results(list): list of results this inferencer fetched. Has the same
length as ``self._get_fetches()``.
"""
self._on_fetches(results)
def _on_fetches(self, results):
"""
To be implemented by subclasses
"""
raise NotImplementedError()
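# A minimal sketch of a user-defined Inferencer (the tensor name 'my_cost' is
# hypothetical): it averages a scalar tensor over the inference run and reports
# the result to the monitors as 'val_my_cost'.
#
#     class AverageCost(Inferencer):
#         def _before_inference(self):
#             self._sum, self._cnt = 0.0, 0
#         def _get_fetches(self):
#             return ['my_cost']
#         def _on_fetches(self, results):
#             self._sum += float(results[0])
#             self._cnt += 1
#         def _after_inference(self):
#             return {'val_my_cost': self._sum / max(self._cnt, 1)}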
class ScalarStats(Inferencer):
"""
Statistics of some scalar tensor.
The value will be averaged over all given datapoints.
Note that the average of accuracy over all batches is not necessarily the
accuracy of the whole dataset. See :class:`ClassificationError` for details.
"""
def __init__(self, names, prefix='validation'):
"""
Args:
names(list or str): list of names or just one name. The
corresponding tensors have to be scalar.
prefix(str): a prefix for logging
"""
if not isinstance(names, list):
self.names = [names]
else:
self.names = names
self.prefix = prefix
def _before_inference(self):
self.stats = []
def _get_fetches(self):
return self.names
def _on_fetches(self, output):
self.stats.append(output)
def _after_inference(self):
if len(self.stats):
self.stats = np.mean(self.stats, axis=0)
assert len(self.stats) == len(self.names)
ret = {}
for stat, name in zip(self.stats, self.names):
opname, _ = get_op_tensor_name(name)
name = '{}_{}'.format(self.prefix, opname) if self.prefix else opname
ret[name] = stat
return ret
class ClassificationError(Inferencer):
"""
Compute **true** classification error in batch mode, from a ``wrong`` tensor.
    The ``wrong`` tensor is supposed to be a binary vector containing
whether each sample in the batch is *incorrectly* classified.
You can use ``tf.nn.in_top_k`` to produce this vector.
This Inferencer produces the "true" error, which could be different from
``ScalarStats('error_rate')``.
    It takes into account the fact that batches might not have the same size in
    testing (because the size of the test set might not be a multiple of the batch size).
Therefore the result can be different from averaging the error rate of each batch.
You can also use the "correct prediction" tensor, then this inferencer will
give you "classification accuracy" instead of error.
"""
def __init__(self, wrong_tensor_name='incorrect_vector', summary_name='validation_error'):
"""
Args:
wrong_tensor_name(str): name of the ``wrong`` binary vector tensor.
summary_name(str): the name to log the error with.
"""
self.wrong_tensor_name = wrong_tensor_name
self.summary_name = summary_name
def _before_inference(self):
self.err_stat = RatioCounter()
def _get_fetches(self):
return [self.wrong_tensor_name]
def _on_fetches(self, outputs):
vec = outputs[0]
# TODO put shape assertion into inference-runner
assert vec.ndim == 1, "{} is not a vector!".format(self.wrong_tensor_name)
batch_size = len(vec)
wrong = np.sum(vec)
self.err_stat.feed(wrong, batch_size)
def _after_inference(self):
return {self.summary_name: self.err_stat.ratio}
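# Usage sketch (assumes the graph defines a 0/1 vector named 'incorrect_vector'
# via tf.nn.in_top_k, and that the inferencer is run over the validation set by
# an InferenceRunner-style callback):
#
#     ClassificationError('incorrect_vector', summary_name='val-error-top1')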
class BinaryClassificationStats(Inferencer):
"""
Compute precision / recall in binary classification, given the
prediction vector and the label vector.
"""
def __init__(self, pred_tensor_name, label_tensor_name, prefix='val'):
"""
Args:
pred_tensor_name(str): name of the 0/1 prediction tensor.
label_tensor_name(str): name of the 0/1 label tensor.
"""
self.pred_tensor_name = pred_tensor_name
self.label_tensor_name = label_tensor_name
self.prefix = prefix
def _before_inference(self):
self.stat = BinaryStatistics()
def _get_fetches(self):
return [self.pred_tensor_name, self.label_tensor_name]
def _on_fetches(self, outputs):
pred, label = outputs
self.stat.feed(pred, label)
def _after_inference(self):
return {self.prefix + '_precision': self.stat.precision,
self.prefix + '_recall': self.stat.recall}
| 6,553 | 30.358852 | 95 | py |
SyNet | SyNet-master/tensorpack/tensorpack/callbacks/base.py | # -*- coding: utf-8 -*-
# File: base.py
from abc import ABCMeta
import six
from ..compat import tfv1 as tf
from ..tfutils.common import get_op_or_tensor_by_name
__all__ = ['Callback', 'ProxyCallback', 'CallbackFactory']
@six.add_metaclass(ABCMeta)
class Callback(object):
""" Base class for all callbacks. See
`Write a Callback
<http://tensorpack.readthedocs.io/tutorial/extend/callback.html>`_
for more detailed explanation of the callback methods.
Attributes:
epoch_num(int): trainer.epoch_num
global_step(int): trainer.global_step
local_step(int): trainer.local_step
trainer(Trainer): the trainer.
graph(tf.Graph): the graph.
Note:
These attributes are available only after (and including)
:meth:`_setup_graph`.
.. document private functions
.. automethod:: _setup_graph
.. automethod:: _before_train
.. automethod:: _after_train
.. automethod:: _before_run
.. automethod:: _after_run
.. automethod:: _before_epoch
.. automethod:: _after_epoch
.. automethod:: _trigger_step
.. automethod:: _trigger_epoch
.. automethod:: _trigger
"""
_chief_only = True
name_scope = ""
"""
A name scope for ops created inside this callback.
By default to the name of the class, but can be set per-instance.
"""
def setup_graph(self, trainer):
self.trainer = trainer
self.graph = tf.get_default_graph()
scope_name = self.name_scope or type(self).__name__
scope_name = scope_name.replace('_', '')
with tf.name_scope(scope_name):
self._setup_graph()
def _setup_graph(self):
"""
Called before finalizing the graph.
Override this method to setup the ops used in the callback.
This is the same as ``tf.train.SessionRunHook.begin()``.
"""
pass
def before_train(self):
self._before_train()
def _before_train(self):
"""
Called right before the first iteration. The main difference to
`setup_graph` is that at this point the graph is finalized and a default session is initialized.
Override this method to, e.g. run some operations under the session.
This is similar to ``tf.train.SessionRunHook.after_create_session()``, but different:
it is called after the session is initialized by :class:`tfutils.SessionInit`.
"""
pass
def before_epoch(self):
self._before_epoch()
def _before_epoch(self):
"""
Called right before each epoch.
Usually you should use the :meth:`trigger` callback to run something between epochs.
Use this method only when something really needs to be run **immediately** before each epoch.
"""
pass
def after_epoch(self):
self._after_epoch()
def _after_epoch(self):
"""
Called right after each epoch.
Usually you should use the :meth:`trigger` callback to run something between epochs.
Use this method only when something really needs to be run **immediately** after each epoch.
"""
pass
def before_run(self, ctx):
fetches = self._before_run(ctx)
if fetches is None:
return None
if isinstance(fetches, tf.train.SessionRunArgs):
return fetches
# also support list of names
assert isinstance(fetches, list), fetches
ret = []
for f in fetches:
if isinstance(f, (tf.Tensor, tf.Operation)):
ret.append(f)
else:
                # TODO: resolving tensors/ops by name at every step is slow; warn about the speed cost
ret.append(get_op_or_tensor_by_name(f))
return tf.train.SessionRunArgs(fetches=ret)
def _before_run(self, ctx):
"""
It is called before every ``hooked_sess.run()`` call, and it
registers some extra op/tensors to run in the next call.
This method is the same as ``tf.train.SessionRunHook.before_run``.
Refer to TensorFlow docs for more details.
"""
return None
def after_run(self, run_context, run_values):
self._after_run(run_context, run_values)
def _after_run(self, run_context, run_values):
"""
It is called after every ``hooked_sess.run()`` call, and it
processes the values requested by the corresponding :meth:`before_run`.
It is equivalent to ``tf.train.SessionRunHook.after_run()``, refer to
TensorFlow docs for more details.
"""
pass
def trigger_step(self):
self._trigger_step()
def _trigger_step(self):
"""
Called after each :meth:`Trainer.run_step()` completes. Defaults to no-op.
You can override it to implement, e.g. a ProgressBar.
"""
pass
def trigger_epoch(self):
self._trigger_epoch()
def _trigger_epoch(self):
"""
Called after the completion of every epoch. Defaults to call ``self.trigger()``
"""
self.trigger()
def trigger(self):
self._trigger()
def _trigger(self):
"""
Override this method to define a general trigger behavior, to be used with trigger schedulers.
Note that the schedulers (e.g. :class:`PeriodicTrigger`) might call this
method both inside an epoch and after an epoch.
When used without the scheduler, this method by default will be called by `trigger_epoch()`.
"""
pass
def after_train(self):
self._after_train()
def _after_train(self):
"""
Called after training.
"""
pass
@property
def epoch_num(self):
return self.trainer.epoch_num
@property
def global_step(self):
return self.trainer.global_step
@property
def local_step(self):
return self.trainer.local_step
@property
def chief_only(self):
"""
Only run this callback on chief training process.
Returns: bool
"""
return self._chief_only
@chief_only.setter
def chief_only(self, v):
self._chief_only = v
def set_chief_only(self, v=True):
"""
Set chief_only property, and returns the callback itself.
"""
self._chief_only = v
return self
def __str__(self):
return type(self).__name__
# TODO RENAME: same function to be used to get ops as well
def get_tensors_maybe_in_tower(self, names):
"""
Get tensors in the graph with the given names.
Will automatically check for the *first training tower*
if no existing tensor is found with the name.
Returns:
[tf.Tensor]
"""
from ..train.tower import TowerTrainer # noqa
def get_tensor(name):
msg = "Tensor {} not found in the graph!".format(name)
try:
return get_op_or_tensor_by_name(name)
except KeyError:
pass
if not isinstance(self.trainer, TowerTrainer):
raise KeyError(msg)
towers = self.trainer.towers
try:
return towers.training()[0][name]
except KeyError:
raise KeyError(msg)
return [get_tensor(name) for name in names]
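# A minimal sketch of a custom callback (the tensor name 'total_cost' is
# hypothetical): fetch a scalar at every step and log the last value once per epoch.
#
#     class LastCostLogger(Callback):
#         def _setup_graph(self):
#             self._cost = self.get_tensors_maybe_in_tower(['total_cost'])[0]
#         def _before_run(self, ctx):
#             return tf.train.SessionRunArgs(fetches=self._cost)
#         def _after_run(self, ctx, run_values):
#             self._last_cost = run_values.results
#         def _trigger_epoch(self):
#             self.trainer.monitors.put_scalar('last_cost', self._last_cost)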
class ProxyCallback(Callback):
""" A callback which proxy all methods to another callback.
It's useful as a base class of callbacks which decorate other callbacks.
"""
def __init__(self, cb):
"""
Args:
cb(Callback): the underlying callback
"""
assert isinstance(cb, Callback), type(cb)
self.chief_only = cb.chief_only
self.cb = cb
def _before_train(self):
self.cb.before_train()
def _setup_graph(self):
with tf.name_scope(None):
self.cb.setup_graph(self.trainer)
def _trigger_epoch(self):
self.cb.trigger_epoch()
def _trigger(self):
self.cb.trigger()
def _trigger_step(self):
self.cb.trigger_step()
def _after_train(self):
self.cb.after_train()
def _before_epoch(self):
self.cb.before_epoch()
def _after_epoch(self):
self.cb.after_epoch()
def _before_run(self, ctx):
return self.cb._before_run(ctx)
def _after_run(self, ctx, run_values):
self.cb._after_run(ctx, run_values)
def __str__(self):
return "Proxy-" + str(self.cb)
class CallbackFactory(Callback):
"""
Create a callback with some lambdas.
"""
def __init__(self, setup_graph=None, before_train=None, trigger=None,
after_train=None):
"""
Each lambda takes ``self`` as the only argument.
"""
self._cb_setup_graph = setup_graph
self._cb_before_train = before_train
self._cb_trigger = trigger
self._cb_after_train = after_train
def _setup_graph(self):
if self._cb_setup_graph:
self._cb_setup_graph(self)
def _before_train(self):
if self._cb_before_train:
self._cb_before_train(self)
def _trigger(self):
if self._cb_trigger:
self._cb_trigger(self)
def _after_train(self):
if self._cb_after_train:
self._cb_after_train(self)
def __str__(self):
strs = []
if self._cb_setup_graph is not None:
strs.append("setup_graph=" + str(self._cb_setup_graph))
if self._cb_before_train is not None:
strs.append("before_train=" + str(self._cb_before_train))
if self._cb_trigger is not None:
strs.append("trigger=" + str(self._cb_trigger))
if self._cb_after_train is not None:
strs.append("after_train=" + str(self._cb_after_train))
return "CallbackFactory({})".format(', '.join(strs))
| 9,891 | 28.094118 | 104 | py |
SyNet | SyNet-master/tensorpack/tensorpack/callbacks/concurrency.py | # -*- coding: utf-8 -*-
# File: concurrency.py
import multiprocessing as mp
from ..utils import logger
from ..utils.concurrency import StoppableThread, start_proc_mask_signal
from .base import Callback
__all__ = ['StartProcOrThread']
class StartProcOrThread(Callback):
"""
Start some threads or processes before training.
"""
_chief_only = False
def __init__(self, startable, stop_at_last=True):
"""
Args:
            startable (list): list of processes or threads which have a ``start()`` method.
                Can also be a single instance of a process or thread.
stop_at_last (bool): whether to stop the processes or threads
after training. It will use :meth:`Process.terminate()` or
:meth:`StoppableThread.stop()`, but will do nothing on normal
``threading.Thread`` or other startable objects.
"""
if not isinstance(startable, list):
startable = [startable]
self._procs_threads = startable
self._stop_at_last = stop_at_last
def _before_train(self):
logger.info("Starting " +
', '.join([k.name for k in self._procs_threads]) + ' ...')
# avoid sigint get handled by other processes
start_proc_mask_signal(self._procs_threads)
def _after_train(self):
if not self._stop_at_last:
return
for k in self._procs_threads:
if not k.is_alive():
continue
if isinstance(k, mp.Process):
logger.info("Stopping {} ...".format(k.name))
k.terminate()
k.join(5.0)
if k.is_alive():
logger.error("Cannot join process {}.".format(k.name))
elif isinstance(k, StoppableThread):
logger.info("Stopping {} ...".format(k.name))
k.stop()
k.join(5.0)
if k.is_alive():
logger.error("Cannot join thread {}.".format(k.name))
| 2,042 | 33.627119 | 89 | py |
SyNet | SyNet-master/tensorpack/tensorpack/callbacks/hooks.py | # -*- coding: utf-8 -*-
# File: hooks.py
""" Compatible layers between tf.train.SessionRunHook and Callback"""
import tensorflow as tf
from ..compat import tfv1
from ..utils.develop import HIDE_DOC
from .base import Callback
__all__ = ['CallbackToHook', 'HookToCallback', 'TFLocalCLIDebugHook']
class CallbackToHook(tfv1.train.SessionRunHook):
"""
Hooks are less powerful than callbacks so the conversion is incomplete.
It only converts the ``before_run/after_run`` calls.
This is only for internal implementation of
``before_run/after_run`` callbacks.
You shouldn't need to use this.
"""
def __init__(self, cb):
self._cb = cb
@HIDE_DOC
def before_run(self, ctx):
return self._cb.before_run(ctx)
@HIDE_DOC
def after_run(self, ctx, vals):
self._cb.after_run(ctx, vals)
class HookToCallback(Callback):
"""
Make a ``tf.train.SessionRunHook`` into a callback.
Note that when ``SessionRunHook.after_create_session`` is called, the ``coord`` argument will be None.
"""
_chief_only = False
def __init__(self, hook):
"""
Args:
hook (tf.train.SessionRunHook):
"""
self._hook = hook
def _setup_graph(self):
with tf.name_scope(None): # jump out of the name scope
self._hook.begin()
def _before_train(self):
sess = tf.get_default_session()
# coord is set to None when converting
self._hook.after_create_session(sess, None)
def _before_run(self, ctx):
return self._hook.before_run(ctx)
def _after_run(self, ctx, run_values):
self._hook.after_run(ctx, run_values)
def _after_train(self):
self._hook.end(self.trainer.sess)
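# Usage sketch: wrap a standard TF v1 hook so it runs inside the tensorpack
# callback framework. The output directory below is hypothetical.
#
#     HookToCallback(tf.train.StepCounterHook(every_n_steps=100, output_dir='/tmp/tp-log'))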
class TFLocalCLIDebugHook(HookToCallback):
"""
Use the hook `tfdbg.LocalCLIDebugHook` in tensorpack.
"""
_chief_only = True
def __init__(self, *args, **kwargs):
"""
Args:
args, kwargs: arguments to create `tfdbg.LocalCLIDebugHook`.
Refer to tensorflow documentation for details.
"""
from tensorflow.python import debug as tfdbg
super(TFLocalCLIDebugHook, self).__init__(tfdbg.LocalCLIDebugHook(*args, **kwargs))
def add_tensor_filter(self, *args, **kwargs):
"""
Wrapper of `tfdbg.LocalCLIDebugHook.add_tensor_filter`.
Refer to tensorflow documentation for details.
"""
self._hook.add_tensor_filter(*args, **kwargs)
| 2,511 | 25.442105 | 106 | py |
SyNet | SyNet-master/tensorpack/tensorpack/callbacks/param_test.py | # -*- coding: utf-8 -*-
import unittest
import unittest.mock
import tensorflow as tf
from ..utils import logger
from ..train.trainers import NoOpTrainer
from .param import ScheduledHyperParamSetter, ObjAttrParam
class ParamObject(object):
"""
An object that holds the param to be set, for testing purposes.
"""
PARAM_NAME = 'param'
def __init__(self):
self.param_history = {}
self.__dict__[self.PARAM_NAME] = 1.0
def __setattr__(self, name, value):
if name == self.PARAM_NAME:
self._set_param(value)
super(ParamObject, self).__setattr__(name, value)
def _set_param(self, value):
self.param_history[self.trainer.global_step] = value
class ScheduledHyperParamSetterTest(unittest.TestCase):
def setUp(self):
self._param_obj = ParamObject()
def tearDown(self):
tf.reset_default_graph()
def _create_trainer_with_scheduler(self, scheduler,
steps_per_epoch, max_epoch, starting_epoch=1):
trainer = NoOpTrainer()
tf.get_variable(name='test_var', shape=[])
self._param_obj.trainer = trainer
trainer.train_with_defaults(
callbacks=[scheduler],
extra_callbacks=[],
monitors=[],
steps_per_epoch=steps_per_epoch,
max_epoch=max_epoch,
starting_epoch=starting_epoch
)
return self._param_obj.param_history
def testInterpolation(self):
scheduler = ScheduledHyperParamSetter(
ObjAttrParam(self._param_obj, ParamObject.PARAM_NAME),
[(30, 0.3), (40, 0.4), (50, 0.5)], interp='linear', step_based=True)
history = self._create_trainer_with_scheduler(scheduler, 10, 50, starting_epoch=20)
self.assertEqual(min(history.keys()), 30)
self.assertEqual(history[30], 0.3)
self.assertEqual(history[40], 0.4)
self.assertEqual(history[45], 0.45)
def testSchedule(self):
scheduler = ScheduledHyperParamSetter(
ObjAttrParam(self._param_obj, ParamObject.PARAM_NAME),
[(10, 0.3), (20, 0.4), (30, 0.5)])
history = self._create_trainer_with_scheduler(scheduler, 1, 50)
self.assertEqual(min(history.keys()), 10)
self.assertEqual(len(history), 3)
def testStartAfterSchedule(self):
scheduler = ScheduledHyperParamSetter(
ObjAttrParam(self._param_obj, ParamObject.PARAM_NAME),
[(10, 0.3), (20, 0.4), (30, 0.5)])
history = self._create_trainer_with_scheduler(scheduler, 1, 92, starting_epoch=90)
self.assertEqual(len(history), 0)
def testWarningStartInTheMiddle(self):
scheduler = ScheduledHyperParamSetter(
ObjAttrParam(self._param_obj, ParamObject.PARAM_NAME),
[(10, 0.3), (20, 0.4), (30, 0.5)])
with self.assertLogs(logger=logger._logger, level='WARNING'):
self._create_trainer_with_scheduler(scheduler, 1, 21, starting_epoch=20)
def testNoWarningStartInTheMiddle(self):
scheduler = ScheduledHyperParamSetter(
ObjAttrParam(self._param_obj, ParamObject.PARAM_NAME),
[(10, 0.3), (20, 1.0), (30, 1.5)])
with unittest.mock.patch('tensorpack.utils.logger.warning') as warning:
self._create_trainer_with_scheduler(scheduler, 1, 22, starting_epoch=21)
self.assertFalse(warning.called)
if __name__ == '__main__':
unittest.main()
| 3,456 | 35.776596 | 91 | py |
SyNet | SyNet-master/tensorpack/tensorpack/callbacks/steps.py | # -*- coding: utf-8 -*-
# File: steps.py
""" Some common step callbacks. """
import tqdm
from six.moves import zip
from ..compat import tfv1 as tf
from ..tfutils.common import get_global_step_var, get_op_tensor_name
from ..utils import logger
from ..utils.naming import GLOBAL_STEP_INCR_OP_NAME
from ..utils.utils import get_tqdm_kwargs
from .base import Callback
__all__ = ['TensorPrinter', 'ProgressBar', 'SessionRunTimeout']
class TensorPrinter(Callback):
""" Prints the value of some tensors in each step.
It's an example of how ``before_run/after_run`` works.
"""
def __init__(self, names):
"""
Args:
names(list): list of string, the names of the tensors to print.
"""
names = [get_op_tensor_name(n)[1] for n in names]
logger.warn("Using tf.Print in the graph is much faster than TensorPrinter!")
self._names = names
def _setup_graph(self):
self._fetches = self.get_tensors_maybe_in_tower(self._names)
def _before_run(self, _):
return self._fetches
def _after_run(self, _, vals):
args = vals.results
assert len(args) == len(self._names), len(args)
for n, v in zip(self._names, args):
logger.info("{}: {}".format(n, v))
class ProgressBar(Callback):
""" A progress bar based on tqdm.
This callback is one of the :func:`DEFAULT_CALLBACKS()`.
"""
_chief_only = False
def __init__(self, names=()):
"""
Args:
names(tuple[str]): the names of the tensors to monitor
on the progress bar.
"""
super(ProgressBar, self).__init__()
self._names = [get_op_tensor_name(n)[1] for n in names]
self._tags = [get_op_tensor_name(n)[0].split("/")[-1] for n in names]
self._bar = None
def _before_train(self):
self._last_updated = self.local_step
self._total = self.trainer.steps_per_epoch
self._tqdm_args = get_tqdm_kwargs(leave=True)
self._fetches = self.get_tensors_maybe_in_tower(self._names) or None
if self._fetches:
for t in self._fetches:
assert t.shape.ndims == 0, "ProgressBar can only print scalars, not {}".format(t)
self._fetches = tf.train.SessionRunArgs(self._fetches)
self._tqdm_args['bar_format'] = self._tqdm_args['bar_format'] + "{postfix} "
def _before_epoch(self):
self._bar = tqdm.trange(self._total, **self._tqdm_args)
def _after_epoch(self):
self._bar.close()
def _before_run(self, _):
# update progress bar when local step changed (one step is finished)
if self.local_step != self._last_updated:
self._last_updated = self.local_step
return self._fetches
else:
return None
def _after_run(self, _, run_values):
res = run_values.results
if res:
self._bar.set_postfix(zip(self._tags, res))
def _trigger_step(self):
self._bar.update()
def _after_train(self):
if self._bar: # training may get killed before the first step
self._bar.close()
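# Usage sketch (the tensor name is hypothetical): also display a scalar loss on
# the progress bar.
#
#     ProgressBar(names=['total_cost'])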
class MaintainStepCounter(Callback):
"""
It maintains the global step in the graph, making sure it's increased by one at every `hooked_sess.run`.
    This callback is used internally by the trainer; you don't need to worry about it.
"""
_chief_only = False
"""
In distributed training, we let each worker maintain its local global_step.
"""
def _setup_graph(self):
# ensure it exists
gs_var = get_global_step_var()
with tf.name_scope(None):
self.gs_incr_op = tf.assign_add(
gs_var, 1,
name=GLOBAL_STEP_INCR_OP_NAME).op
self._fetches = tf.train.SessionRunArgs(self.gs_incr_op)
def _before_train(self):
if self.global_step != 0:
logger.info("Start training with global_step={}".format(self.global_step))
def _before_run(self, _):
# always increase global_step when hooked_sess.run is called
return self._fetches
def _after_run(self, _, __):
# Keep python-side global_step in agreement with TF-side
self.trainer.loop._global_step += 1
class SessionRunTimeout(Callback):
"""
Add timeout option to each sess.run call.
"""
def __init__(self, timeout_in_ms):
"""
Args:
timeout_in_ms (int):
"""
self._timeout = int(timeout_in_ms)
opt = tf.RunOptions(timeout_in_ms=timeout_in_ms)
self._runargs = tf.train.SessionRunArgs(fetches=[], options=opt)
def _before_run(self, _):
return self._runargs
| 4,725 | 29.688312 | 108 | py |
SyNet | SyNet-master/tensorpack/tensorpack/callbacks/stats.py | # -*- coding: utf-8 -*-
# File: stats.py
from .graph import DumpParamAsImage # noqa
# for compatibility only
from .misc import InjectShell, SendStat # noqa
__all__ = []
| 173 | 18.333333 | 47 | py |
SyNet | SyNet-master/tensorpack/tensorpack/callbacks/saver.py | # -*- coding: utf-8 -*-
# File: saver.py
import os
from datetime import datetime
from ..compat import tfv1 as tf
from ..utils import fs, logger
from .base import Callback
__all__ = ['ModelSaver', 'MinSaver', 'MaxSaver']
class ModelSaver(Callback):
"""
Save the model once triggered.
"""
def __init__(self, max_to_keep=10,
keep_checkpoint_every_n_hours=0.5,
checkpoint_dir=None,
var_collections=None):
"""
Args:
max_to_keep (int): the same as in ``tf.train.Saver``.
keep_checkpoint_every_n_hours (float): the same as in ``tf.train.Saver``.
Note that "keep" does not mean "create", but means "don't delete".
checkpoint_dir (str): Defaults to ``logger.get_logger_dir()``.
var_collections (str or list of str): collection of the variables (or list of collections) to save.
"""
if var_collections is None:
var_collections = [tf.GraphKeys.GLOBAL_VARIABLES]
self._max_to_keep = max_to_keep
self._keep_every_n_hours = keep_checkpoint_every_n_hours
if not isinstance(var_collections, list):
var_collections = [var_collections]
self.var_collections = var_collections
if checkpoint_dir is None:
checkpoint_dir = logger.get_logger_dir()
if checkpoint_dir is not None:
if not tf.gfile.IsDirectory(checkpoint_dir): # v2: tf.io.gfile.isdir
tf.gfile.MakeDirs(checkpoint_dir) # v2: tf.io.gfile.makedirs
        # If None, still allow the callback to be constructed, but fail later if it is actually used.
        # For example, if chief_only=True, it can still be safely constructed
        # in non-chief workers which don't have a logger dir.
self.checkpoint_dir = fs.normpath(checkpoint_dir) if checkpoint_dir is not None else checkpoint_dir
def _setup_graph(self):
assert self.checkpoint_dir is not None, \
"Please provide 'checkpoint_dir' for ModelSaver, or use logger.set_logger_dir()"
vars = []
for key in self.var_collections:
vars.extend(tf.get_collection(key))
vars = list(set(vars))
self.path = os.path.join(self.checkpoint_dir, 'model')
self.saver = tf.train.Saver(
var_list=vars,
max_to_keep=self._max_to_keep,
keep_checkpoint_every_n_hours=self._keep_every_n_hours,
write_version=tf.train.SaverDef.V2,
save_relative_paths=True)
# Scaffold will call saver.build from this collection
tf.add_to_collection(tf.GraphKeys.SAVERS, self.saver)
def _before_train(self):
# graph is finalized, OK to write it now.
time = datetime.now().strftime('%m%d-%H%M%S')
self.saver.export_meta_graph(
os.path.join(self.checkpoint_dir,
'graph-{}.meta'.format(time)),
collection_list=self.graph.get_all_collection_keys())
def _trigger(self):
try:
self.saver.save(
tf.get_default_session(),
self.path,
global_step=tf.train.get_global_step(),
write_meta_graph=False)
logger.info("Model saved to %s." % tf.train.get_checkpoint_state(self.checkpoint_dir).model_checkpoint_path)
except (IOError, tf.errors.PermissionDeniedError,
tf.errors.ResourceExhaustedError): # disk error sometimes.. just ignore it
logger.exception("Exception in ModelSaver!")
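# Usage sketch (the directory is hypothetical): keep at most 5 recent checkpoints
# under an explicit directory instead of logger.get_logger_dir().
#
#     ModelSaver(max_to_keep=5, checkpoint_dir='/path/to/checkpoints')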
class MinSaver(Callback):
"""
Separately save the model with minimum value of some statistics.
"""
def __init__(self, monitor_stat, reverse=False, filename=None, checkpoint_dir=None):
"""
Args:
monitor_stat(str): the name of the statistics.
reverse (bool): if True, will save the maximum.
filename (str): the name for the saved model.
Defaults to ``min-{monitor_stat}.tfmodel``.
checkpoint_dir (str): the directory containing checkpoints.
Example:
Save the model with minimum validation error to
"min-val-error.tfmodel":
.. code-block:: python
MinSaver('val-error')
Note:
1. It assumes that :class:`ModelSaver` is used with the same ``checkpoint_dir``
and appears earlier in the callback list.
The default for both :class:`ModelSaver` and :class:`MinSaver`
is ``checkpoint_dir=logger.get_logger_dir()``
2. Callbacks are executed in the order they are defined. Therefore you'd want to
use this callback after the callback (e.g. InferenceRunner) that produces the statistics.
"""
self.monitor_stat = monitor_stat
self.reverse = reverse
self.filename = filename
self.best = None
self.checkpoint_dir = checkpoint_dir
if self.checkpoint_dir is None:
self.checkpoint_dir = logger.get_logger_dir()
self.checkpoint_dir = fs.normpath(self.checkpoint_dir)
def _get_stat(self):
try:
v = self.trainer.monitors.get_history(self.monitor_stat)[-1]
except (KeyError, IndexError):
v = None, None
return v
def _trigger(self):
curr_step, curr_val = self._get_stat()
if curr_step is None:
return
if self.best is None or (curr_val > self.best[1] if self.reverse else curr_val < self.best[1]):
self.best = (curr_step, curr_val)
self._save()
def _save(self):
ckpt = tf.train.get_checkpoint_state(self.checkpoint_dir)
if ckpt is None:
raise RuntimeError(
"[MinSaver] Cannot find a checkpoint state. Do you forget to use ModelSaver?")
path = ckpt.model_checkpoint_path
extreme_name = 'maximum' if self.reverse else 'minimum'
if not path.endswith(str(self.best[0])):
logger.warn("[MinSaver] New {} '{}' found at global_step={}, but the latest checkpoint is {}.".format(
extreme_name, self.monitor_stat, self.best[0], path
))
logger.warn("MinSaver will do nothing this time. "
"The callbacks may have inconsistent frequency or wrong order.")
return
newname = os.path.join(self.checkpoint_dir,
self.filename or
('max-' + self.monitor_stat if self.reverse else 'min-' + self.monitor_stat))
files_to_copy = tf.gfile.Glob(path + '*')
for file_to_copy in files_to_copy:
tf.gfile.Copy(file_to_copy, file_to_copy.replace(path, newname), overwrite=True)
logger.info("Model at global_step={} with {} {}={:.5g} saved.".format(
self.best[0], extreme_name, self.monitor_stat, self.best[1]))
class MaxSaver(MinSaver):
"""
Separately save the model with maximum value of some statistics.
See docs of :class:`MinSaver` for details.
"""
def __init__(self, monitor_stat, filename=None, checkpoint_dir=None):
"""
Args:
monitor_stat(str): the name of the statistics.
filename (str): the name for the saved model.
Defaults to ``max-{monitor_stat}.tfmodel``.
"""
super(MaxSaver, self).__init__(monitor_stat, True, filename=filename, checkpoint_dir=checkpoint_dir)
| 7,479 | 40.098901 | 120 | py |
SyNet | SyNet-master/tensorpack/tensorpack/callbacks/group.py | # -*- coding: utf-8 -*-
# File: group.py
import traceback
from contextlib import contextmanager
from time import perf_counter as timer # noqa
from ..compat import tfv1 as tf
from ..utils import logger
from ..utils.utils import humanize_time_delta
from .base import Callback
from .hooks import CallbackToHook
__all__ = ['Callbacks']
class CallbackTimeLogger(object):
def __init__(self):
self.times = []
self.tot = 0
def add(self, name, time):
self.tot += time
self.times.append((name, time))
@contextmanager
def timed_callback(self, name):
s = timer()
yield
self.add(name, timer() - s)
def log(self):
""" log the time of some heavy callbacks """
if self.tot < 3:
return
msgs = []
for name, t in self.times:
if t / self.tot > 0.3 and t > 1:
msgs.append(name + ": " + humanize_time_delta(t))
logger.info(
"Callbacks took {:.3f} sec in total. {}".format(
self.tot, '; '.join(msgs)))
class Callbacks(Callback):
"""
A container to hold all callbacks, and trigger them iteratively.
This is only used by the base trainer to run all the callbacks.
Users do not need to use this class.
"""
def __init__(self, cbs):
"""
Args:
cbs(list): a list of :class:`Callback` instances.
"""
# check type
for cb in cbs:
assert isinstance(cb, Callback), cb.__class__
self.cbs = cbs
def _setup_graph(self):
with tf.name_scope(None): # clear the name scope
for cb in self.cbs:
cb.setup_graph(self.trainer)
def _before_train(self):
for cb in self.cbs:
cb.before_train()
def _after_train(self):
for cb in self.cbs:
# make sure callbacks are properly finalized
try:
cb.after_train()
except Exception:
traceback.print_exc()
def get_hooks(self):
return [CallbackToHook(cb) for cb in self.cbs]
def trigger_step(self):
for cb in self.cbs:
cb.trigger_step()
def _trigger_epoch(self):
tm = CallbackTimeLogger()
for cb in self.cbs:
display_name = str(cb)
with tm.timed_callback(display_name):
cb.trigger_epoch()
tm.log()
def _before_epoch(self):
for cb in self.cbs:
cb.before_epoch()
def _after_epoch(self):
for cb in self.cbs:
cb.after_epoch()
| 2,611 | 23.87619 | 68 | py |
SyNet | SyNet-master/tensorpack/tensorpack/callbacks/prof.py | # -*- coding: utf-8 -*-
# File: prof.py
import multiprocessing as mp
import numpy as np
import os
import time
import tensorflow as tf
from six.moves import map, queue
import psutil
from ..tfutils.common import gpu_available_in_session
from ..utils import logger
from ..utils.timer import Timer
from ..utils.concurrency import ensure_proc_terminate, start_proc_mask_signal
from ..utils.gpu import get_num_gpu
from ..utils.nvml import NVMLContext
from .base import Callback
__all__ = ['GPUUtilizationTracker', 'GraphProfiler', 'PeakMemoryTracker',
'GPUMemoryTracker', 'HostMemoryTracker', 'ThroughputTracker']
class GPUUtilizationTracker(Callback):
""" Summarize the average GPU utilization within an epoch.
It will start a process to obtain GPU utilization through NVML every second
    within the epoch (the time spent in trigger_epoch is not included),
    and write the average utilization to monitors.
    This callback creates a process; therefore it is not safe to use with MPI.
"""
_chief_only = False
def __init__(self, devices=None):
"""
Args:
devices (list[int]): physical GPU ids to monitor. If None, will guess from the environment.
"""
assert os.name != 'nt', "GPUUtilizationTracker does not support windows!"
self._devices = devices
self._enabled = True
def _guess_devices(self):
env = os.environ.get('CUDA_VISIBLE_DEVICES')
if env is None:
devices = list(range(get_num_gpu()))
if len(devices) > 1:
logger.warn("[GPUUtilizationTracker] Both devices and CUDA_VISIBLE_DEVICES are None! "
"Will monitor all {} visible GPUs!".format(len(devices)))
else:
if len(env):
devices = list(map(int, env.split(',')))
else:
devices = []
return devices
def _setup_graph(self):
# special heuristics for Horovod
from ..train import HorovodTrainer
if isinstance(self.trainer, HorovodTrainer):
if self.trainer.mpi_enabled():
logger.warn("GPUUtilizationTracker is disabled under MPI.")
self._enabled = False
return
else:
self._devices = [self.trainer.hvd.local_rank()]
if self._devices is None:
self._devices = self._guess_devices()
assert len(self._devices), "[GPUUtilizationTracker] No GPU device given!"
self._evt = mp.Event()
self._stop_evt = mp.Event()
self._queue = mp.Queue()
self._proc = mp.Process(target=self.worker, args=(
self._evt, self._queue, self._stop_evt, self._devices))
ensure_proc_terminate(self._proc)
start_proc_mask_signal(self._proc)
def _before_train(self):
assert gpu_available_in_session(), "[GPUUtilizationTracker] needs GPU!"
def _before_epoch(self):
if self._enabled:
self._evt.set()
def _after_epoch(self):
if self._enabled:
while self._evt.is_set(): # unlikely, unless the epoch is extremely fast
pass
self._evt.set()
def _trigger_epoch(self):
# Don't do this in after_epoch because
# before,after_epoch are supposed to be extremely fast by design.
if not self._enabled:
return
try:
stats = self._queue.get(timeout=60)
except queue.Empty:
if self._proc.is_alive():
raise RuntimeError("GPUUtilization.worker() is stuck. This is a bug.")
else:
raise RuntimeError("GPUUtilization.worker() process is killed unexpectedly.")
if isinstance(stats, int) and stats == -1:
from ..train.base import StopTraining
raise StopTraining("GPUUtilizationTracker.worker has failed.")
for idx, dev in enumerate(self._devices):
self.trainer.monitors.put_scalar('GPUUtil/{}'.format(dev), stats[idx])
def _after_train(self):
if self._enabled:
self._stop_evt.set()
self._evt.set()
self._proc.terminate()
@staticmethod
def worker(evt, rst_queue, stop_evt, devices):
"""
Args:
devices (list[int])
"""
with NVMLContext() as ctx:
devices = [ctx.device(i) for i in devices]
while True:
try:
evt.wait() # start epoch
evt.clear()
if stop_evt.is_set(): # or on exit
return
stats = np.zeros((len(devices),), dtype='f4')
cnt = 0
while True:
time.sleep(1)
data = [d.utilization()['gpu'] for d in devices]
data = list(map(float, data))
stats += data
cnt += 1
if evt.is_set(): # stop epoch
if stop_evt.is_set(): # or on exit
return
evt.clear()
if cnt > 1:
                                # Ignore the last datapoint. It is usually zero and would make us underestimate the utilization.
stats -= data
cnt -= 1
rst_queue.put(stats / cnt)
break
except Exception:
logger.exception("Exception in GPUUtilizationTracker.worker")
rst_queue.put(-1)
return
# Can add more features from tfprof
# https://github.com/tensorflow/tensorflow/blob/master/tensorflow/core/profiler/README.md
class GraphProfiler(Callback):
"""
Enable profiling by installing session hooks,
and write tracing files / events / metadata to ``logger.get_logger_dir()``.
The tracing files can be loaded from ``chrome://tracing``.
The metadata files can be processed by
`tfprof command line utils
<https://github.com/tensorflow/tensorflow/blob/master/tensorflow/core/profiler/README.md>`_.
The event is viewable from tensorboard.
Tips:
Note that the profiling is by default enabled for every step and is expensive.
You probably want to schedule it less frequently, e.g.:
.. code-block:: none
EnableCallbackIf(
GraphProfiler(dump_tracing=True, dump_event=True),
lambda self: self.trainer.global_step > 20 and self.trainer.global_step < 30)
"""
def __init__(self, dump_metadata=False, dump_tracing=True, dump_event=False):
"""
Args:
dump_metadata(bool): Dump :class:`tf.RunMetadata` to be used with tfprof.
dump_tracing(bool): Dump chrome tracing files.
dump_event(bool): Dump to an event processed by FileWriter and
will be shown in TensorBoard.
"""
self._dir = logger.get_logger_dir()
self._dump_meta = bool(dump_metadata)
self._dump_tracing = bool(dump_tracing)
self._dump_event = bool(dump_event)
assert os.path.isdir(self._dir), self._dir
def _before_run(self, _):
opt = tf.RunOptions()
opt.trace_level = tf.RunOptions.FULL_TRACE
return tf.train.SessionRunArgs(fetches=None, options=opt)
def _after_run(self, _, run_values):
meta = run_values.run_metadata
if self._dump_meta:
self._write_meta(meta)
if self._dump_tracing:
self._write_tracing(meta)
if self._dump_event:
self._write_event(meta)
def _write_meta(self, metadata):
fname = os.path.join(
self._dir, 'runmetadata-{}.pb'.format(self.global_step))
with open(fname, 'wb') as f:
f.write(metadata.SerializeToString())
def _write_tracing(self, metadata):
from tensorflow.python.client import timeline
tl = timeline.Timeline(step_stats=metadata.step_stats)
fname = os.path.join(
self._dir, 'chrome-trace-{}.json'.format(self.global_step))
with open(fname, 'w') as f:
f.write(tl.generate_chrome_trace_format(
show_dataflow=True, show_memory=True))
def _write_event(self, metadata):
evt = tf.Event()
evt.tagged_run_metadata.tag = 'trace-{}'.format(self.global_step)
evt.tagged_run_metadata.run_metadata = metadata.SerializeToString()
self.trainer.monitors.put_event(evt)
class GPUMemoryTracker(Callback):
"""
Track peak memory used on each GPU device every epoch, by :mod:`tf.contrib.memory_stats`.
The peak memory comes from the ``MaxBytesInUse`` op, which is the peak memory used
in recent ``session.run`` calls.
See https://github.com/tensorflow/tensorflow/pull/13107.
"""
_chief_only = False
def __init__(self, devices=(0,)):
"""
Args:
devices([int] or [str]): list of GPU devices to track memory on.
"""
assert isinstance(devices, (list, tuple)), devices
devices = ['/gpu:{}'.format(x) if isinstance(x, int) else x for x in devices]
self._devices = devices
def _setup_graph(self):
from tensorflow.contrib.memory_stats import MaxBytesInUse
ops = []
for dev in self._devices:
with tf.device(dev):
ops.append(MaxBytesInUse())
self._fetches = tf.train.SessionRunArgs(fetches=ops)
def _before_train(self):
assert gpu_available_in_session(), "PeakMemoryTracker only supports GPU!"
def _before_run(self, _):
if self.local_step == self.trainer.steps_per_epoch - 1:
return self._fetches
return None
def _after_run(self, _, rv):
results = rv.results
if results is not None:
for mem, dev in zip(results, self._devices):
self.trainer.monitors.put_scalar('PeakMemory(MB)' + dev, mem / 1e6)
PeakMemoryTracker = GPUMemoryTracker
class HostMemoryTracker(Callback):
"""
Track free RAM on the host.
When triggered, it writes the size of free RAM into monitors.
"""
_chief_only = False
def _setup_graph(self):
logger.info("[HostMemoryTracker] Free RAM in setup_graph() is {:.2f} GB.".format(self._free_ram_gb()))
def _before_train(self):
logger.info("[HostMemoryTracker] Free RAM in before_train() is {:.2f} GB.".format(self._free_ram_gb()))
def _trigger(self):
ram_gb = self._free_ram_gb()
self.trainer.monitors.put_scalar('HostFreeMemory (GB)', ram_gb)
def _free_ram_gb(self):
return psutil.virtual_memory().available / 1024**3
class ThroughputTracker(Callback):
"""
    This callback writes the training throughput (in terms of either steps/sec or samples/sec)
    to the monitors every time it is triggered.
The throughput is computed based on the duration between the consecutive triggers.
The time spent on callbacks after each epoch is excluded.
"""
_chief_only = False
def __init__(self, samples_per_step=None):
"""
Args:
samples_per_step (int or None): total number of samples processed in each step
(i.e., your total batch size in each step).
If not provided, this callback will record "steps/sec" instead of "samples/sec".
"""
if samples_per_step is not None:
samples_per_step = int(samples_per_step)
self._samples_per_step = samples_per_step
self._timer = Timer()
self._timer.pause()
# only include the time between before_epoch/after_epoch
def _before_epoch(self):
self._timer.resume()
def _after_epoch(self):
self._timer.pause()
def _before_train(self):
self._update_last()
def _update_last(self):
old_pause = self._timer.is_paused()
self._timer.reset()
if old_pause:
self._timer.pause()
self._last_step = self.global_step
def _trigger(self):
steps_per_sec = (self.global_step - self._last_step) / self._timer.seconds()
self._update_last()
if self._samples_per_step is None:
self.trainer.monitors.put_scalar("Throughput (steps/sec)", steps_per_sec)
else:
self.trainer.monitors.put_scalar("Throughput (samples/sec)", steps_per_sec * self._samples_per_step)
| 12,529 | 34.495751 | 112 | py |
SyNet | SyNet-master/tensorpack/tensorpack/callbacks/trigger.py | # -*- coding: utf-8 -*-
# File: trigger.py
from .base import Callback, ProxyCallback
__all__ = ['PeriodicTrigger', 'PeriodicCallback', 'EnableCallbackIf']
class PeriodicTrigger(ProxyCallback):
"""
Trigger a callback every k global steps or every k epochs by its :meth:`trigger()` method.
Most existing callbacks which do something every epoch are implemented
with :meth:`trigger()` method. By default the :meth:`trigger()` method will be called every epoch.
This wrapper can make the callback run at a different frequency.
All other methods (``before/after_run``, ``trigger_step``, etc) of the given callback
are unaffected. They will still be called as-is.
"""
def __init__(self, triggerable, every_k_steps=None, every_k_epochs=None, before_train=False):
"""
Args:
triggerable (Callback): a Callback instance with a trigger method to be called.
every_k_steps (int): trigger when ``global_step % k == 0``. Set to
None to ignore.
every_k_epochs (int): trigger when ``epoch_num % k == 0``. Set to
None to ignore.
before_train (bool): trigger in the :meth:`before_train` method.
every_k_steps and every_k_epochs can be both set, but cannot be both None unless before_train is True.
"""
assert isinstance(triggerable, Callback), type(triggerable)
super(PeriodicTrigger, self).__init__(triggerable)
if before_train is False:
assert (every_k_epochs is not None) or (every_k_steps is not None), \
"Arguments to PeriodicTrigger have disabled the triggerable!"
self._step_k = every_k_steps
self._epoch_k = every_k_epochs
self._do_before_train = before_train
def _before_train(self):
self.cb.before_train()
if self._do_before_train:
self.cb.trigger()
def _trigger_step(self):
self.cb.trigger_step()
if self._step_k is None:
return
if self.global_step % self._step_k == 0:
self.cb.trigger()
def _trigger_epoch(self):
if self._epoch_k is None:
return
if self.epoch_num % self._epoch_k == 0:
self.cb.trigger()
def __str__(self):
return "PeriodicTrigger-" + str(self.cb)
class EnableCallbackIf(ProxyCallback):
"""
Disable the ``{before,after}_epoch``, ``{before,after}_run``,
``trigger_{epoch,step}``
methods of a callback, unless some condition satisfies.
The other methods are unaffected.
A more accurate name for this callback should be "DisableCallbackUnless", but that's too ugly.
Note:
If you use ``{before,after}_run``,
``pred`` will be evaluated only in ``before_run``.
"""
def __init__(self, callback, pred):
"""
Args:
callback (Callback):
pred (self -> bool): a callable predicate. Has to be a pure function.
The callback is disabled unless this predicate returns True.
"""
self._pred = pred
super(EnableCallbackIf, self).__init__(callback)
def _before_run(self, ctx):
if self._pred(self):
self._enabled = True
return super(EnableCallbackIf, self)._before_run(ctx)
else:
self._enabled = False
def _after_run(self, ctx, rv):
if self._enabled:
super(EnableCallbackIf, self)._after_run(ctx, rv)
def _before_epoch(self):
if self._pred(self):
super(EnableCallbackIf, self)._before_epoch()
def _after_epoch(self):
if self._pred(self):
super(EnableCallbackIf, self)._after_epoch()
def _trigger_epoch(self):
if self._pred(self):
super(EnableCallbackIf, self)._trigger_epoch()
def _trigger_step(self):
if self._pred(self):
super(EnableCallbackIf, self)._trigger_step()
def __str__(self):
return "EnableCallbackIf-" + str(self.cb)
class PeriodicCallback(EnableCallbackIf):
"""
The ``{before,after}_epoch``, ``{before,after}_run``, ``trigger_{epoch,step}``
    methods of the given callback will be enabled only when ``global_step % every_k_steps == 0``
or ``epoch_num % every_k_epochs == 0``. The other methods are unaffected.
    Note that this can only make a callback **less** frequent than it runs by default.
    If you have a callback that by default runs every epoch by its :meth:`trigger()` method,
    use :class:`PeriodicTrigger` to schedule it more frequently than that.
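    Example:
        A minimal sketch (``MyCallback`` is an illustrative name): enable the
        per-epoch/per-step methods of the callback only once every 5 epochs.
        .. code-block:: python
            PeriodicCallback(MyCallback(), every_k_epochs=5)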
"""
def __init__(self, callback, every_k_steps=None, every_k_epochs=None):
"""
Args:
callback (Callback): a Callback instance.
every_k_steps (int): enable the callback when ``global_step % k == 0``. Set to
None to ignore.
every_k_epochs (int): enable the callback when ``epoch_num % k == 0``.
Also enable when the last step finishes (``epoch_num == max_epoch``
and ``local_step == steps_per_epoch - 1``). Set to None to ignore.
            every_k_steps and every_k_epochs can both be set, but they cannot both be None.
"""
assert isinstance(callback, Callback), type(callback)
assert (every_k_epochs is not None) or (every_k_steps is not None), \
"every_k_steps and every_k_epochs cannot be both None!"
self._step_k = every_k_steps
self._epoch_k = every_k_epochs
super(PeriodicCallback, self).__init__(callback, PeriodicCallback.predicate)
def predicate(self):
if self._step_k is not None and self.global_step % self._step_k == 0:
return True
if self._epoch_k is not None and self.epoch_num % self._epoch_k == 0:
return True
if self._epoch_k is not None:
if self.local_step == self.trainer.steps_per_epoch - 1 and \
self.epoch_num == self.trainer.max_epoch:
return True
return False
def __str__(self):
return "PeriodicCallback-" + str(self.cb)
| 6,108 | 36.478528 | 110 | py |
SyNet | SyNet-master/tensorpack/tensorpack/callbacks/misc.py | # -*- coding: utf-8 -*-
# File: misc.py
import numpy as np
import os
import time
from collections import deque
from ..utils import logger
from ..utils.utils import humanize_time_delta
from .base import Callback
__all__ = ['SendStat', 'InjectShell', 'EstimatedTimeLeft']
class SendStat(Callback):
""" An equivalent of :class:`SendMonitorData`, but as a normal callback. """
def __init__(self, command, names):
self.command = command
if not isinstance(names, list):
names = [names]
self.names = names
def _trigger(self):
M = self.trainer.monitors
v = {k: M.get_latest(k) for k in self.names}
cmd = self.command.format(**v)
ret = os.system(cmd)
if ret != 0:
logger.error("Command {} failed with ret={}!".format(cmd, ret))
class InjectShell(Callback):
"""
Allow users to create a specific file as a signal to pause
and iteratively debug the training.
Once the :meth:`trigger` method is called, it detects whether the file exists, and opens an
IPython/pdb shell if yes.
In the shell, ``self`` is this callback, ``self.trainer`` is the trainer, and
from that you can access everything else.
Example:
.. code-block:: none
callbacks=[InjectShell('/path/to/pause-training.tmp'), ...]
# the following command will pause the training and start a shell when the epoch finishes:
$ touch /path/to/pause-training.tmp
"""
def __init__(self, file='INJECT_SHELL.tmp', shell='ipython'):
"""
Args:
file (str): if this file exists, will open a shell.
shell (str): one of 'ipython', 'pdb'
"""
self._file = file
assert shell in ['ipython', 'pdb']
self._shell = shell
logger.info("Create a file '{}' to open {} shell.".format(file, shell))
def _trigger(self):
if os.path.isfile(self._file):
logger.info("File {} exists, entering shell.".format(self._file))
self._inject()
def _inject(self):
trainer = self.trainer # noqa
if self._shell == 'ipython':
import IPython as IP # noqa
IP.embed()
elif self._shell == 'pdb':
import pdb # noqa
pdb.set_trace()
def _after_train(self):
if os.path.isfile(self._file):
os.unlink(self._file)
class EstimatedTimeLeft(Callback):
"""
Estimate the time left until completion of training.
"""
def __init__(self, last_k_epochs=5, median=True):
"""
Args:
last_k_epochs (int): Use the time spent on last k epochs to estimate total time left.
median (bool): Use the mean or median time spent on last k epochs.
"""
self._times = deque(maxlen=last_k_epochs)
self._median = median
def _before_train(self):
self._max_epoch = self.trainer.max_epoch
self._last_time = time.time()
def _trigger_epoch(self):
duration = time.time() - self._last_time
self._last_time = time.time()
self._times.append(duration)
epoch_time = np.median(self._times) if self._median else np.mean(self._times)
time_left = (self._max_epoch - self.epoch_num) * epoch_time
if time_left > 0:
logger.info("Estimated Time Left: " + humanize_time_delta(time_left))
| 3,404 | 29.954545 | 98 | py |
SyNet | SyNet-master/tensorpack/tensorpack/callbacks/graph.py | # -*- coding: utf-8 -*-
# File: graph.py
""" Graph related callbacks"""
import numpy as np
import os
from ..compat import tfv1 as tf
from ..tfutils.common import get_op_tensor_name
from ..utils import logger
from .base import Callback
__all__ = ['RunOp', 'RunUpdateOps', 'ProcessTensors', 'DumpTensors',
'DumpTensor', 'DumpTensorAsImage', 'DumpParamAsImage', 'CheckNumerics']
class RunOp(Callback):
""" Run an Op. """
_chief_only = False
def __init__(self, op,
run_before=True, run_as_trigger=True,
run_step=False, verbose=False):
"""
Args:
op (tf.Operation or function): an Op, or a function that returns the Op in the graph.
The function will be called after the main graph has been created (in the :meth:`setup_graph` callback).
run_before (bool): run the Op before training
run_as_trigger (bool): run the Op on every :meth:`trigger()` call.
run_step (bool): run the Op every step (along with training)
verbose (bool): print logs when the op is run.
Example:
The `DQN Example
<https://github.com/tensorpack/tensorpack/blob/master/examples/DeepQNetwork/>`_
uses this callback to update target network.
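            A minimal sketch of such a use (``update_target`` is an illustrative
            function, assumed to return an op that copies weights to the target network):
            .. code-block:: python
                RunOp(update_target, run_before=True, run_as_trigger=True, verbose=True)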
"""
if not callable(op):
self.setup_func = lambda: op # noqa
else:
self.setup_func = op
self.run_before = run_before
self.run_as_trigger = run_as_trigger
self.run_step = run_step
self.verbose = verbose
def _setup_graph(self):
self._op = self.setup_func()
if self.run_step:
self._fetch = tf.train.SessionRunArgs(fetches=self._op)
def _before_train(self):
if self.run_before:
self._print()
self._op.run()
def _trigger(self):
if self.run_as_trigger:
self._print()
self._op.run()
def _before_run(self, _):
if self.run_step:
self._print()
return self._fetch
def _print(self):
if self.verbose:
logger.info("Running Op {} ...".format(self._op.name))
class RunUpdateOps(RunOp):
"""
Run ops from the collection UPDATE_OPS every step.
The ops will be hooked to ``trainer.hooked_sess`` and run along with
each ``hooked_sess.run`` call.
    Be careful when using ``UPDATE_OPS`` if your model contains more than one sub-network.
Perhaps not all updates are supposed to be executed in every iteration.
This callback is one of the :func:`DEFAULT_CALLBACKS()`.
"""
def __init__(self, collection=None):
"""
Args:
collection (str): collection of ops to run. Defaults to ``tf.GraphKeys.UPDATE_OPS``
"""
if collection is None:
collection = tf.GraphKeys.UPDATE_OPS
name = 'UPDATE_OPS' if collection == tf.GraphKeys.UPDATE_OPS else collection
def f():
ops = tf.get_collection(collection)
if ops:
logger.info("Applying collection {} of {} ops.".format(name, len(ops)))
return tf.group(*ops, name='update_ops')
else:
return tf.no_op(name='empty_update_ops')
super(RunUpdateOps, self).__init__(
f, run_before=False, run_as_trigger=False, run_step=True)
class ProcessTensors(Callback):
"""
Fetch extra tensors **along with** each training step,
and call some function over the values.
    It uses the ``_{before,after}_run`` methods to inject ``tf.train.SessionRunHooks``
    into the session.
You can use it to print tensors, save tensors to file, etc.
Example:
.. code-block:: python
ProcessTensors(['mycost1', 'mycost2'], lambda c1, c2: print(c1, c2, c1 + c2))
"""
def __init__(self, names, fn):
"""
Args:
names (list[str]): names of tensors
fn: a function taking all requested tensors as input
"""
assert isinstance(names, (list, tuple)), names
self._names = names
self._fn = fn
def _setup_graph(self):
tensors = self.get_tensors_maybe_in_tower(self._names)
self._fetch = tf.train.SessionRunArgs(fetches=tensors)
def _before_run(self, _):
return self._fetch
def _after_run(self, _, rv):
results = rv.results
self._fn(*results)
class DumpTensors(ProcessTensors):
"""
Dump some tensors to a file.
    Every step this callback fetches tensors and writes them to an npz file
under ``logger.get_logger_dir``.
The dump can be loaded by ``dict(np.load(filename).items())``.
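    Example:
        A minimal sketch (the tensor names and the log directory are illustrative):
        .. code-block:: python
            DumpTensors(['learning_rate', 'cross_entropy_loss'])
            # later: dict(np.load('train_log/run1/DumpTensor-100.npz').items())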
"""
def __init__(self, names):
"""
Args:
names (list[str]): names of tensors
"""
assert isinstance(names, (list, tuple)), names
self._names = names
dir = logger.get_logger_dir()
def fn(*args):
dic = {}
for name, val in zip(self._names, args):
dic[name] = val
fname = os.path.join(
dir, 'DumpTensor-{}.npz'.format(self.global_step))
np.savez(fname, **dic)
super(DumpTensors, self).__init__(names, fn)
class DumpTensorAsImage(Callback):
"""
Dump a tensor to image(s) to ``logger.get_logger_dir()`` once triggered.
    Note that it requires the tensor to be directly evaluable, i.e. either inputs
are not its dependency (e.g. the weights of the model), or the inputs are
feedfree (in which case this callback will take an extra datapoint from the input pipeline).
"""
def __init__(self, tensor_name, prefix=None, map_func=None, scale=255):
"""
Args:
tensor_name (str): the name of the tensor.
prefix (str): the filename prefix for saved images. Defaults to the Op name.
map_func: map the value of the tensor to an image or list of
images of shape [h, w] or [h, w, c]. If None, will use identity.
scale (float): a multiplier on pixel values, applied after map_func.
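        Example:
            A minimal sketch (``viz`` is an illustrative tensor name, assumed to hold
            values already in the [0, 255] range):
            .. code-block:: python
                DumpTensorAsImage('viz', scale=1)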
"""
op_name, self.tensor_name = get_op_tensor_name(tensor_name)
self.func = map_func
if prefix is None:
self.prefix = op_name
else:
self.prefix = prefix
self.log_dir = logger.get_logger_dir()
self.scale = scale
def _before_train(self):
self._tensor = self.graph.get_tensor_by_name(self.tensor_name)
def _trigger(self):
val = self.trainer.sess.run(self._tensor)
if self.func is not None:
val = self.func(val)
if isinstance(val, list) or val.ndim == 4:
for idx, im in enumerate(val):
self._dump_image(im, idx)
else:
self._dump_image(val)
self.trainer.monitors.put_image(self.prefix, val)
def _dump_image(self, im, idx=None):
assert im.ndim in [2, 3], str(im.ndim)
fname = os.path.join(
self.log_dir,
self.prefix + '-ep{:03d}{}.png'.format(
self.epoch_num, '-' + str(idx) if idx else ''))
res = im * self.scale
res = np.clip(res, 0, 255)
cv2.imwrite(fname, res.astype('uint8'))
class CheckNumerics(Callback):
"""
When triggered, check variables in the graph for NaN and Inf.
Raise exceptions if such an error is found.
"""
def _setup_graph(self):
vars = tf.trainable_variables()
ops = [tf.check_numerics(v, "CheckNumerics['{}']".format(v.op.name)).op for v in vars]
self._check_op = tf.group(*ops)
def _trigger(self):
self._check_op.run()
try:
import cv2
except ImportError:
from ..utils.develop import create_dummy_class
DumpTensorAsImage = create_dummy_class('DumpTensorAsImage', 'cv2') # noqa
# alias
DumpParamAsImage = DumpTensorAsImage
DumpTensor = DumpTensors
| 7,945 | 31.432653 | 120 | py |
SyNet | SyNet-master/tensorpack/tensorpack/callbacks/monitor.py | # -*- coding: utf-8 -*-
# File: monitor.py
import json
import numpy as np
import operator
import os
import re
import shutil
import time
from collections import defaultdict
from datetime import datetime
import six
import threading
from ..compat import tfv1 as tf
from ..libinfo import __git_version__
from ..tfutils.summary import create_image_summary, create_scalar_summary
from ..utils import fs, logger
from ..utils.develop import HIDE_DOC
from .base import Callback
__all__ = ['MonitorBase', 'Monitors',
'TFEventWriter', 'JSONWriter',
'ScalarPrinter', 'SendMonitorData',
'CometMLMonitor']
def image_to_nhwc(arr):
if arr.ndim == 4:
pass
elif arr.ndim == 3:
if arr.shape[-1] in [1, 3, 4]:
arr = arr[np.newaxis, :]
else:
arr = arr[:, :, :, np.newaxis]
elif arr.ndim == 2:
arr = arr[np.newaxis, :, :, np.newaxis]
else:
raise ValueError("Array of shape {} is not an image!".format(arr.shape))
return arr
class MonitorBase(Callback):
"""
    Base class for monitors which monitor training progress, by processing different types of
summary/statistics from trainer.
.. document private functions
.. automethod:: _setup_graph
"""
_chief_only = False
def setup_graph(self, trainer):
# Set attributes following Callback.setup_graph
self.trainer = trainer
self.graph = tf.get_default_graph()
self._setup_graph()
def _setup_graph(self):
""" Override this method to setup the monitor."""
pass
def process_summary(self, summary):
"""
Process a tf.Summary.
"""
pass
def process(self, name, val):
"""
Process a key-value pair.
"""
pass
def process_scalar(self, name, val):
"""
Args:
val: a scalar
"""
pass
def process_image(self, name, val):
"""
Args:
val (np.ndarray): 4D (NHWC) numpy array of images in range [0,255].
If channel is 3, assumed to be RGB.
"""
pass
def process_event(self, evt):
"""
Args:
evt (tf.Event): the most basic format acceptable by tensorboard.
It could include Summary, RunMetadata, LogMessage, and more.
"""
pass
# TODO process other types
class NoOpMonitor(MonitorBase):
def __init__(self, name=None):
self._name = name
def __str__(self):
if self._name is None:
return "NoOpMonitor"
return "NoOpMonitor({})".format(self._name)
class Monitors(Callback):
"""
Merge monitors together for trainer to use.
In training, each trainer will create a :class:`Monitors` instance,
and you can access it through ``trainer.monitors``.
You should use ``trainer.monitors`` for logging and it will dispatch your
logs to each sub-monitor.
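    Example:
        A minimal sketch of logging a custom scalar from inside one of your own
        callback methods (the statistics name is illustrative):
        .. code-block:: python
            self.trainer.monitors.put_scalar('my-stat', 42)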
"""
_chief_only = False
def __init__(self, monitors):
self._scalar_history = ScalarHistory()
self._monitors = monitors + [self._scalar_history]
for m in self._monitors:
assert isinstance(m, MonitorBase), m
def _setup_graph(self):
# scalar_history's other methods were not called.
# but they are not useful for now
self._scalar_history.setup_graph(self.trainer)
def _dispatch(self, func):
for m in self._monitors:
func(m)
def put_summary(self, summary):
"""
Put a `tf.Summary`.
"""
if isinstance(summary, six.binary_type):
summary = tf.Summary.FromString(summary)
assert isinstance(summary, tf.Summary), type(summary)
# TODO other types
for val in summary.value:
if val.WhichOneof('value') == 'simple_value':
val.tag = re.sub('tower[0-9]+/', '', val.tag) # TODO move to subclasses
# TODO This hack is still needed, seem to disappear only when
# compiled from source.
suffix = '-summary' # tensorflow#6150, tensorboard#59
if val.tag.endswith(suffix):
val.tag = val.tag[:-len(suffix)]
self._dispatch(lambda m: m.process_scalar(val.tag, val.simple_value))
self._dispatch(lambda m: m.process_summary(summary))
def put_scalar(self, name, val):
"""
Put a scalar.
"""
if isinstance(val, np.floating):
val = float(val)
if isinstance(val, np.integer):
val = int(val)
self._dispatch(lambda m: m.process_scalar(name, val))
s = create_scalar_summary(name, val)
self._dispatch(lambda m: m.process_summary(s))
def put_image(self, name, val):
"""
Put an image.
Args:
name (str):
val (np.ndarray): 2D, 3D (HWC) or 4D (NHWC) numpy array of images
in range [0,255]. If channel is 3, assumed to be RGB.
"""
assert isinstance(val, np.ndarray)
arr = image_to_nhwc(val)
self._dispatch(lambda m: m.process_image(name, arr))
s = create_image_summary(name, arr)
self._dispatch(lambda m: m.process_summary(s))
def put_event(self, evt):
"""
Put an :class:`tf.Event`.
`step` and `wall_time` fields of :class:`tf.Event` will be filled automatically.
Args:
evt (tf.Event):
"""
evt.step = self.global_step
evt.wall_time = time.time()
self._dispatch(lambda m: m.process_event(evt))
def get_latest(self, name):
"""
Get latest scalar value of some data.
If you run multiprocess training, keep in mind that
the data is perhaps only available on chief process.
Returns:
scalar
"""
return self._scalar_history.get_latest(name)[1]
def get_history(self, name):
"""
Get a history of the scalar value of some data.
If you run multiprocess training, keep in mind that
the data is perhaps only available on chief process.
Returns:
a list of (global_step, value) pairs: history data for this scalar
"""
return self._scalar_history.get_history(name)
class TFEventWriter(MonitorBase):
"""
Write summaries to TensorFlow event file.
"""
def __init__(self, logdir=None, max_queue=10, flush_secs=120, split_files=False):
"""
Args:
logdir: ``logger.get_logger_dir()`` by default.
max_queue, flush_secs: Same as in :class:`tf.summary.FileWriter`.
split_files: if True, split events to multiple files rather than
append to a single file. Useful on certain filesystems where append is expensive.
"""
if logdir is None:
logdir = logger.get_logger_dir()
assert tf.gfile.IsDirectory(logdir), logdir
self._logdir = fs.normpath(logdir)
self._max_queue = max_queue
self._flush_secs = flush_secs
self._split_files = split_files
def __new__(cls, logdir=None, max_queue=10, flush_secs=120, **kwargs):
if logdir is None:
logdir = logger.get_logger_dir()
if logdir is not None:
return super(TFEventWriter, cls).__new__(cls)
else:
logger.warn("logger directory was not set. Ignore TFEventWriter.")
return NoOpMonitor("TFEventWriter")
def _setup_graph(self):
self._writer = tf.summary.FileWriter(
self._logdir, max_queue=self._max_queue, flush_secs=self._flush_secs)
def _write_graph(self):
self._writer.add_graph(self.graph)
def _before_train(self):
# Writing the graph is expensive (takes ~2min) when the graph is large.
# Therefore use a separate thread. It will then run in the
# background while TF is warming up in the first several iterations.
self._write_graph_thread = threading.Thread(target=self._write_graph)
self._write_graph_thread.daemon = True
self._write_graph_thread.start()
@HIDE_DOC
def process_summary(self, summary):
self._writer.add_summary(summary, self.global_step)
@HIDE_DOC
def process_event(self, evt):
self._writer.add_event(evt)
def _trigger(self): # flush every epoch
self._writer.flush()
if self._split_files:
self._writer.close()
self._writer.reopen() # open new file
def _after_train(self):
self._writer.close()
class JSONWriter(MonitorBase):
"""
Write all scalar data to a json file under ``logger.get_logger_dir()``, grouped by their global step.
    If an earlier json history file is found, it will append to it.
"""
FILENAME = 'stats.json'
"""
The name of the json file. Do not change it.
"""
def __new__(cls):
if logger.get_logger_dir():
return super(JSONWriter, cls).__new__(cls)
else:
logger.warn("logger directory was not set. Ignore JSONWriter.")
return NoOpMonitor("JSONWriter")
@staticmethod
def load_existing_json(dir=None):
"""
Look for an existing json under dir (defaults to
:meth:`logger.get_logger_dir()`) named "stats.json",
and return the loaded list of statistics if found. Returns None otherwise.
"""
if dir is None:
dir = logger.get_logger_dir()
fname = os.path.join(dir, JSONWriter.FILENAME)
if tf.gfile.Exists(fname):
with open(fname) as f:
stats = json.load(f)
assert isinstance(stats, list), type(stats)
return stats
return None
@staticmethod
def load_existing_epoch_number(dir=None):
"""
Try to load the latest epoch number from an existing json stats file (if any).
Returns None if not found.
"""
stats = JSONWriter.load_existing_json(dir)
try:
return int(stats[-1]['epoch_num'])
except Exception:
return None
# initialize the stats here, because before_train from other callbacks may use it
def _setup_graph(self):
self._stats = []
self._stat_now = {}
self._last_gs = -1
def _before_train(self):
stats = JSONWriter.load_existing_json()
self._fname = os.path.join(logger.get_logger_dir(), JSONWriter.FILENAME)
if stats is not None:
try:
epoch = stats[-1]['epoch_num'] + 1
except Exception:
epoch = None
# check against the current training settings
# therefore this logic needs to be in before_train stage
starting_epoch = self.trainer.loop.starting_epoch
if epoch is None or epoch == starting_epoch:
logger.info("Found existing JSON inside {}, will append to it.".format(logger.get_logger_dir()))
self._stats = stats
else:
logger.warn(
"History epoch={} from JSON is not the predecessor of the current starting_epoch={}".format(
epoch - 1, starting_epoch))
logger.warn("If you want to resume old training, either use `AutoResumeTrainConfig` "
"or correctly set the new starting_epoch yourself to avoid inconsistency. ")
backup_fname = JSONWriter.FILENAME + '.' + datetime.now().strftime('%m%d-%H%M%S')
backup_fname = os.path.join(logger.get_logger_dir(), backup_fname)
logger.warn("Now, we will train with starting_epoch={} and backup old json to {}".format(
self.trainer.loop.starting_epoch, backup_fname))
shutil.move(self._fname, backup_fname)
# in case we have something to log here.
self._trigger()
def _trigger_step(self):
# will do this in trigger_epoch
if self.local_step != self.trainer.steps_per_epoch - 1:
self._trigger()
def _trigger_epoch(self):
self._trigger()
@HIDE_DOC
def process_scalar(self, name, val):
self._stat_now[name] = val
def _trigger(self):
"""
Add stats to json and dump to disk.
Note that this method is idempotent.
"""
if len(self._stat_now):
self._stat_now['epoch_num'] = self.epoch_num
self._stat_now['global_step'] = self.global_step
self._stats.append(self._stat_now)
self._stat_now = {}
self._write_stat()
def _write_stat(self):
tmp_filename = self._fname + '.tmp'
try:
with open(tmp_filename, 'w') as f:
json.dump(self._stats, f)
shutil.move(tmp_filename, self._fname)
except IOError: # disk error sometimes..
logger.exception("Exception in JSONWriter._write_stat()!")
class ScalarPrinter(MonitorBase):
"""
Print scalar data into terminal.
"""
def __init__(self, enable_step=False, enable_epoch=True,
whitelist=None, blacklist=None):
"""
Args:
enable_step, enable_epoch (bool): whether to print the
monitor data (if any) between steps or between epochs.
whitelist (list[str] or None): A list of regex. Only names
matching some regex will be allowed for printing.
Defaults to match all names.
blacklist (list[str] or None): A list of regex. Names matching
any regex will not be printed. Defaults to match no names.
"""
def compile_regex(rs):
if rs is None:
return None
rs = {re.compile(r) for r in rs}
return rs
self._whitelist = compile_regex(whitelist)
if blacklist is None:
blacklist = []
self._blacklist = compile_regex(blacklist)
self._enable_step = enable_step
self._enable_epoch = enable_epoch
self._dic = {}
# in case we have something to log here.
def _before_train(self):
self._trigger()
def _trigger_step(self):
if self._enable_step:
if self.local_step != self.trainer.steps_per_epoch - 1:
# not the last step
self._trigger()
else:
if not self._enable_epoch:
self._trigger()
# otherwise, will print them together
def _trigger_epoch(self):
if self._enable_epoch:
self._trigger()
@HIDE_DOC
def process_scalar(self, name, val):
self._dic[name] = float(val)
def _trigger(self):
# Print stats here
def match_regex_list(regexs, name):
for r in regexs:
if r.search(name) is not None:
return True
return False
for k, v in sorted(self._dic.items(), key=operator.itemgetter(0)):
if self._whitelist is None or \
match_regex_list(self._whitelist, k):
if not match_regex_list(self._blacklist, k):
logger.info('{}: {:.5g}'.format(k, v))
self._dic = {}
class ScalarHistory(MonitorBase):
"""
Only internally used by monitors.
"""
def __init__(self):
self._dic = defaultdict(list)
@HIDE_DOC
def process_scalar(self, name, val):
self._dic[name].append((self.global_step, float(val)))
def get_latest(self, name):
hist = self._dic[name]
if len(hist) == 0:
raise KeyError("No available data for the key: {}".format(name))
else:
return hist[-1]
def get_history(self, name):
return self._dic[name]
class SendMonitorData(MonitorBase):
"""
Execute a command with some specific scalar monitor data.
This is useful for, e.g. building a custom statistics monitor.
    It will try to send once it has received all the stats.
"""
def __init__(self, command, names):
"""
Args:
command(str): a command to execute. Use format string with stat
names as keys.
names(list or str): data name(s) to use.
Example:
Send the stats to your phone through pushbullet:
.. code-block:: python
SendMonitorData('curl -u your_id: https://api.pushbullet.com/v2/pushes \\
-d type=note -d title="validation error" \\
-d body={validation_error} > /dev/null 2>&1',
'validation_error')
"""
self.command = command
if not isinstance(names, list):
names = [names]
self.names = names
self.dic = {}
@HIDE_DOC
def process_scalar(self, name, val):
if name in self.names:
self.dic[name] = val
def _trigger_step(self):
self._trigger()
def _trigger(self):
try:
v = {k: self.dic[k] for k in self.names}
except KeyError:
return
cmd = self.command.format(**v)
ret = os.system(cmd)
if ret != 0:
logger.error("Command '{}' failed with ret={}!".format(cmd, ret))
self.dic = {}
class CometMLMonitor(MonitorBase):
"""
Send scalar data and the graph to https://www.comet.ml.
Note:
1. comet_ml requires you to `import comet_ml` before importing tensorflow or tensorpack.
2. The "automatic output logging" feature of comet_ml will make the training progress bar appear to freeze.
Therefore the feature is disabled by default.
"""
def __init__(self, experiment=None, tags=None, **kwargs):
"""
Args:
experiment (comet_ml.Experiment): if provided, invalidate all other arguments
tags (list[str]): experiment tags
kwargs: arguments used to initialize :class:`comet_ml.Experiment`,
such as project name, API key, etc.
Refer to its documentation for details.
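        Example:
            A minimal sketch; the project name and tags are placeholders, and the
            accepted keyword arguments are defined by ``comet_ml.Experiment`` itself:
            .. code-block:: python
                CometMLMonitor(project_name='my-project', tags=['baseline'])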
"""
if experiment is not None:
self._exp = experiment
assert tags is None and len(kwargs) == 0
else:
from comet_ml import Experiment
kwargs.setdefault('log_code', True) # though it's not functioning, git patch logging requires it
kwargs.setdefault('auto_output_logging', None)
self._exp = Experiment(**kwargs)
if tags is not None:
self._exp.add_tags(tags)
self._exp.set_code("Code logging is impossible ...")
self._exp.log_dependency('tensorpack', __git_version__)
@property
def experiment(self):
"""
The :class:`comet_ml.Experiment` instance.
"""
return self._exp
def _before_train(self):
self._exp.set_model_graph(tf.get_default_graph())
@HIDE_DOC
def process_scalar(self, name, val):
self._exp.log_metric(name, val, step=self.global_step)
@HIDE_DOC
def process_image(self, name, val):
self._exp.set_step(self.global_step)
for idx, v in enumerate(val):
log_name = "{}_step{}{}".format(
name,
self.global_step,
"_" + str(idx) if len(val) > 1 else "")
self._exp.log_image(v, image_format="jpeg", name=log_name, image_minmax=(0, 255))
def _after_train(self):
self._exp.end()
def _after_epoch(self):
self._exp.log_epoch_end(self.epoch_num)
| 19,702 | 31.037398 | 115 | py |
SyNet | SyNet-master/tensorpack/tensorpack/callbacks/__init__.py | # -*- coding: utf-8 -*-
# File: __init__.py
# https://github.com/celery/kombu/blob/7d13f9b95d0b50c94393b962e6def928511bfda6/kombu/__init__.py#L34-L36
STATICA_HACK = True
globals()['kcah_acitats'[::-1].upper()] = False
if STATICA_HACK:
from .base import *
from .concurrency import *
from .graph import *
from .group import *
from .hooks import *
from .inference import *
from .inference_runner import *
from .monitor import *
from .param import *
from .prof import *
from .saver import *
from .misc import *
from .steps import *
from .summary import *
from .trigger import *
from pkgutil import iter_modules
import os
__all__ = []
def _global_import(name):
p = __import__(name, globals(), locals(), level=1)
lst = p.__all__ if '__all__' in dir(p) else dir(p)
if lst:
del globals()[name]
for k in lst:
if not k.startswith('__'):
globals()[k] = p.__dict__[k]
__all__.append(k)
_CURR_DIR = os.path.dirname(__file__)
for _, module_name, _ in iter_modules(
[_CURR_DIR]):
srcpath = os.path.join(_CURR_DIR, module_name + '.py')
if not os.path.isfile(srcpath):
continue
if module_name.endswith('_test'):
continue
if not module_name.startswith('_'):
_global_import(module_name)
| 1,359 | 24.185185 | 105 | py |
SyNet | SyNet-master/tensorpack/tensorpack/callbacks/param.py | # -*- coding: utf-8 -*-
# File: param.py
import operator
import os
import numpy as np
from abc import ABCMeta, abstractmethod
from collections import deque
import six
from ..compat import tfv1
from ..tfutils.common import get_op_tensor_name
from ..utils import logger
from .base import Callback
__all__ = ['HyperParam', 'GraphVarParam', 'ObjAttrParam',
'HyperParamSetter', 'HumanHyperParamSetter',
'ScheduledHyperParamSetter',
'StatMonitorParamSetter', 'HyperParamSetterWithFunc',
]
@six.add_metaclass(ABCMeta)
class HyperParam(object):
""" Base class for a hyperparam. """
def setup_graph(self):
""" setup the graph in ``setup_graph`` callback stage, if necessary"""
pass
@abstractmethod
def set_value(self, v):
"""
Set the value of the param.
Args:
v: the value to be set
"""
pass
@abstractmethod
def get_value(self):
"""
Get the value of the param.
"""
pass
@property
def readable_name(self):
""" A name to display """
return self._readable_name
class GraphVarParam(HyperParam):
""" A variable in the graph (e.g. learning_rate) can be a hyperparam."""
def __init__(self, name, shape=()):
"""
Args:
name(str): name of the variable.
shape(tuple): shape of the variable.
"""
self.name = name
self.shape = shape
self._readable_name, self.var_name = get_op_tensor_name(name)
def setup_graph(self):
""" Will setup the assign operator for that variable. """
all_vars = tfv1.global_variables() + tfv1.local_variables()
for v in all_vars:
if v.name == self.var_name:
self.var = v
break
else:
raise ValueError("{} is not a variable in the graph!".format(self.var_name))
def set_value(self, v):
""" Assign the variable a new value. """
if not self.var.dtype.is_floating and isinstance(v, float):
raise ValueError(
"HyperParam {} has type '{}'. Cannot update it using float values.".format(
self.name, self.var.dtype))
self.var.load(v)
def get_value(self):
""" Evaluate the variable. """
return self.var.eval()
class ObjAttrParam(HyperParam):
""" An attribute of an object can be a hyperparam. """
def __init__(self, obj, attrname, readable_name=None):
"""
Args:
obj: the object
attrname (str): the attribute
            readable_name(str): The name to display and set with. Defaults to ``attrname``.
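        Example:
            A minimal sketch (``model`` and ``dropout_rate`` are illustrative names for
            your own object and attribute):
            .. code-block:: python
                ScheduledHyperParamSetter(ObjAttrParam(model, 'dropout_rate'),
                                          [(0, 0.5), (50, 0.2)])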
"""
self.obj = obj
self.attrname = attrname
if readable_name is None:
self._readable_name = attrname
else:
self._readable_name = readable_name
def set_value(self, v):
setattr(self.obj, self.attrname, v)
def get_value(self):
return getattr(self.obj, self.attrname)
class HyperParamSetter(Callback):
"""
An abstract base callback to set hyperparameters.
Once the :meth:`trigger()` method is called,
the method :meth:`_get_value_to_set` will be used to get a new value for the hyperparameter.
"""
_chief_only = False
"""
Also enable this hyperparam setter in the :meth:`before_train` method.
"""
_enable_before_train = True
def __init__(self, param):
"""
Args:
            param(HyperParam or str): if it is a :class:`str`, it is assumed to
be a :class:`GraphVarParam`.
"""
# if a string, assumed to be a scalar graph variable
if isinstance(param, six.string_types):
param = GraphVarParam(param)
assert isinstance(param, HyperParam), type(param)
self.param = param
self._last_value = None
self._last_epoch_set = -1
def _setup_graph(self):
self.param.setup_graph()
def get_value_to_set(self):
"""
Returns:
The value to assign to the variable.
Note:
Subclasses will implement the abstract method
:meth:`_get_value_to_set`, which should return a new value to
set, or return None to do nothing.
"""
ret = self._get_value_to_set()
if ret is not None and ret != self._last_value:
if self.epoch_num != self._last_epoch_set: # Print this message at most once every epoch
if self._last_value is None:
logger.info("[HyperParamSetter] At global_step={}, {} is set to {:.6f}".format(
self.global_step, self.param.readable_name, ret))
else:
logger.info("[HyperParamSetter] At global_step={}, {} changes from {:.6f} to {:.6f}".format(
self.global_step, self.param.readable_name, self._last_value, ret))
self._last_epoch_set = self.epoch_num
self._last_value = ret
return ret
@abstractmethod
def _get_value_to_set(self):
pass
def get_current_value(self):
"""
Returns:
The current value of the param.
"""
return self.param.get_value()
def _trigger(self):
self._set_param()
def _before_train(self):
if self._enable_before_train:
self._set_param()
def _set_param(self):
v = self.get_value_to_set()
if v is not None:
self.param.set_value(v)
class HumanHyperParamSetter(HyperParamSetter):
"""
    Set hyperparameter by loading the value from a file each time it gets called.
This is useful for manually tuning some parameters (e.g. learning_rate)
without interrupting the training.
"""
def __init__(self, param, file_name='hyper.txt'):
"""
Args:
param: same as in :class:`HyperParamSetter`.
file_name(str): a file containing the new value of the parameter.
Each line in the file is a ``k:v`` pair, for example, ``learning_rate:1e-4``.
If the pair is not found, the param will not be changed.
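        Example:
            A minimal sketch: tune the learning rate while training runs by editing
            ``hyper.txt`` in the log directory (the path and value below are illustrative).
            .. code-block:: python
                HumanHyperParamSetter('learning_rate')
                # then, during training:  echo "learning_rate:1e-4" > train_log/run1/hyper.txt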
"""
super(HumanHyperParamSetter, self).__init__(param)
self.file_name = os.path.join(logger.get_logger_dir(), file_name)
logger.info("Use {} to set hyperparam: '{}'.".format(
self.file_name, self.param.readable_name))
def _get_value_to_set(self):
# ignore if no such file exists
if not os.path.isfile(self.file_name):
return None
try:
with open(self.file_name) as f:
lines = f.readlines()
lines = [s.strip().split(':') for s in lines]
dic = {str(k): float(v) for k, v in lines}
ret = dic[self.param.readable_name]
return ret
except Exception:
logger.warn(
"Cannot find {} in {}".format(
self.param.readable_name, self.file_name))
return None
class ScheduledHyperParamSetter(HyperParamSetter):
"""
Set hyperparameters by a predefined epoch-based schedule.
"""
def __init__(self, param, schedule, interp=None, step_based=False):
"""
Args:
param: same as in :class:`HyperParamSetter`.
schedule (list): with the format ``[(epoch1, val1), (epoch2, val2), (epoch3, val3)]``.
Each ``(ep, val)`` pair means to set the param
to "val" **after** the completion of epoch `ep`.
If ep == 0, the value will be set before the first epoch
(because by default the first is epoch 1).
The epoch numbers have to be increasing.
interp (str or None): Either None or 'linear'.
If None, the parameter will only be set when the specific epoch or steps
is reached exactly. If 'linear', perform linear interpolation (but no extrapolation)
every time this callback is triggered.
step_based (bool): interpret ``schedule`` as (step, value) instead
of (epoch, value).
Example:
.. code-block:: python
ScheduledHyperParamSetter('learning_rate',
[(30, 1e-2), (60, 1e-3), (85, 1e-4), (95, 1e-5)]),
"""
schedule = [(int(a), float(b)) for a, b in schedule]
self.schedule = sorted(schedule, key=operator.itemgetter(0))
if interp is not None:
assert interp == 'linear'
self.interp = interp
self._step = step_based
super(ScheduledHyperParamSetter, self).__init__(param)
def _get_value_to_set(self): # override parent
return self._get_value_to_set_at_point(self._current_point())
def _current_point(self):
return self.global_step if self._step else self.epoch_num
def _check_value_at_beginning(self):
v = None
# we are at `before_train`, therefore the epoch/step associated with `current_point` has finished.
for p in range(0, self._current_point() + 1):
v = self._get_value_to_set_at_point(p) or v
actual_value = self.param.get_value()
if v is not None and not np.isclose(v, actual_value):
logger.warn("According to scheduler {}, parameter '{}' should become {} at the current point. "
"However its current value is {}. "
"If this is the only scheduler being used, you may want to check whether your "
"initialization of the parameter is as expected".format(
self, self.param.readable_name, v, actual_value))
def _get_value_to_set_at_point(self, point):
"""
Using schedule, compute the value to be set at a given point.
"""
laste, lastv = None, None
for e, v in self.schedule:
if e == point:
return v # meet the exact boundary, return directly
if e > point:
break
laste, lastv = e, v
if laste is None or laste == e:
# hasn't reached the first scheduled point, or reached the end of all scheduled points
return None
if self.interp is None:
# If no interpolation, nothing to do.
return None
v = (point - laste) * 1. / (e - laste) * (v - lastv) + lastv
return v
def _before_train(self):
super(ScheduledHyperParamSetter, self)._before_train()
self._check_value_at_beginning()
def _trigger_epoch(self):
if not self._step:
self.trigger()
def _trigger_step(self):
if self._step:
self.trigger()
def __str__(self):
return "ScheduledHyperParamSetter(schedule={})".format(self.schedule)
class HyperParamSetterWithFunc(HyperParamSetter):
""" Set the parameter by a function of epoch num and old value. """
def __init__(self, param, func):
"""
Args:
param: same as in :class:`HyperParamSetter`.
func: ``param`` will be set by ``new_value = func(epoch_num, old_value)``.
``epoch_num`` is the number of epochs that have finished.
Example:
Decrease by a factor of 0.9 every two epochs:
.. code-block:: python
HyperParamSetterWithFunc('learning_rate',
lambda e, x: x * 0.9 if e % 2 == 0 else x)
"""
super(HyperParamSetterWithFunc, self).__init__(param)
self.f = func
def _get_value_to_set(self):
return self.f(self.epoch_num, self.get_current_value())
class StatMonitorParamSetter(HyperParamSetter):
"""
Change the param by monitoring the change of a scalar statistics.
The param will be changed when the scalar does not decrease/increase enough.
Once triggered, this callback observes the latest **one** value of ``stat_name``, from the monitor backend.
This callback will then change a hyperparameter ``param`` by ``new_value = value_func(old_value)``, if:
``min(history) >= history[0] - threshold``, where
``history = [the most recent k observations of stat_name]``
Note:
        The statistics of interest must be created at a frequency higher than or equal to the frequency of this callback.
For example, using ``PeriodicTrigger(StatMonitorParamSetter(...), every_k_steps=100)``
is meaningless if the statistics to be monitored is only updated every 500 steps.
Callbacks are executed in order. Therefore, if the statistics to be monitored
is created after this callback, the behavior of this callback may get delayed.
Example:
If validation error wasn't decreasing for 5 epochs, decay the learning rate by 0.2:
.. code-block:: python
StatMonitorParamSetter('learning_rate', 'val-error',
lambda x: x * 0.2, threshold=0, last_k=5)
"""
_enable_before_train = False
def __init__(self, param, stat_name, value_func, threshold,
last_k, reverse=False):
"""
Args:
param: same as in :class:`HyperParamSetter`.
stat_name (str): name of the statistics.
value_func (float -> float): a function which returns a new value
taking the old value.
threshold (float): change threshold.
last_k (int): use last k observations of statistics.
reverse (bool): monitor increasing instead of decreasing.
If True, ``param`` will be changed when ``max(history) <= history[0] + threshold``.
"""
super(StatMonitorParamSetter, self).__init__(param)
self.stat_name = stat_name
self.value_func = value_func
self.history = deque(maxlen=last_k)
self.threshold = threshold
self.reverse = reverse
def _get_value_to_set(self):
try:
last = self.trainer.monitors.get_history(self.stat_name)[-1]
except (KeyError, IndexError):
logger.warn(
"[StatMonitorParamSetter] No history data available for key '{}'.".format(self.stat_name))
return None
if len(self.history) and last[0] == self.history[-1][0]:
logger.warn("StatMonitorParamSetter is triggered, but no new data has been added since last time.")
return None
self.history.append(last)
if len(self.history) < self.history.maxlen:
return None
values = [k[1] for k in self.history]
hist_first = values[0]
if not self.reverse:
hist_min = min(values)
if hist_min < hist_first - self.threshold: # small enough
return None
else:
hist_max = max(values)
if hist_max > hist_first + self.threshold: # large enough
return None
self.history.clear()
logger.info(
"[StatMonitorParamSetter] Triggered, history of {}: ".format(
self.stat_name) + ','.join([str(round(x, 3)) for x in values]))
return self.value_func(self.get_current_value())
| 15,332 | 34.741259 | 112 | py |
SyNet | SyNet-master/tensorpack/tensorpack/callbacks/summary.py | # -*- coding: utf-8 -*-
# File: summary.py
import numpy as np
from collections import deque
from ..compat import tfv1 as tf
from ..tfutils.common import get_op_tensor_name
from ..utils import logger
from ..utils.naming import MOVING_SUMMARY_OPS_KEY
from .base import Callback
__all__ = ['MovingAverageSummary', 'MergeAllSummaries', 'SimpleMovingAverage']
class MovingAverageSummary(Callback):
"""
Maintain the moving average of summarized tensors in every step,
by ops added to the collection.
Note that it only **maintains** the moving averages by updating
the relevant variables in the graph,
the actual summary should be done in other callbacks.
This callback is one of the :func:`DEFAULT_CALLBACKS()`.
"""
def __init__(self, collection=MOVING_SUMMARY_OPS_KEY, train_op=None):
"""
Args:
collection(str): the collection of EMA-maintaining ops.
The default value would work with
the tensors you added by :func:`tfutils.summary.add_moving_summary()`,
but you can use other collections as well.
train_op (tf.Operation or str): the (name of) training op to associate the maintaing ops with.
If not provided, the EMA-maintaining ops will be hooked to
`trainer.hooked_session` and be executed in every iteration.
Otherwise, the EMA-maintaining ops will be executed whenever
the training op is executed.
"""
self._collection = collection
self._train_op = train_op
def _setup_graph(self):
ops = [k.op for k in tf.get_collection(self._collection)]
if self._train_op is None:
logger.info("[MovingAverageSummary] {} operations in collection '{}' "
"will be run with session hooks.".format(len(ops), self._collection))
self.ema_op = tf.group(*ops, name='maintain_moving_average_summary')
self._fetch = tf.train.SessionRunArgs(fetches=self.ema_op)
else:
if isinstance(self._train_op, tf.Tensor):
self._train_op = self._train_op.op
if not isinstance(self._train_op, tf.Operation):
self._train_op = self.graph.get_operation_by_name(self._train_op)
self._train_op._add_control_inputs(ops)
logger.info("[MovingAverageSummary] {} operations in collection '{}'"
" will be run together with operation '{}'.".format(
len(ops), self._collection, self._train_op.name))
def _before_run(self, _):
if self._train_op is None:
return self._fetch
class MergeAllSummaries_RunAlone(Callback):
def __init__(self, period, key):
self._period = period
self._key = key
def _setup_graph(self):
size = len(tf.get_collection(self._key))
logger.info("Summarizing collection '{}' of size {}.".format(self._key, size))
self.summary_op = tf.summary.merge_all(self._key)
def _trigger_step(self):
if self._period:
if (self.local_step + 1) % self._period == 0:
self._trigger()
def _trigger(self):
if self.summary_op:
summary = self.summary_op.eval()
self.trainer.monitors.put_summary(summary)
class MergeAllSummaries_RunWithOp(Callback):
def __init__(self, period, key):
self._period = period
self._key = key
def _setup_graph(self):
size = len(tf.get_collection(self._key))
logger.info("Summarizing collection '{}' of size {}.".format(self._key, size))
self.summary_op = tf.summary.merge_all(self._key)
if self.summary_op is not None:
self._fetches = tf.train.SessionRunArgs(self.summary_op)
else:
self._fetches = None
def _need_run(self):
if self.local_step == self.trainer.steps_per_epoch - 1:
return True
if self._period > 0 and (self.local_step + 1) % self._period == 0:
return True
return False
def _before_run(self, ctx):
if self._need_run():
return self._fetches
return None
def _after_run(self, _, run_values):
summary = run_values.results
if summary is None:
return
self.trainer.monitors.put_summary(summary)
def MergeAllSummaries(period=0, run_alone=False, key=None):
"""
Evaluate all summaries by ``tf.summary.merge_all``, and write them to logs.
This callback is one of the :func:`DEFAULT_CALLBACKS()`.
Args:
period (int): by default the callback summarizes once every epoch.
This option (if not set to 0) makes it additionally summarize every ``period`` steps.
run_alone (bool): whether to evaluate the summaries alone.
If True, summaries will be evaluated after each epoch alone.
If False, summaries will be evaluated together with the
`sess.run` calls, in the last step of each epoch.
For :class:`SimpleTrainer`, it needs to be False because summary may
depend on inputs.
key (str): the collection of summary tensors. Same as in ``tf.summary.merge_all``.
Default is ``tf.GraphKeys.SUMMARIES``.
"""
if key is None:
key = tf.GraphKeys.SUMMARIES
period = int(period)
if run_alone:
return MergeAllSummaries_RunAlone(period, key)
else:
return MergeAllSummaries_RunWithOp(period, key)
class SimpleMovingAverage(Callback):
"""
Monitor Simple Moving Average (SMA), i.e. an average within a sliding window,
of some tensors.
"""
def __init__(self, tensors, window_size):
"""
Args:
tensors (str or [str]): names of tensors
window_size (int): size of the moving window
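        Example:
            A minimal sketch (the tensor name is illustrative; it must refer to a scalar tensor):
            .. code-block:: python
                SimpleMovingAverage(['total_cost'], window_size=100)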
"""
        if not isinstance(tensors, (list, tuple)):
            # allow a single tensor name, as documented in the docstring
            tensors = [tensors]
        self._tensor_names = [get_op_tensor_name(x)[1] for x in tensors]
self._display_names = [get_op_tensor_name(x)[0] for x in tensors]
self._window = int(window_size)
self._queue = deque(maxlen=window_size)
def _setup_graph(self):
tensors = self.get_tensors_maybe_in_tower(self._tensor_names)
for t in tensors:
assert t.get_shape().ndims == 0, \
"SimpleMovingAverage only accepts scalar tensor! Got one with {}".format(t.get_shape())
self._fetch = tf.train.SessionRunArgs(fetches=tensors)
def _before_run(self, _):
return self._fetch
def _after_run(self, _, rv):
results = rv.results
self._queue.append(results)
def _trigger_step(self):
if self.global_step % self._window == 0:
averages = np.asarray(self._queue).mean(axis=0)
for name, avg in zip(self._display_names, averages):
self.trainer.monitors.put_scalar(name + '/SMA', avg)
| 6,892 | 36.666667 | 106 | py |
SyNet | SyNet-master/tensorpack/tensorpack/callbacks/inference_runner.py | # -*- coding: utf-8 -*-
# File: inference_runner.py
import itertools
import sys
from contextlib import contextmanager
import tqdm
from tensorflow.python.training.monitored_session import _HookedSession as HookedSession
from ..compat import tfv1 as tf
from ..dataflow.base import DataFlow
from ..input_source import FeedInput, InputSource, QueueInput, StagingInput
from ..tfutils.tower import PredictTowerContext
from ..utils import logger
from ..utils.utils import get_tqdm_kwargs
from .base import Callback
from .group import Callbacks
from .inference import Inferencer
__all__ = ['InferenceRunnerBase', 'InferenceRunner',
'DataParallelInferenceRunner']
def _device_from_int(dev):
return '/gpu:{}'.format(dev) if dev >= 0 else '/cpu:0'
class InferencerToHook(tf.train.SessionRunHook):
def __init__(self, inf, fetches):
self._inf = inf
self._fetches = fetches
def before_run(self, _):
return tf.train.SessionRunArgs(fetches=self._fetches)
def after_run(self, _, run_values):
self._inf.on_fetches(run_values.results)
@contextmanager
def _inference_context():
msg = "You might need to check your input implementation."
try:
yield
except (StopIteration, tf.errors.CancelledError):
logger.error(
"[InferenceRunner] input stopped before reaching its __len__()! " + msg)
raise
except tf.errors.OutOfRangeError: # tf.data reaches an end
pass
class InferenceRunnerBase(Callback):
""" Base class for inference runner.
Note:
1. InferenceRunner will use `input.size()` to determine
how much iterations to run, so you're responsible to ensure that
`input.size()` is accurate.
2. Only works with instances of `TowerTrainer`.
"""
def __init__(self, input, infs):
"""
Args:
input (InputSource): the input to use. Must have an accurate ``size()``.
infs (list[Inferencer]): list of :class:`Inferencer` to run.
"""
self._input_source = input
if not isinstance(infs, list):
self.infs = [infs]
else:
self.infs = infs
for v in self.infs:
assert isinstance(v, Inferencer), v
try:
self._size = input.size()
except NotImplementedError:
self._size = 0
self._hooks = []
def register_hook(self, hook):
"""
Args:
hook (tf.train.SessionRunHook):
"""
self._hooks.append(hook)
def _before_train(self):
self._hooked_sess = HookedSession(self.trainer.sess, self._hooks)
self._input_callbacks.before_train()
if self._size > 0:
logger.info("[InferenceRunner] Will eval {} iterations".format(self._size))
else:
logger.warn("[InferenceRunner] Got an InputSource with unknown size! Will iterate until OutOfRangeError!")
def _after_train(self):
self._input_callbacks.after_train()
class InferenceRunner(InferenceRunnerBase):
"""
A callback that runs a list of :class:`Inferencer` on some :class:`InputSource`.
"""
def __init__(self, input, infs, tower_name='InferenceTower', tower_func=None, device=0):
"""
Args:
input (InputSource or DataFlow): The :class:`InputSource` to run
inference on. If given a DataFlow, will use :class:`FeedInput`.
infs (list): a list of :class:`Inferencer` instances.
tower_name (str): the name scope of the tower to build.
If multiple InferenceRunner are used, each needs a different tower_name.
tower_func (tfutils.TowerFunc or None): the tower function to be used to build the graph.
                Defaults to calling `trainer.tower_func` under a `training=False` TowerContext,
                but you can change it to a different tower function
                if you need to run inference with several different graphs.
device (int): the device to use
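        Example:
            A minimal sketch (``dataset_val`` is an illustrative DataFlow; ``ScalarStats``
            is one of the :class:`Inferencer` implementations in this package):
            .. code-block:: python
                InferenceRunner(dataset_val, [ScalarStats('cost')])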
"""
if isinstance(input, DataFlow):
# use infinite=False so that a dataflow without size will stop normally
# TODO a better way to handle inference size
input = FeedInput(input, infinite=False)
assert isinstance(input, InputSource), input
assert not isinstance(input, StagingInput), input
self._tower_name = tower_name
self._device_id = device
self._device = _device_from_int(device)
self._tower_func = tower_func
super(InferenceRunner, self).__init__(input, infs)
def _build_hook(self, inf):
out_names = inf.get_fetches()
fetches = self._tower_handle.get_tensors(out_names)
return InferencerToHook(inf, fetches)
def _setup_graph(self):
if self._tower_func is None:
assert self.trainer.tower_func is not None, "You must set tower_func of the trainer to use InferenceRunner!"
self._tower_func = self.trainer.tower_func
input_callbacks = self._input_source.setup(self._tower_func.input_signature)
vs_name = self.trainer._vs_name_for_predictor(self._device_id)
logger.info("[InferenceRunner] Building tower '{}' on device {} {}...".format(
self._tower_name, self._device,
"with variable scope '{}'".format(vs_name) if vs_name else ''))
with tf.variable_scope(tf.get_variable_scope(), reuse=True), \
tf.device(self._device), \
PredictTowerContext(self._tower_name, vs_name=vs_name):
self._tower_func(*self._input_source.get_input_tensors())
self._tower_handle = self._tower_func.towers[-1]
for h in [self._build_hook(inf) for inf in self.infs]:
self.register_hook(h)
# trigger_{step,epoch}, {before,after}_epoch is ignored.
# We assume that InputSource callbacks won't use these methods
self._input_callbacks = Callbacks(input_callbacks)
for h in self._input_callbacks.get_hooks():
self.register_hook(h)
for inf in self.infs:
inf.setup_graph(self.trainer)
self._input_callbacks.setup_graph(self.trainer)
def _trigger(self):
for inf in self.infs:
inf.before_epoch()
self._input_source.reset_state()
# iterate over the data, and run the hooked session
with _inference_context(), \
tqdm.tqdm(total=self._size, **get_tqdm_kwargs()) as pbar:
num_itr = self._size if self._size > 0 else sys.maxsize
for _ in range(num_itr):
self._hooked_sess.run(fetches=[])
pbar.update()
for inf in self.infs:
inf.trigger_epoch()
class DataParallelInferenceRunner(InferenceRunnerBase):
"""
Inference with data-parallel support on multiple GPUs.
It will build one predict tower on each GPU, and run prediction
with a large total batch in parallel on all GPUs.
It will run the remainder (when the total size of input is not a multiple of #GPU)
sequentially.
"""
def __init__(self, input, infs, gpus, tower_name='InferenceTower', tower_func=None):
"""
Args:
input (DataFlow or QueueInput)
gpus (int or list[int]): #gpus, or list of GPU id
tower_name (str): the name scope of the tower to build.
If multiple InferenceRunner are used, each needs a different tower_name.
tower_func (tfutils.TowerFunc or None): the tower function to be used to build the graph.
The tower function will be called under a `training=False` TowerContext.
The default is `trainer.tower_func`,
but you can change it to a different tower function
                if you need to run inference with several different models.
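        Example:
            A minimal sketch (``dataset_val`` and the inferencer are illustrative,
            as in :class:`InferenceRunner`):
            .. code-block:: python
                DataParallelInferenceRunner(dataset_val, [ScalarStats('cost')], gpus=2)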
"""
if isinstance(gpus, int):
gpus = list(range(gpus))
self._devices = [_device_from_int(k) for k in gpus]
self._tower_names = ['{}{}'.format(tower_name, k) for k in range(len(gpus))]
if isinstance(input, DataFlow):
input = QueueInput(input)
assert isinstance(input, QueueInput), input
super(DataParallelInferenceRunner, self).__init__(input, infs)
assert self._size > 0, "Input for DataParallelInferenceRunner must have a size!"
self._hooks = []
self._hooks_parallel = []
self._tower_func = tower_func
def _setup_graph(self):
self._handles = []
if self._tower_func is None:
assert self.trainer.tower_func is not None, "You must set tower_func of the trainer to use InferenceRunner!"
self._tower_func = self.trainer.tower_func
input_callbacks = self._input_source.setup(self._tower_func.input_signature)
with tf.variable_scope(tf.get_variable_scope(), reuse=True):
for idx, dev in enumerate(self._devices):
vs_name = self.trainer._vs_name_for_predictor(idx)
with tf.device(dev), PredictTowerContext(
self._tower_names[idx], vs_name=vs_name):
logger.info("[InferenceRunner] Building tower '{}' on device {} {}...".format(
self._tower_names[idx], dev,
"with variable scope '{}'".format(vs_name) if vs_name else ''))
# TODO log for tower creation, here or in tower.py?
self._tower_func(*self._input_source.get_input_tensors())
self._handles.append(self._tower_func.towers[-1])
# setup callbacks and hooks
self._input_callbacks = Callbacks(input_callbacks)
# TODO InputSource might have hooks which break us.
# e.g. hooks from StagingInput will force the consumption
# of nr_tower datapoints in every run.
input_hooks = self._input_callbacks.get_hooks()
self._hooks.extend([self._build_hook(inf) for inf in self.infs] + input_hooks)
self._hooks_parallel.extend([self._build_hook_parallel(inf) for inf in self.infs] + input_hooks)
for inf in self.infs:
inf.setup_graph(self.trainer)
self._input_callbacks.setup_graph(self.trainer)
def register_hook(self, h):
logger.info(
"[DataParallelInferenceRunner] Registering hook {} on both parallel and sequential inference.")
self._hooks.append(h)
self._hooks_parallel.append(h)
class _InferencerToHookDataParallel(InferencerToHook):
def __init__(self, inf, fetches, size):
"""
Args:
size(int): number of tensors to fetch per tower
"""
super(DataParallelInferenceRunner._InferencerToHookDataParallel, self).__init__(inf, fetches)
assert len(self._fetches) % size == 0
self._sz = size
def after_run(self, _, run_values):
res = run_values.results
for i in range(0, len(res), self._sz):
vals = res[i:i + self._sz]
self._inf.on_fetches(vals)
def _build_hook_parallel(self, inf):
out_names = inf.get_fetches()
sz = len(out_names)
fetches = list(itertools.chain(*[t.get_tensors(out_names) for t in self._handles]))
return self._InferencerToHookDataParallel(inf, fetches, sz)
def _build_hook(self, inf):
out_names = inf.get_fetches()
fetches = self._handles[0].get_tensors(out_names)
return InferencerToHook(inf, fetches)
def _before_train(self):
super(DataParallelInferenceRunner, self)._before_train()
self._parallel_hooked_sess = HookedSession(self.trainer.sess, self._hooks_parallel)
def _trigger(self):
for inf in self.infs:
inf.before_epoch()
total = self._size
nr_tower = len(self._devices)
self._input_source.reset_state()
with _inference_context():
with tqdm.tqdm(total=total, **get_tqdm_kwargs()) as pbar:
while total >= nr_tower:
self._parallel_hooked_sess.run(fetches=[])
pbar.update(nr_tower)
total -= nr_tower
# take care of the rest
for _ in range(total):
self._hooked_sess.run(fetches=[])
pbar.update(1)
for inf in self.infs:
inf.trigger_epoch()
| 12,490 | 39.555195 | 120 | py |
SyNet | SyNet-master/tensorpack/tensorpack/input_source/input_source.py | # -*- coding: utf-8 -*-
# File: input_source.py
import threading
from contextlib import contextmanager
from itertools import chain
import tensorflow as tf
from ..compat import tfv1
from ..callbacks.base import Callback, CallbackFactory
from ..callbacks.graph import RunOp
from ..dataflow import DataFlow, MapData, RepeatedData, DataFlowTerminated
from ..tfutils.common import get_op_tensor_name
from ..tfutils.dependency import dependency_of_fetches
from ..tfutils.summary import add_moving_summary
from ..tfutils.tower import get_current_tower_context
from ..utils import logger
from ..utils.concurrency import ShareSessionThread
from .input_source_base import InputSource, build_or_reuse_placeholder
try:
from tensorflow.python.ops.data_flow_ops import StagingArea
except ImportError:
pass
__all__ = ['PlaceholderInput', 'FeedInput', 'FeedfreeInput',
'QueueInput', 'BatchQueueInput',
'DummyConstantInput', 'TensorInput',
'ZMQInput', 'TFDatasetInput',
'StagingInput']
def _get_reset_callback(df):
return CallbackFactory(setup_graph=lambda _: df.reset_state())
def _make_feeds(placeholders, datapoint):
assert len(datapoint) == len(placeholders), \
"Size of datapoint and placeholders are different: {} != {}".format(
len(datapoint), len(placeholders))
if isinstance(datapoint, (list, tuple)):
return dict(zip(placeholders, datapoint))
elif isinstance(datapoint, dict):
ret = {p: datapoint[p.op.name] for p in placeholders}
return ret
else:
raise TypeError("Got a datapoint of type {}!".format(type(datapoint)))
class PlaceholderInput(InputSource):
"""
Just produce placeholders as input tensors.
"""
def __init__(self):
pass
def _setup(self, inputs):
self._all_placehdrs = [build_or_reuse_placeholder(v) for v in inputs]
def _get_input_tensors(self):
return self._all_placehdrs
class FeedInput(InputSource):
"""
Input by iterating over a DataFlow and feed datapoints.
Note:
        If `get_input_tensors()` is called more than once, it will return the same placeholders (i.e. feed points)
        as the first call.
Therefore you can't use it for data-parallel training.
"""
class _FeedCallback(Callback):
def __init__(self, ds, placeholders):
self._ds = ds
self._itr = self._ds.__iter__()
self._placeholders = placeholders
def _before_run(self, _):
dp = next(self._itr)
assert len(dp) == len(self._placeholders), "[FeedInput] datapoints and inputs are of different length!"
feed = _make_feeds(self._placeholders, dp)
return tfv1.train.SessionRunArgs(fetches=[], feed_dict=feed)
def _reset(self):
self._itr = self._ds.__iter__()
def __init__(self, ds, infinite=True):
"""
Args:
ds (DataFlow): the input DataFlow.
infinite (bool): When set to False, will raise StopIteration when
ds is exhausted.
"""
if not isinstance(ds, DataFlow):
raise ValueError("FeedInput takes a DataFlow! Got {}".format(ds))
self.ds = ds
if infinite:
self._iter_ds = RepeatedData(self.ds, -1)
else:
self._iter_ds = self.ds
def _size(self):
return len(self.ds)
def _setup(self, inputs):
# placeholders as input are always safe to reuse.
self._all_placehdrs = [build_or_reuse_placeholder(v) for v in inputs]
self._cb = self._FeedCallback(self._iter_ds, self._all_placehdrs)
def _get_input_tensors(self):
return self._all_placehdrs
def _reset_state(self):
self._cb._reset()
def _get_callbacks(self):
return [self._cb, _get_reset_callback(self._iter_ds)]
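# Illustrative sketch (not part of the original tensorpack source): constructing a
# FeedInput. `my_dataflow` stands for any user DataFlow whose datapoints match the
# model's input signature. Because the same placeholders are returned on every call,
# this InputSource cannot be used for data-parallel training (see the docstring above).
def _example_feed_input(my_dataflow):
    # infinite=False stops iteration when the dataflow is exhausted, which is what
    # a single evaluation pass over a finite dataset typically wants.
    return FeedInput(my_dataflow, infinite=False)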
class FeedfreeInput(InputSource):
""" Abstract base for input without feed,
e.g. by queue or other operations. """
def _reset_state(self):
pass
# TODO enqueue_many? https://github.com/tensorflow/tensorflow/issues/7817#issuecomment-282053155
class EnqueueThread(ShareSessionThread):
def __init__(self, queue, ds, placehdrs):
super(EnqueueThread, self).__init__()
self.name = 'EnqueueThread: enqueue dataflow to TF queue "{}"'.format(queue.name)
self.daemon = True
self.dataflow = ds
self.queue = queue
self.placehdrs = placehdrs
self.op = self.queue.enqueue(self.placehdrs)
self.close_op = self.queue.close(cancel_pending_enqueues=True)
self._running = threading.Event()
self._running.set()
# self._size = queue.size()
def run(self):
with self.default_sess():
try:
self.reinitialize_dataflow()
while True:
# pausable loop
if not self._running.is_set():
self._running.wait()
dp = next(self._itr)
feed = _make_feeds(self.placehdrs, dp)
# _, sz = sess.run([self.op, self._sz], feed_dict=feed)
self.op.run(feed_dict=feed)
except (tf.errors.CancelledError, tf.errors.OutOfRangeError):
pass
except DataFlowTerminated:
logger.info("[EnqueueThread] DataFlow has terminated.")
except Exception as e:
if isinstance(e, RuntimeError) and 'closed Session' in str(e):
pass
else:
logger.exception("[EnqueueThread] Exception in thread {}:".format(self.name))
finally:
try:
self.close_op.run()
except Exception:
pass
logger.info("[EnqueueThread] Thread {} Exited.".format(self.name))
def reinitialize_dataflow(self):
self._itr = self.dataflow.__iter__()
def pause(self):
self._running.clear()
def resume(self):
self._running.set()
class QueueInput(FeedfreeInput):
""" Enqueue datapoints from a DataFlow to a TF queue.
And the model receives dequeued tensors.
"""
def __init__(self, ds, queue=None):
"""
Args:
ds(DataFlow): the input DataFlow.
queue (tf.QueueBase): A :class:`tf.QueueBase` whose type
should match the corresponding input signature of the model.
Defaults to a FIFO queue of size 50.
"""
if not isinstance(ds, DataFlow):
raise ValueError("QueueInput takes a DataFlow! Got {}".format(ds))
self.queue = queue
self.ds = ds
self._inf_ds = RepeatedData(ds, -1)
self._started = False
def _size(self):
return len(self.ds)
def _setup(self, inputs):
self._input_placehdrs = [build_or_reuse_placeholder(v) for v in inputs]
assert len(self._input_placehdrs) > 0, \
"QueueInput has to be used with some inputs!"
with self.cached_name_scope():
if self.queue is None:
self.queue = tfv1.FIFOQueue(
50, [x.dtype for x in self._input_placehdrs],
name='input_queue')
logger.info("Setting up the queue '{}' for CPU prefetching ...".format(self.queue.name))
self.thread = EnqueueThread(self.queue, self._inf_ds, self._input_placehdrs)
self._dequeue_op = self.queue.dequeue(name='dequeue_for_reset')
def refill_queue(self):
"""
Clear the queue, then call dataflow.__iter__() again and fill into the queue.
"""
self.thread.pause() # pause enqueue
opt = tfv1.RunOptions()
opt.timeout_in_ms = 2000 # 2s
sess = tfv1.get_default_session()
# dequeue until empty
try:
while True:
sess.run(self._dequeue_op, options=opt)
except tf.errors.DeadlineExceededError:
pass
# reset dataflow, start thread
self.thread.reinitialize_dataflow()
self.thread.resume()
def _create_ema_callback(self):
"""
        Create a hook-only callback which maintains an EMA of the queue size,
        and also adds the EMA as a scalar summary.
"""
with self.cached_name_scope():
# in TF there is no API to get queue capacity, so we can only summary the size
size = tf.cast(self.queue.size(), tf.float32, name='queue_size')
size_ema_op = add_moving_summary(size, collection=None, decay=0.5)[0].op
ret = RunOp(
lambda: size_ema_op,
run_before=False,
run_as_trigger=False,
run_step=True)
ret.name_scope = "InputSource/EMA"
return ret
def _get_callbacks(self):
from ..callbacks.concurrency import StartProcOrThread
cb = StartProcOrThread(self.thread)
return [cb, self._create_ema_callback(), _get_reset_callback(self._inf_ds)]
def _get_input_tensors(self):
with tf.device('/cpu:0'), self.cached_name_scope():
ret = self.queue.dequeue(name='input_deque')
if isinstance(ret, tf.Tensor): # only one input
ret = [ret]
assert len(ret) == len(self._input_placehdrs)
for qv, v in zip(ret, self._input_placehdrs):
qv.set_shape(v.get_shape())
return ret
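# Illustrative sketch (not part of the original tensorpack source): wrapping a
# DataFlow with QueueInput so the graph dequeues prefetched tensors instead of being
# fed through feed_dict. `my_dataflow` is a hypothetical DataFlow.
def _example_queue_input(my_dataflow):
    input_source = QueueInput(my_dataflow)
    # A custom queue with matching dtypes may be supplied instead of the default
    # 50-element FIFO queue, e.g. a shuffling queue:
    #   q = tfv1.RandomShuffleQueue(256, 50, [tf.float32, tf.int32])
    #   input_source = QueueInput(my_dataflow, queue=q)
    return input_source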
class BatchQueueInput(QueueInput):
""" Enqueue datapoints from a DataFlow to a TF queue.
And the model receives batches formed by concatenating
dequeued tensors.
"""
def __init__(self, ds, batch_size, queue=None):
"""
Args:
ds(DataFlow): the input DataFlow.
batch_size(int): the batch size.
queue (tf.QueueBase): A :class:`tf.QueueBase` whose type
should match the corresponding input signature of the model.
Defaults to a FIFO queue of size 3000.
"""
super(BatchQueueInput, self).__init__(ds, queue)
self.batch_size = int(batch_size)
def _size(self):
return len(self.ds) // self.batch_size
def _setup(self, inputs):
logger.info("Setting up the queue for CPU prefetching ...")
self.input_placehdrs = [build_or_reuse_placeholder(v) for v in inputs]
assert len(self.input_placehdrs) > 0, \
"BatchQueueInput has to be used with some input signature!"
# prepare placeholders without the first dimension
placehdrs_nobatch = []
for p in self.input_placehdrs:
placehdrs_nobatch.append(tfv1.placeholder(
dtype=p.dtype, shape=p.get_shape().as_list()[1:],
name=get_op_tensor_name(p.name)[0] + '-nobatch'))
# dequeue_many requires fully-defined shapes
shape_err = "Use of BatchQueueInput requires inputs to have fully-defined "
"shapes except for the batch dimension"
shapes = []
for p in placehdrs_nobatch:
assert p.get_shape().is_fully_defined(), shape_err
shapes.append(p.get_shape())
with self.cached_name_scope():
if self.queue is None:
self.queue = tf.FIFOQueue(
3000, [x.dtype for x in self.input_placehdrs],
shapes=shapes,
name='input_queue')
for shp in self.queue.shapes:
assert shp.is_fully_defined(), shape_err
self.thread = EnqueueThread(self.queue, self._inf_ds, placehdrs_nobatch)
def _get_input_tensors(self):
with tf.device('/cpu:0'), self.cached_name_scope():
ret = self.queue.dequeue_many(self.batch_size, name='input_deque')
if isinstance(ret, tf.Tensor): # only one input
ret = [ret]
assert len(ret) == len(self.input_placehdrs)
for qv, v in zip(ret, self.input_placehdrs):
shp = v.get_shape().as_list()
shp[0] = self.batch_size
qv.set_shape(shp)
return ret
# TODO tensor inputs can be drained? look at the new dataset API.
class TensorInput(FeedfreeInput):
""" Use inputs from a list of tensors, e.g. a TF data reading pipeline.
The PTB training example shows how to use it.
"""
def __init__(self, get_tensor_fn, size=None):
"""
Args:
get_tensor_fn ( -> [tf.Tensor]): a function which returns a list of input tensors
(for example, [image, label]) when called.
It will be called under a TowerContext and should return the inputs to be used in that tower.
                The returned tensors will be evaluated every iteration; it's your job to make sure that's possible.
size(int): size of this input. Use None to leave it undefined.
"""
if not callable(get_tensor_fn):
raise ValueError("get_tensor_fn has to be a function! Got {}".format(get_tensor_fn))
self.get_tensor_fn = get_tensor_fn
if size is not None:
size = int(size)
assert size > 0
self._fixed_size = size
def _setup(self, input_signature):
self._spec = input_signature
def _size(self):
if self._fixed_size is None:
raise NotImplementedError("size of TensorInput is undefined!")
return self._fixed_size
def _get_input_tensors(self):
with self.cached_name_scope():
ret = self.get_tensor_fn()
assert isinstance(ret, (list, tuple)), "get_tensor_fn needs to return a list!"
assert len(ret) == len(self._spec), \
"get_tensor_fn returns {} tensors but there are {} inputs".format(len(ret), len(self._spec))
return ret
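# Illustrative sketch (not part of the original tensorpack source): a TensorInput
# built from a function returning one list of tensors per call. Constants are used
# only to keep the sketch self-contained; a real pipeline would return tensors from
# e.g. a tf.data iterator or TF reading ops. The size of 1000 is hypothetical.
def _example_tensor_input():
    def get_tensors():
        image = tf.zeros([32, 28, 28, 3])
        label = tf.zeros([32], dtype=tf.int32)
        return [image, label]
    return TensorInput(get_tensors, size=1000)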
class DummyConstantInput(TensorInput):
""" Input with a constant zero tensor placed on GPU.
Useful for debugging performance issues """
def __init__(self, shapes):
"""
Args:
shapes (list[list]): a list of fully-specified shapes.
"""
self.shapes = shapes
logger.warn("Using dummy input for debug!")
def fn():
tlist = []
ctx = get_current_tower_context()
assert ctx is not None
assert len(self.shapes) == len(self._spec)
for idx, p in enumerate(self._spec):
tlist.append(tf.constant(
0, dtype=p.dtype,
name='dummy-{}-{}'.format(p.name, ctx.index),
shape=self.shapes[idx]))
return tlist
super(DummyConstantInput, self).__init__(fn)
class ZMQInput(TensorInput):
"""
Receive tensors from a ZMQ endpoint, with ops from https://github.com/tensorpack/zmq_ops.
It works with :func:`dataflow.remote.send_dataflow_zmq(format='zmq_ops')`.
"""
def __init__(self, end_point, hwm, bind=True):
"""
Args:
end_point (str): the ZMQ endpoint
hwm (int): the ZMQ high-water-mark
"""
self._end_point = end_point
self._hwm = int(hwm)
self._bind = bind
def fn():
ret = self._zmq_pull_socket.pull()
assert len(ret) == len(self._spec)
for qv, v in zip(ret, self._spec):
qv.set_shape(v.shape)
return ret
super(ZMQInput, self).__init__(fn)
def _setup(self, input_signature):
super(ZMQInput, self)._setup(input_signature)
assert len(input_signature) > 0, \
"ZMQInput has to be used with input signature!"
import zmq_ops
self._zmq_pull_socket = zmq_ops.ZMQPullSocket(
self._end_point,
[x.dtype for x in input_signature],
hwm=self._hwm,
bind=self._bind)
def to_dataset(self, input_signature):
"""
Convert to a TF dataset.
Args:
input_signature (list[InputSpec]):
Returns:
tf.data.Dataset
"""
import zmq_ops
zmq_pull_socket = zmq_ops.ZMQPullSocket(
self._end_point, [x.dtype for x in input_signature],
hwm=self._hwm, bind=self._bind)
def mapper(_):
inputs = list(zmq_pull_socket.pull())
for v, sig in zip(inputs, input_signature):
v.set_shape(sig.shape)
return inputs
# Is there a better way to construct from stateful tensor?
dataset = tf.data.Dataset.from_tensors([1]) # just a placeholder
return dataset.map(mapper)
class TFDatasetInput(FeedfreeInput):
"""
Use a :class:`tf.data.Dataset` instance as input.
Note:
1. In training, the given dataset or dataflow has to be infinite
            (you can use :func:`repeat()` or :class:`RepeatedData`).
2. TensorFlow may keep the dataflow alive even if the dataset is no
longer used.
"""
def __init__(self, dataset):
"""
Args:
dataset (tf.data.Dataset or DataFlow):
"""
if isinstance(dataset, tf.data.Dataset):
self._dataset = dataset
self._dataflow = None
elif isinstance(dataset, DataFlow):
self._dataset = None
self._dataflow = dataset
else:
raise ValueError("TFDatasetInput takes a tf.data.Dataset or DataFlow! Got {}".format(dataset))
def _setup(self, input_signature):
self._spec = input_signature
if self._dataset is not None:
types = self._dataset.output_types
if len(types) == 1:
types = (types,)
spec_types = tuple(k.dtype for k in input_signature)
assert len(types) == len(spec_types), \
"Dataset and input signature have different length! {} != {}".format(
len(types), len(spec_types))
assert types == spec_types, \
"Data types of dataset and input signature don't match! {} != {}".format(
str(types), str(spec_types))
shapes = self._dataset.output_shapes
spec_shapes = [k.shape for k in input_signature]
for idx, (s1, s2) in enumerate(zip(shapes, spec_shapes)):
s2 = tf.TensorShape(s2)
assert s2.is_compatible_with(s1), \
"Input signature '{}' has incompatible shape with dataset! {} vs {}".format(
input_signature[idx].name, s2, s1)
else:
self._dataset = TFDatasetInput.dataflow_to_dataset(self._dataflow, [x.dtype for x in input_signature])
self._iterator = self._dataset.make_initializable_iterator()
self._init_op = self._iterator.initializer
def _reset_state(self):
self._init_op.run()
def _get_input_tensors(self):
spec_shapes = [k.shape for k in self._spec]
ret = self._iterator.get_next()
assert len(ret) == len(spec_shapes), \
"Dataset returns {} tensors but there are {} inputs!".format(len(ret), len(spec_shapes))
for t, shp in zip(ret, spec_shapes):
t.set_shape(shp)
return ret
@staticmethod
def dataflow_to_dataset(df, types):
"""
Wrap a dataflow to tf.data.Dataset.
This function will also reset the dataflow.
If the dataflow itself is finite, the returned dataset is also finite.
Therefore, if used for training, you'll need to add `.repeat()` on the returned
dataset.
Args:
df (DataFlow): a dataflow which produces lists
types([tf.DType]): list of types
Returns:
(tf.data.Dataset)
Note:
TensorFlow may keep the dataflow alive even if the dataset is no
longer used.
"""
# TODO theoretically it can support dict
assert isinstance(df, DataFlow), df
assert isinstance(types, (list, tuple)), types
df = MapData(df, tuple)
df.reset_state()
ds = tf.data.Dataset.from_generator(
df.get_data, tuple(types))
return ds
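# Illustrative sketch (not part of the original tensorpack source): converting a
# DataFlow into a tf.data.Dataset and handing it to TFDatasetInput. `my_dataflow`
# is hypothetical and assumed to yield [image, label] lists of float32/int32 arrays.
def _example_dataflow_to_dataset(my_dataflow):
    dataset = TFDatasetInput.dataflow_to_dataset(my_dataflow, [tf.float32, tf.int32])
    # For training the dataset must be infinite, hence the repeat() (see the Note above):
    dataset = dataset.repeat()
    return TFDatasetInput(dataset)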
class StagingInput(FeedfreeInput):
"""
A wrapper around a feedfree input,
to prefetch the input in StagingArea (on GPUs).
It works by registering hooks to put & get tensors into the StagingArea.
If `get_input_tensors` gets called multiple times,
it requires that all outputs ever produced by this InputSource will be fetched together.
This means that in multi-GPU training, you should ensure that each call on `hooked_sess.run`
depends on either all input tensors on all GPUs, or no input tensors at all.
As a result you cannot use this InputSource for :class:`InferenceRunner`.
More than one StagingInput cannot be used together.
"""
class StagingCallback(Callback):
"""
A callback registered by this input source, to make sure stage/unstage
is run at each step.
"""
def __init__(self, input, nr_stage):
self.nr_stage = nr_stage
self._input = input
self._initialized = False
def _setup_graph(self):
self.stage_op = self._input._get_stage_op()
unstage_ops = self._input._get_unstage_ops()
unstage_op = tf.group(*unstage_ops, name='unstage_all')
self._check_dependency_op = unstage_ops[0]
self.fetches = tfv1.train.SessionRunArgs(
fetches=[self.stage_op, unstage_op])
def _prefill(self, sess):
logger.info("Pre-filling StagingArea ...")
for _ in range(self.nr_stage):
self.stage_op.run(session=sess)
logger.info("{} element{} put into StagingArea on each tower.".format(
self.nr_stage, "s were" if self.nr_stage > 1 else " was"))
def _before_run(self, ctx):
# This has to happen once, right before the first iteration.
# doing it in `before_train` may not work because QueueInput happens in before_train.
if not self._initialized:
self._initialized = True
self._prefill(ctx.session)
# Only step the stagingarea when the input is evaluated in this sess.run
fetches = ctx.original_args.fetches
if dependency_of_fetches(fetches, self._check_dependency_op):
                # note: this disables nesting of StagingInput
return self.fetches
def __init__(self, input, nr_stage=1, device=None):
"""
Args:
input (FeedfreeInput):
nr_stage (int): number of elements to prefetch into each StagingArea, at the beginning.
Since enqueue and dequeue are synchronized, prefetching 1 element should be sufficient.
device (str or None): if not None, place the StagingArea on a specific device. e.g., '/cpu:0'.
Otherwise, they are placed under where `get_inputs_tensors`
gets called, which could be unspecified in case of simple trainers.
"""
if not isinstance(input, FeedfreeInput):
raise ValueError("StagingInput takes a FeedfreeInput! Got {}".format(input))
if isinstance(input, StagingInput):
raise ValueError("StagingInput cannot be nested!")
self._input = input
self._nr_stage = nr_stage
self._areas = []
self._stage_ops = []
self._unstage_ops = []
self._device = device
def _setup(self, inputs):
self._input.setup(inputs)
with self.cached_name_scope():
pass # just to cache the correct ns to use
def _get_callbacks(self):
cbs = self._input.get_callbacks()
# this callback has to happen after others, so StagingInput can be stacked together
cbs.append(
StagingInput.StagingCallback(self, self._nr_stage))
return cbs
def _size(self):
return self._input.size()
@contextmanager
def _device_ctx(self):
if not self._device:
yield
else:
with tf.device(self._device):
yield
def _get_input_tensors(self):
inputs = self._input.get_input_tensors()
with self._device_ctx():
with self.cached_name_scope():
# Putting variables to stagingarea will cause trouble
dtypes = []
for idx in range(len(inputs)):
dtype = inputs[idx].dtype
if dtype.base_dtype != dtype: # is reference type
inputs[idx] = tf.identity(inputs[idx])
dtypes.append(dtype.base_dtype)
# TODO tensorflow/benchmarks use static shapes here,
# though it doesn't seem to help. We can use it when it's known.
# Setting capacity to 1 to potentially save some memory, because we should
# expect the consumers to run slower than the producer.
stage = StagingArea(dtypes, shapes=None, capacity=1)
# put & get automatically inherit the name scope from the area
self._stage_ops.append(stage.put(inputs))
self._areas.append(stage)
outputs = stage.get()
if isinstance(outputs, tf.Tensor): # when size=1, TF doesn't return a list
outputs = [outputs]
for vin, vout in zip(inputs, outputs):
vout.set_shape(vin.get_shape())
self._unstage_ops.append(outputs)
# self._size_ops.append(stage.size())
return outputs
def _get_stage_op(self):
with self.cached_name_scope():
return tf.group(*self._stage_ops)
def _get_unstage_ops(self):
with self.cached_name_scope():
all_outputs = list(chain.from_iterable(self._unstage_ops))
return all_outputs
# for debugging only
def _create_ema_callback(self):
def create_ema_op():
with self.cached_name_scope():
avg_size = tf.truediv(tf.add_n(self._size_ops), len(self._size_ops), name='avg_stagingarea_size')
return add_moving_summary(avg_size, collection=None)[0].op
return RunOp(
create_ema_op,
run_before=False,
run_as_trigger=False,
run_step=True)
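# Illustrative sketch (not part of the original tensorpack source): the common pattern
# of stacking StagingInput on top of QueueInput, so data is first prefetched into a
# CPU-side queue and then into per-GPU StagingAreas. `my_dataflow` is hypothetical.
def _example_staging_input(my_dataflow):
    # nr_stage=1 is usually enough because stage/unstage are synchronized per step.
    return StagingInput(QueueInput(my_dataflow), nr_stage=1)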
| 26,473 | 35.97486 | 118 | py |
SyNet | SyNet-master/tensorpack/tensorpack/input_source/__init__.py | # -*- coding: utf-8 -*-
# File: __init__.py
# https://github.com/celery/kombu/blob/7d13f9b95d0b50c94393b962e6def928511bfda6/kombu/__init__.py#L34-L36
STATICA_HACK = True
globals()['kcah_acitats'[::-1].upper()] = False
if STATICA_HACK:
from .input_source_base import *
from .input_source import *
from pkgutil import iter_modules
import os
import os.path
__all__ = []
def global_import(name):
p = __import__(name, globals(), locals(), level=1)
lst = p.__all__ if '__all__' in dir(p) else []
del globals()[name]
for k in lst:
if not k.startswith('__'):
globals()[k] = p.__dict__[k]
__all__.append(k)
_CURR_DIR = os.path.dirname(__file__)
_SKIP = []
for _, module_name, _ in iter_modules(
[_CURR_DIR]):
srcpath = os.path.join(_CURR_DIR, module_name + '.py')
if not os.path.isfile(srcpath):
continue
if module_name.startswith('_'):
continue
if module_name not in _SKIP:
global_import(module_name)
| 1,006 | 24.820513 | 105 | py |
SyNet | SyNet-master/tensorpack/tensorpack/input_source/input_source_base.py | # -*- coding: utf-8 -*-
# File: input_source_base.py
import copy
from abc import ABCMeta, abstractmethod
from contextlib import contextmanager
import six
import tensorflow as tf
from ..callbacks.base import CallbackFactory
from ..tfutils.common import get_op_tensor_name
from ..utils import logger
from ..utils.argtools import call_only_once, memoized_method
from ..compat import tfv1
__all__ = ['InputSource', 'remap_input_source']
def build_or_reuse_placeholder(tensor_spec):
"""
Build a tf.placeholder from the metadata in the given tensor spec, or return an existing one.
Args:
tensor_spec (tf.TensorSpec):
Returns:
tf.Tensor:
"""
g = tfv1.get_default_graph()
name = tensor_spec.name
try:
tensor = g.get_tensor_by_name(name + ':0')
assert "Placeholder" in tensor.op.type, "Tensor {} exists but is not a placeholder!".format(name)
assert tensor_spec.is_compatible_with(tensor), \
"Tensor {} exists but is not compatible with the signature!".format(tensor)
if tensor.shape.as_list() == tensor_spec.shape.as_list():
# It might be desirable to use a placeholder of a different shape in some tower
# (e.g., a less specific shape)
# Comparing `tensor.shape` directly doesn't work, because
# tensorflow thinks `tf.Dimension(None)` and `tf.Dimension(None)` are not equal.
return tensor
except KeyError:
pass
with tfv1.name_scope(None): # clear any name scope it might get called in
ret = tfv1.placeholder(
tensor_spec.dtype, shape=tensor_spec.shape, name=tensor_spec.name)
return ret
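# Illustrative sketch (not part of the original tensorpack source): calling
# build_or_reuse_placeholder twice with the same spec returns the existing
# placeholder instead of creating a duplicate. The spec below is hypothetical.
def _example_build_or_reuse_placeholder():
    spec = tf.TensorSpec([None, 224, 224, 3], tf.float32, name='image')
    p1 = build_or_reuse_placeholder(spec)
    p2 = build_or_reuse_placeholder(spec)  # reuses the existing 'image' placeholder
    assert p1 is p2
    return p1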
def get_tensors_inputs(placeholders, tensors, names):
"""
Args:
placeholders (list[Tensor]):
tensors (list[Tensor]): list of tf.Tensor
names (list[str]): names matching the given tensors
Returns:
list[Tensor]: inputs to used for the tower function,
with the corresponding placeholders replaced by tensors.
"""
assert len(tensors) == len(names), \
"Input tensors {} and input names {} have different length!".format(
tensors, names)
ret = copy.copy(placeholders)
placeholder_names = [p.name for p in placeholders]
for name, tensor in zip(names, tensors):
tensorname = get_op_tensor_name(name)[1]
try:
idx = placeholder_names.index(tensorname)
except ValueError:
logger.error("Name {} is not a model input!".format(tensorname))
raise
ret[idx] = tensor
return ret
def get_sublist_by_names(lst, names):
"""
Args:
lst (list): list of objects with "name" property.
Returns:
list: a sublist of objects, matching names
"""
orig_names = [p.name for p in lst]
ret = []
for name in names:
try:
idx = orig_names.index(name)
except ValueError:
logger.error("Name {} doesn't appear in lst {}!".format(
name, str(orig_names)))
raise
ret.append(lst[idx])
return ret
@six.add_metaclass(ABCMeta)
class InputSource(object):
""" Base class for the abstract InputSource. """
_name_scope = None
_setup_done = False
def get_input_tensors(self):
"""
Returns:
list[Tensor]: A list of tensors corresponding to the inputs of the model.
Will be used as input for the tower function.
This method should always create and return new tensors when called,
unless it returns placeholders.
"""
return self._get_input_tensors()
@abstractmethod
def _get_input_tensors(self):
pass
@call_only_once
def setup(self, input_signature):
"""
Args:
input_signature (list[tf.TensorSpec]): list of specs for each input tensor
Returns:
list[Callback]: extra callbacks needed by this InputSource.
callbacks of InputSource cannot use any `trigger*()` method.
"""
self._setup(input_signature)
self._setup_done = True
return self.get_callbacks()
def _setup(self, input_signature):
pass
def setup_done(self):
"""
Returns:
bool: whether :meth:`setup()` has been called.
"""
return self._setup_done
@memoized_method
def get_callbacks(self):
"""
An InputSource might need some extra maintenance during training,
which is done also through the Callback interface.
This method returns the callbacks and the return value will be memoized.
All callbacks will be automatically marked as `chief_only=False`,
so they will run on all nodes.
Callbacks returned by :class:`InputSource` only supports a subset of callback's functionalities:
1. It cannot access the trainer, because an :class:`InputSource` can be used in pure inference.
2. It cannot use the following methods: `trigger_{step,epoch}, {before,after}_epoch`.
In other words, these callbacks should only have the basic functionality of `tf.train.SessionRunHooks`.
Returns:
list[Callback]: extra callbacks needed by this InputSource.
"""
assert self.setup_done()
ret = [CallbackFactory(
before_train=lambda _: self.reset_state())] + self._get_callbacks()
for r in ret:
r.set_chief_only(False) # no input callbacks should be chief-only
return ret
def _get_callbacks(self):
return []
def reset_state(self):
"""
Initialize/reinitialize this InputSource.
Must be called under a default session.
For training, it will get called once by the trainer in `before_train` callbacks.
For inference, the :class:`InferenceRunner` will call this method each time it is triggered.
"""
self._reset_state()
def _reset_state(self):
pass
def size(self):
"""
Returns:
int: epoch size of the InputSource
"""
return self._size()
def _size(self):
raise NotImplementedError()
@contextmanager
def cached_name_scope(self):
"""
Yield a context under a cached name scope, whose name is the name of
this InputSource class.
"""
if self._name_scope:
with tf.name_scope(self._name_scope):
yield self._name_scope
else:
name = type(self).__name__
with tf.name_scope(name) as ns:
self._name_scope = ns
yield ns
class ProxyInputSource(InputSource):
"""
An InputSource which proxy every method to ``self._input``.
"""
def __init__(self, input):
assert isinstance(input, InputSource), input
self._input = input
def _get_input_tensors(self):
return self._input.get_input_tensors()
def _setup(self, input_signature):
self._input.setup(input_signature)
def _get_callbacks(self):
return self._input.get_callbacks()
def _size(self):
return self._input.size()
def _reset_state(self):
self._input.reset_state()
def remap_input_source(input, names):
"""
When you have some :class:`InputSource` which doesn't match the inputs of
your tower function, use `RemapInputSource`.
It produces placeholders for all the inputs in your model,
except that the corresponding ones are replaced with the tensor produced
by the given :class:`InputSource`.
Example:
.. code-block:: python
input1 = QueueInput(ds)
# assume ds produces data that should be fed to 'image' and 'label',
# but the graph takes more inputs for some reasons, or takes inputs
# of a different order, for example like the following:
# input_signature = [tf.TensorSpec((None,10), tf.float32, 'score'),
# tf.TensorSpec((None,20,20,3), tf.float32, 'label'),
# tf.TensorSpec((None,), tf.int32, 'image') ]
input2 = remap_input_source(input1, ['image', 'label'])
# now, if input2 is used with the above input_signature, it will return a
# placeholder for 'score', plus the tensors returned by input1
"""
def __init__(self, input, names):
"""
Args:
input(InputSource): a :class:`InputSource`, whose tensors will get mapped.
names(list[str]): list of input names corresponding to the tensors
produced by ``input``.
Returns:
InputSource:
"""
ProxyInputSource.__init__(self, input)
assert isinstance(names, (list, tuple)), names
self._names = tuple(names)
def _setup(self, inputs):
self._all_placehdrs = [build_or_reuse_placeholder(v) for v in inputs]
inputs_subset = get_sublist_by_names(inputs, self._names)
self._input.setup(inputs_subset)
def _get_input_tensors(self):
ret = self._input.get_input_tensors()
assert len(ret) == len(self._names)
return get_tensors_inputs(
self._all_placehdrs, ret, self._names)
oldcls = type(input)
# inherit oldcls so that type check in various places would work
cls = type('Remapped' + oldcls.__name__, (ProxyInputSource, oldcls), {
'__init__': __init__,
'_setup': _setup,
'_get_input_tensors': _get_input_tensors})
return cls(input, names)
| 9,603 | 31.555932 | 111 | py |
SyNet | SyNet-master/tensorpack/tensorpack/compat/__init__.py | #!/usr/bin/env python
import tensorflow as tf
def backport_tensor_spec():
if hasattr(tf, 'TensorSpec'):
return tf.TensorSpec
try:
# available since 1.7
from tensorflow.python.framework.tensor_spec import TensorSpec
except ImportError:
pass
else:
tf.TensorSpec = TensorSpec
return TensorSpec
from .tensor_spec import TensorSpec
tf.TensorSpec = TensorSpec
return TensorSpec
def is_tfv2():
try:
from tensorflow.python import tf2
return tf2.enabled()
except Exception:
return False
if is_tfv2():
tfv1 = tf.compat.v1
if not hasattr(tf, 'layers'):
# promised at https://github.com/tensorflow/community/pull/24#issuecomment-440453886
tf.layers = tf.keras.layers
else:
try:
tfv1 = tf.compat.v1 # this will silent some warnings
except AttributeError:
tfv1 = tf
| 917 | 21.390244 | 92 | py |
SyNet | SyNet-master/tensorpack/tensorpack/compat/tensor_spec.py |
"""
Copied from tensorflow/python/framework/tensor_spec.py
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import common_shapes
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
class TensorSpec(object):
"""Describes a tf.Tensor.
Metadata for describing the `tf.Tensor` objects accepted or returned
by some TensorFlow APIs.
"""
__slots__ = ["_shape", "_shape_tuple", "_dtype", "_name"]
def __init__(self, shape, dtype=dtypes.float32, name=None):
"""Creates a TensorSpec.
Args:
shape: Value convertible to `tf.TensorShape`. The shape of the tensor.
dtype: Value convertible to `tf.DType`. The type of the tensor values.
name: Optional name for the Tensor.
Raises:
TypeError: If shape is not convertible to a `tf.TensorShape`, or dtype is
not convertible to a `tf.DType`.
"""
self._shape = tensor_shape.TensorShape(shape)
try:
self._shape_tuple = tuple(self.shape.as_list())
except ValueError:
self._shape_tuple = None
self._dtype = dtypes.as_dtype(dtype)
self._name = name
@classmethod
def from_spec(cls, spec, name=None):
return cls(spec.shape, spec.dtype, name or spec.name)
@classmethod
def from_tensor(cls, tensor, name=None):
if isinstance(tensor, ops.EagerTensor):
return TensorSpec(tensor.shape, tensor.dtype, name)
elif isinstance(tensor, ops.Tensor):
return TensorSpec(tensor.shape, tensor.dtype, name or tensor.op.name)
else:
raise ValueError("`tensor` should be a tf.Tensor")
@property
def shape(self):
"""Returns the `TensorShape` that represents the shape of the tensor."""
return self._shape
@property
def dtype(self):
"""Returns the `dtype` of elements in the tensor."""
return self._dtype
@property
def name(self):
"""Returns the (optionally provided) name of the described tensor."""
return self._name
def is_compatible_with(self, spec_or_tensor):
"""Returns True if spec_or_tensor is compatible with this TensorSpec.
Two tensors are considered compatible if they have the same dtype
and their shapes are compatible (see `tf.TensorShape.is_compatible_with`).
Args:
spec_or_tensor: A tf.TensorSpec or a tf.Tensor
Returns:
True if spec_or_tensor is compatible with self.
"""
return (self._dtype.is_compatible_with(spec_or_tensor.dtype) and
self._shape.is_compatible_with(spec_or_tensor.shape))
def __repr__(self):
return "TensorSpec(shape={}, dtype={}, name={})".format(
self.shape, repr(self.dtype), repr(self.name))
def __hash__(self):
return hash((self._shape_tuple, self.dtype))
def __eq__(self, other):
return (self._shape_tuple == other._shape_tuple # pylint: disable=protected-access
and self.dtype == other.dtype
and self._name == other._name) # pylint: disable=protected-access
def __ne__(self, other):
return not self == other
def __reduce__(self):
return TensorSpec, (self._shape, self._dtype, self._name)
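# Illustrative sketch (not part of the original file): describing a tensor with a
# TensorSpec and checking compatibility against a concrete tensor. Shapes and names
# below are hypothetical.
def _example_tensor_spec():
    import tensorflow as tf
    spec = TensorSpec([None, 3], tf.float32, name='points')
    t = tf.zeros([8, 3], tf.float32)
    assert spec.is_compatible_with(t)                        # (8, 3) fits (None, 3)
    assert not TensorSpec([None, 4]).is_compatible_with(t)   # shape mismatch
    return spec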
| 3,255 | 29.429907 | 87 | py |
SyNet | SyNet-master/tensorpack/tensorpack/models/fc.py | # -*- coding: utf-8 -*-
# File: fc.py
import numpy as np
from ..compat import tfv1 as tf # this should be avoided first in model code
from ..tfutils.common import get_tf_version_tuple
from .common import VariableHolder, layer_register
from .tflayer import convert_to_tflayer_args, rename_get_variable
__all__ = ['FullyConnected']
def batch_flatten(x):
"""
Flatten the tensor except the first dimension.
"""
shape = x.get_shape().as_list()[1:]
if None not in shape:
return tf.reshape(x, [-1, int(np.prod(shape))])
return tf.reshape(x, tf.stack([tf.shape(x)[0], -1]))
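# Illustrative sketch (not part of the original file): batch_flatten keeps the batch
# dimension and flattens everything else, e.g. [N, 7, 7, 64] -> [N, 3136]. When the
# non-batch dimensions are not statically known it falls back to a dynamic reshape.
def _example_batch_flatten():
    x = tf.zeros([8, 7, 7, 64])
    return batch_flatten(x)  # static shape (8, 3136)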
@layer_register(log_shape=True)
@convert_to_tflayer_args(
args_names=['units'],
name_mapping={'out_dim': 'units'})
def FullyConnected(
inputs,
units,
activation=None,
use_bias=True,
kernel_initializer=None,
bias_initializer=tf.zeros_initializer(),
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None):
"""
A wrapper around `tf.layers.Dense`.
One difference to maintain backward-compatibility:
Default weight initializer is variance_scaling_initializer(2.0).
Variable Names:
* ``W``: weights of shape [in_dim, out_dim]
* ``b``: bias
"""
if kernel_initializer is None:
if get_tf_version_tuple() <= (1, 12):
kernel_initializer = tf.contrib.layers.variance_scaling_initializer(2.0) # deprecated
else:
kernel_initializer = tf.keras.initializers.VarianceScaling(2.0, distribution='untruncated_normal')
inputs = batch_flatten(inputs)
with rename_get_variable({'kernel': 'W', 'bias': 'b'}):
layer = tf.layers.Dense(
units=units,
activation=activation,
use_bias=use_bias,
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
activity_regularizer=activity_regularizer,
_reuse=tf.get_variable_scope().reuse)
ret = layer.apply(inputs, scope=tf.get_variable_scope())
ret = tf.identity(ret, name='output')
ret.variables = VariableHolder(W=layer.kernel)
if use_bias:
ret.variables.b = layer.bias
return ret
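# Illustrative sketch (not part of the original file): typical use inside a tensorpack
# tower function. 'fc0'/'fc_out' are hypothetical layer names; the input is flattened
# automatically before the matmul.
def _example_fully_connected(image):
    x = FullyConnected('fc0', image, 512, activation=tf.nn.relu)
    logits = FullyConnected('fc_out', x, 10)
    return logits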
| 2,337 | 30.594595 | 110 | py |
SyNet | SyNet-master/tensorpack/tensorpack/models/batch_norm.py | # Copyright (c) Tensorpack Contributors. All Rights Reserved
# -*- coding: utf-8 -*-
# File: batch_norm.py
import re
from ..compat import tfv1 as tf # this should be avoided first in model code
from tensorflow.python.training import moving_averages
from ..tfutils.collection import backup_collection, restore_collection
from ..tfutils.common import get_tf_version_tuple
from ..tfutils.tower import get_current_tower_context
from ..utils import logger
from ..utils.argtools import get_data_format, log_once
from .common import VariableHolder, layer_register
from .tflayer import convert_to_tflayer_args, rename_get_variable
from .utils import disable_autograph
__all__ = ['BatchNorm', 'BatchRenorm']
# decay: being too close to 1 leads to slow start-up. torch use 0.9.
# eps: torch: 1e-5. Lasagne: 1e-4
def get_bn_variables(n_out, use_scale, use_bias, beta_init, gamma_init):
if use_bias:
beta = tf.get_variable('beta', [n_out], initializer=beta_init)
else:
beta = tf.zeros([n_out], name='beta')
if use_scale:
gamma = tf.get_variable('gamma', [n_out], initializer=gamma_init)
else:
gamma = tf.ones([n_out], name='gamma')
# x * gamma + beta
moving_mean = tf.get_variable('mean/EMA', [n_out],
initializer=tf.constant_initializer(), trainable=False)
moving_var = tf.get_variable('variance/EMA', [n_out],
initializer=tf.constant_initializer(1.0), trainable=False)
if get_current_tower_context().is_main_training_tower:
for v in [moving_mean, moving_var]:
tf.add_to_collection(tf.GraphKeys.MODEL_VARIABLES, v)
return beta, gamma, moving_mean, moving_var
def internal_update_bn_ema(xn, batch_mean, batch_var,
moving_mean, moving_var, decay):
update_op1 = moving_averages.assign_moving_average(
moving_mean, batch_mean, decay, zero_debias=False,
name='mean_ema_op')
update_op2 = moving_averages.assign_moving_average(
moving_var, batch_var, decay, zero_debias=False,
name='var_ema_op')
# When sync_statistics is True, always enable internal_update.
# Otherwise the update ops (only executed on main tower)
# will hang when some BatchNorm layers are unused (https://github.com/tensorpack/tensorpack/issues/1078)
with tf.control_dependencies([update_op1, update_op2]):
return tf.identity(xn, name='output')
def get_sync_bn_mean_var(inputs, red_axis, sync_statistics):
ctx = get_current_tower_context()
batch_mean = tf.reduce_mean(inputs, axis=red_axis)
batch_mean_square = tf.reduce_mean(tf.square(inputs), axis=red_axis)
TF_version = get_tf_version_tuple()
if sync_statistics == 'nccl':
num_dev = ctx.total
if num_dev == 1:
logger.warn("BatchNorm(sync_statistics='nccl') is used with only one tower!")
else:
assert TF_version >= (1, 10), \
"Cross-GPU BatchNorm is only supported in TF>=1.10 ." \
"Upgrade TF or apply this patch manually: https://github.com/tensorflow/tensorflow/pull/20360"
if TF_version <= (1, 12):
try:
from tensorflow.contrib.nccl.python.ops.nccl_ops import _validate_and_load_nccl_so # deprecated
except Exception:
pass
else:
_validate_and_load_nccl_so()
from tensorflow.contrib.nccl.ops import gen_nccl_ops # deprecated
else:
from tensorflow.python.ops import gen_nccl_ops
shared_name = re.sub('tower[0-9]+/', '', tf.get_variable_scope().name)
batch_mean = gen_nccl_ops.nccl_all_reduce(
input=batch_mean,
reduction='sum',
num_devices=num_dev,
shared_name=shared_name + '_NCCL_mean') * (1.0 / num_dev)
batch_mean_square = gen_nccl_ops.nccl_all_reduce(
input=batch_mean_square,
reduction='sum',
num_devices=num_dev,
shared_name=shared_name + '_NCCL_mean_square') * (1.0 / num_dev)
elif sync_statistics == 'horovod':
# Require https://github.com/uber/horovod/pull/331
import horovod.tensorflow as hvd
if hvd.size() == 1:
logger.warn("BatchNorm(sync_statistics='horovod') is used with only one process!")
else:
import horovod
hvd_version = tuple(map(int, horovod.__version__.split('.')[:3]))
assert hvd_version >= (0, 13, 6), "sync_statistics=horovod needs horovod>=0.13.6 !"
batch_mean = hvd.allreduce(batch_mean, average=True)
batch_mean_square = hvd.allreduce(batch_mean_square, average=True)
batch_var = batch_mean_square - tf.square(batch_mean)
return batch_mean, batch_var
@layer_register()
@convert_to_tflayer_args(
args_names=[],
name_mapping={
'use_bias': 'center',
'use_scale': 'scale',
'gamma_init': 'gamma_initializer',
'decay': 'momentum',
'use_local_stat': 'training'
})
@disable_autograph()
def BatchNorm(inputs, axis=None, *, training=None, momentum=0.9, epsilon=1e-5,
center=True, scale=True,
beta_initializer=tf.zeros_initializer(),
gamma_initializer=tf.ones_initializer(),
virtual_batch_size=None,
data_format='channels_last',
ema_update='default',
sync_statistics=None):
"""
A more powerful version of `tf.layers.batch_normalization`. It differs from
    the official one in the following aspects:
1. Accepts an alternative ``data_format`` option when ``axis`` is None. For 2D input, this argument will be ignored.
2. Default value for ``momentum`` and ``epsilon`` is different.
3. Default value for ``training`` is automatically obtained from tensorpack's ``TowerContext``.
User-provided value can overwrite this behavior.
4. Support the ``ema_update`` option, which covers broader use cases than the standard EMA update.
5. Support the ``sync_statistics`` option, which implements "SyncBN" and is very useful in small-batch models.
6. Better support of the ``virtual_batch_size`` option that does not have the bugs in ``tf.layers``.
Args:
training (bool): if True, use per-batch statistics to normalize. Otherwise, use stored EMA
to normalize. By default, it is equal to `get_current_tower_context().is_training`.
            This is not a good argument name, but it is what the TensorFlow layer uses.
virtual_batch_size (int): implement "Ghost BatchNorm" that normalizes
the data with a smaller batch size than the input. Only effective when training is True.
The value has to be a divisor of the actual batch size.
It does not use the buggy TensorFlow implementation which has the
            problems of (1) wrong behavior at inference and (2) creating variables with unnecessary size=1 dimensions.
Corresponding TF issue: https://github.com/tensorflow/tensorflow/issues/23050
ema_update (str): Only effective when ``training=True``. It has the following options:
* "default": same as "collection". Because this is the default behavior in TensorFlow.
* "skip": do not update EMA. This can be useful when you reuse a batch norm layer in several places
but do not want them to all update your EMA.
* "collection": Add EMA update ops to collection `tf.GraphKeys.UPDATE_OPS`.
The ops in the collection will be run automatically by the callback :class:`RunUpdateOps`, along with
your training iterations. This can waste compute if your training iterations do not always depend
on the BatchNorm layer.
* "internal": EMA is updated inside this layer itself by control dependencies.
In standard scenarios, it has similar speed to "collection". But it has some more benefits:
1. BatchNorm is used inside dynamic control flow.
The collection-based update does not support dynamic control flows.
2. BatchNorm layer is sometimes unused (e.g., in GANs you have two networks to train alternatively).
Putting all update ops into a single collection will waste a lot of compute.
3. Other part of the model relies on the "updated" EMA. The collection-based method does not update
EMA immediately.
4. It has less chance to cause TensorFlow bugs in a graph with complicated control flow.
Therefore this option is preferred over TensorFlow default.
Corresponding TF issue: https://github.com/tensorflow/tensorflow/issues/14699
sync_statistics (str or None): one of None, "nccl", or "horovod". It determines how to compute the
"per-batch statistics" when ``training==True``.
* None: it uses statistics of the input tensor to normalize during training.
This is the standard way BatchNorm was implemented in most frameworks.
* "nccl": this layer must be used under tensorpack's multi-GPU trainers.
It uses the aggregated statistics of the whole batch (across all GPUs) to normalize.
* "horovod": this layer must be used under tensorpack's :class:`HorovodTrainer`.
It uses the aggregated statistics of the whole batch (across all MPI ranks) to normalize.
Note that on a single machine this is found to be slower than the "nccl" implementation.
When not None, each GPU computes its own E[x] and E[x^2],
which are then averaged among all GPUs to compute global mean & variance.
Therefore each GPU needs to have the same batch size.
The synchronization is based on the current variable scope + the name of the layer
(`BatchNorm('name', input)`). Therefore, you need to make sure that:
1. The BatchNorm layer on different GPUs needs to have the same name, so that
statistics can be synchronized. If names do not match, this layer will hang.
2. A BatchNorm layer cannot be reused within one tower.
3. A BatchNorm layer needs to be executed for the same number of times by all GPUs.
If different GPUs execute one BatchNorm layer for different number of times
(e.g., if some GPUs do not execute it), this layer may hang.
This option is also known as "SyncBN" or "Cross-GPU BatchNorm" as mentioned in:
`MegDet: A Large Mini-Batch Object Detector <https://arxiv.org/abs/1711.07240>`_.
Corresponding TF issue: https://github.com/tensorflow/tensorflow/issues/18222.
When `sync_statistics` is enabled, `ema_update` is set to "internal" automatically.
This is to avoid running `UPDATE_OPS`, which requires synchronization.
Variable Names:
* ``beta``: the bias term. Will be zero-inited by default.
* ``gamma``: the scale term. Will be one-inited by default.
* ``mean/EMA``: the moving average of mean.
* ``variance/EMA``: the moving average of variance.
Note:
This layer is more flexible than the standard "BatchNorm" layer and provides more features:
1. No matter whether you're doing training or not, you can set the ``training`` argument
to use batch statistics or EMA statistics.
i.e., you can use batch statistics during inference, or use EMA statistics during training.
Using EMA statistics in training is useful when you load a pre-trained BN and
don't want to update it.
2. As long as `training=True`, `sync_statistics` and `ema_update` option will take effect.
"""
# parse training/ctx
ctx = get_current_tower_context()
if training is None:
training = ctx.is_training
training = bool(training)
if not training:
virtual_batch_size = None
# parse shapes
data_format = get_data_format(data_format, keras_mode=False)
shape = inputs.get_shape().as_list()
ndims = len(shape)
assert ndims in [2, 4], ndims
if sync_statistics is not None:
sync_statistics = sync_statistics.lower()
assert sync_statistics in [None, 'nccl', 'horovod'], sync_statistics
assert ema_update in ["default", "collection", "internal", "skip"]
if ema_update == "default":
ema_update = "collection"
# Logic:
# 1. EMA update is possible only when we compute batch statistics (training=True)
# 2. We know that in training, non-main training tower does not need EMA
# update (unless you need, e.g., inference during training on all towers)
    #    We don't know what to do in the prediction context, so be conservative and do the update.
    # 3. User can explicitly disable the update with "skip".
do_ema_update = training and \
(ctx.is_main_training_tower or not ctx.is_training) \
and (ema_update != "skip")
if axis is None:
if ndims == 2:
axis = 1
else:
axis = 1 if data_format == 'NCHW' else 3
assert axis in [1, 3], axis
num_chan = shape[axis]
freeze_bn_backward = not training and ctx.is_training
if freeze_bn_backward:
if ctx.is_main_training_tower: # only warn in first tower
log_once("Some BatchNorm layer uses moving_mean/moving_variance in training.", func='warn')
# Using moving_mean/moving_variance in training, which means we
        # loaded a pre-trained BN and are only fine-tuning the affine part.
do_sync_bn = (sync_statistics is not None) and training
if not do_sync_bn and not virtual_batch_size:
# Use the builtin layer for regular per-GPU BN.
# Use our own implementation for SyncBN and GhostBN
coll_bk = backup_collection([tf.GraphKeys.UPDATE_OPS])
with rename_get_variable(
{'moving_mean': 'mean/EMA',
'moving_variance': 'variance/EMA'}):
tf_args = dict(
axis=axis,
momentum=momentum, epsilon=epsilon,
center=center, scale=scale,
beta_initializer=beta_initializer,
gamma_initializer=gamma_initializer,
fused=(ndims == 4 and axis in [1, 3]),
_reuse=tf.get_variable_scope().reuse)
use_fp16 = inputs.dtype == tf.float16
if use_fp16:
# non-fused does not support fp16; fused does not support all layouts.
# we made our best guess here
tf_args['fused'] = True
layer = tf.layers.BatchNormalization(**tf_args)
xn = layer.apply(inputs, training=training, scope=tf.get_variable_scope())
# Add EMA variables to the correct collection
if ctx.is_main_training_tower:
for v in layer.non_trainable_variables:
if isinstance(v, tf.Variable):
tf.add_to_collection(tf.GraphKeys.MODEL_VARIABLES, v)
if not do_ema_update:
restore_collection(coll_bk)
if do_ema_update and ema_update == "internal":
# Implement "internal" update.
restore_collection(coll_bk)
assert layer.updates
with tf.control_dependencies(layer.updates):
ret = tf.identity(xn, name='output')
else:
ret = tf.identity(xn, name='output')
vh = ret.variables = VariableHolder(
moving_mean=layer.moving_mean,
mean=layer.moving_mean, # for backward-compatibility
moving_variance=layer.moving_variance,
variance=layer.moving_variance) # for backward-compatibility
if scale:
vh.gamma = layer.gamma
if center:
vh.beta = layer.beta
else:
red_axis = [0] if ndims == 2 else ([0, 2, 3] if axis == 1 else [0, 1, 2])
beta, gamma, moving_mean, moving_var = get_bn_variables(
num_chan, scale, center, beta_initializer, gamma_initializer)
assert sync_statistics is None or virtual_batch_size is None, "Cannot use SyncBN and GhostBN together!"
new_shape = None # don't need to reshape unless ...
if sync_statistics is not None:
# sync bn
batch_mean, batch_var = get_sync_bn_mean_var(inputs, red_axis, sync_statistics)
batch_mean_vec = batch_mean
batch_var_vec = batch_var
if ndims == 4 and axis == 1:
new_shape = [1, num_chan, 1, 1]
batch_mean = tf.reshape(batch_mean, new_shape)
batch_var = tf.reshape(batch_var, new_shape)
else:
orig_shape = tf.shape(inputs)
inputs = tf.reshape(
inputs,
tf.concat([[-1, virtual_batch_size],
tf.shape(inputs)[1:]], axis=0))
# B/V, V, ...
red_axis = [x + 1 for x in red_axis]
new_shape = [1] * (ndims + 1)
new_shape[axis + 1] = num_chan
batch_mean, batch_var = tf.nn.moments(inputs, red_axis, keepdims=True)
# B/V, C
# vec for EMA update: use the first one only to mimic per-GPU BN
batch_mean_vec = tf.reshape(batch_mean[0], [num_chan])
batch_var_vec = tf.reshape(batch_var[0], [num_chan])
if new_shape is not None:
# Using fused_batch_norm(is_training=False) is actually slightly faster,
# but hopefully this call will be JITed in the future.
xn = tf.nn.batch_normalization(
inputs, batch_mean, batch_var,
tf.reshape(beta, new_shape),
tf.reshape(gamma, new_shape), epsilon)
else:
xn = tf.nn.batch_normalization(
inputs, batch_mean, batch_var,
beta, gamma, epsilon)
if virtual_batch_size is not None:
xn = tf.reshape(xn, orig_shape)
if do_ema_update:
ret = internal_update_bn_ema(
xn, batch_mean_vec, batch_var_vec, moving_mean, moving_var, momentum)
else:
ret = tf.identity(xn, name='output')
vh = ret.variables = VariableHolder(
moving_mean=moving_mean,
mean=moving_mean, # for backward-compatibility
moving_variance=moving_var,
variance=moving_var) # for backward-compatibility
if scale:
vh.gamma = gamma
if center:
vh.beta = beta
return ret
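# Illustrative sketch (not part of the original file): common BatchNorm call sites
# inside a tower function (a TowerContext must be active). 'bn'/'bn_sync' are
# hypothetical layer names and `x` a hypothetical NHWC feature tensor.
def _example_batch_norm(x):
    # Plain per-GPU BN; EMA update ops go to the UPDATE_OPS collection by default:
    y = BatchNorm('bn', x)
    # Cross-GPU ("SyncBN") statistics with the EMA updated internally, as described
    # above; requires tensorpack multi-GPU trainers and equal per-GPU batch sizes:
    y_sync = BatchNorm('bn_sync', x, sync_statistics='nccl')
    return y, y_sync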
@layer_register()
@convert_to_tflayer_args(
args_names=[],
name_mapping={
'use_bias': 'center',
'use_scale': 'scale',
'gamma_init': 'gamma_initializer',
'decay': 'momentum'
})
def BatchRenorm(x, rmax, dmax, *, momentum=0.9, epsilon=1e-5,
center=True, scale=True, gamma_initializer=None,
data_format='channels_last'):
"""
Batch Renormalization layer, as described in the paper:
`Batch Renormalization: Towards Reducing Minibatch Dependence in Batch-Normalized Models
<https://arxiv.org/abs/1702.03275>`_.
This implementation is a wrapper around `tf.layers.batch_normalization`.
Args:
x (tf.Tensor): a NHWC or NC tensor.
rmax, dmax (tf.Tensor): a scalar tensor, the maximum allowed corrections.
decay (float): decay rate of moving average.
epsilon (float): epsilon to avoid divide-by-zero.
use_scale, use_bias (bool): whether to use the extra affine transformation or not.
Returns:
tf.Tensor: a tensor named ``output`` with the same shape of x.
Variable Names:
* ``beta``: the bias term.
* ``gamma``: the scale term. Input will be transformed by ``x * gamma + beta``.
* ``moving_mean, renorm_mean, renorm_mean_weight``: See TF documentation.
* ``moving_variance, renorm_stddev, renorm_stddev_weight``: See TF documentation.
"""
shape = x.get_shape().as_list()
ndims = len(shape)
assert ndims in [2, 4]
if ndims == 2:
data_format = 'channels_first'
ctx = get_current_tower_context()
coll_bk = backup_collection([tf.GraphKeys.UPDATE_OPS])
layer = tf.layers.BatchNormalization(
axis=1 if data_format == 'channels_first' else 3,
momentum=momentum, epsilon=epsilon,
center=center, scale=scale,
renorm=True,
renorm_clipping={
'rmin': 1.0 / rmax,
'rmax': rmax,
'dmax': dmax},
renorm_momentum=0.99,
gamma_initializer=gamma_initializer,
fused=False,
_reuse=tf.get_variable_scope().reuse)
xn = layer.apply(x, training=ctx.is_training, scope=tf.get_variable_scope())
if ctx.is_main_training_tower:
for v in layer.non_trainable_variables:
if isinstance(v, tf.Variable):
tf.add_to_collection(tf.GraphKeys.MODEL_VARIABLES, v)
else:
# only run UPDATE_OPS in the first tower
restore_collection(coll_bk)
if ndims == 2:
xn = tf.squeeze(xn, [1, 2])
ret = tf.identity(xn, name='output')
# TODO not sure whether to add moving_mean/moving_var to VH now
vh = ret.variables = VariableHolder()
if scale:
vh.gamma = layer.gamma
if center:
vh.beta = layer.beta
return ret
| 21,444 | 44.530786 | 120 | py |
SyNet | SyNet-master/tensorpack/tensorpack/models/conv2d.py | # -*- coding: utf-8 -*-
# File: conv2d.py
from ..compat import tfv1 as tf # this should be avoided first in model code
from ..tfutils.common import get_tf_version_tuple
from ..utils.argtools import get_data_format, shape2d, shape4d, log_once
from .common import VariableHolder, layer_register
from .tflayer import convert_to_tflayer_args, rename_get_variable
__all__ = ['Conv2D', 'Deconv2D', 'Conv2DTranspose']
@layer_register(log_shape=True)
@convert_to_tflayer_args(
args_names=['filters', 'kernel_size'],
name_mapping={
'out_channel': 'filters',
'kernel_shape': 'kernel_size',
'stride': 'strides',
})
def Conv2D(
inputs,
filters,
kernel_size,
strides=(1, 1),
padding='same',
data_format='channels_last',
dilation_rate=(1, 1),
activation=None,
use_bias=True,
kernel_initializer=None,
bias_initializer=tf.zeros_initializer(),
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
split=1):
"""
Similar to `tf.layers.Conv2D`, but with some differences:
1. Default kernel initializer is variance_scaling_initializer(2.0).
2. Default padding is 'same'.
3. Support 'split' argument to do group convolution.
Variable Names:
* ``W``: weights
* ``b``: bias
"""
if kernel_initializer is None:
if get_tf_version_tuple() <= (1, 12):
kernel_initializer = tf.contrib.layers.variance_scaling_initializer(2.0) # deprecated
else:
kernel_initializer = tf.keras.initializers.VarianceScaling(2.0, distribution='untruncated_normal')
dilation_rate = shape2d(dilation_rate)
if split == 1 and dilation_rate == [1, 1]:
# tf.layers.Conv2D has bugs with dilations (https://github.com/tensorflow/tensorflow/issues/26797)
with rename_get_variable({'kernel': 'W', 'bias': 'b'}):
layer = tf.layers.Conv2D(
filters,
kernel_size,
strides=strides,
padding=padding,
data_format=data_format,
dilation_rate=dilation_rate,
activation=activation,
use_bias=use_bias,
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
activity_regularizer=activity_regularizer,
_reuse=tf.get_variable_scope().reuse)
ret = layer.apply(inputs, scope=tf.get_variable_scope())
ret = tf.identity(ret, name='output')
ret.variables = VariableHolder(W=layer.kernel)
if use_bias:
ret.variables.b = layer.bias
else:
# group conv implementation
data_format = get_data_format(data_format, keras_mode=False)
in_shape = inputs.get_shape().as_list()
channel_axis = 3 if data_format == 'NHWC' else 1
in_channel = in_shape[channel_axis]
assert in_channel is not None, "[Conv2D] Input cannot have unknown channel!"
assert in_channel % split == 0
assert kernel_regularizer is None and bias_regularizer is None and activity_regularizer is None, \
"Not supported by group conv or dilated conv!"
out_channel = filters
assert out_channel % split == 0
assert dilation_rate == [1, 1] or get_tf_version_tuple() >= (1, 5), 'TF>=1.5 required for dilated conv.'
kernel_shape = shape2d(kernel_size)
        filter_shape = kernel_shape + [in_channel // split, out_channel]
stride = shape4d(strides, data_format=data_format)
kwargs = {"data_format": data_format}
if get_tf_version_tuple() >= (1, 5):
kwargs['dilations'] = shape4d(dilation_rate, data_format=data_format)
# matching input dtype (ex. tf.float16) since the default dtype of variable if tf.float32
inputs_dtype = inputs.dtype
W = tf.get_variable(
'W', filter_shape, dtype=inputs_dtype, initializer=kernel_initializer)
if use_bias:
b = tf.get_variable('b', [out_channel], dtype=inputs_dtype, initializer=bias_initializer)
if split == 1:
conv = tf.nn.conv2d(inputs, W, stride, padding.upper(), **kwargs)
else:
conv = None
if get_tf_version_tuple() >= (1, 13):
try:
conv = tf.nn.conv2d(inputs, W, stride, padding.upper(), **kwargs)
except ValueError:
log_once("CUDNN group convolution support is only available with "
"https://github.com/tensorflow/tensorflow/pull/25818 . "
"Will fall back to a loop-based slow implementation instead!", 'warn')
if conv is None:
inputs = tf.split(inputs, split, channel_axis)
kernels = tf.split(W, split, 3)
outputs = [tf.nn.conv2d(i, k, stride, padding.upper(), **kwargs)
for i, k in zip(inputs, kernels)]
conv = tf.concat(outputs, channel_axis)
ret = tf.nn.bias_add(conv, b, data_format=data_format) if use_bias else conv
if activation is not None:
ret = activation(ret)
ret = tf.identity(ret, name='output')
ret.variables = VariableHolder(W=W)
if use_bias:
ret.variables.b = b
return ret
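# Usage sketch (assumes a tensorpack TowerContext and an NHWC float tensor `x`
# whose channel count is divisible by `split`):
#
#   y = Conv2D('conv1', x, 64, 3, split=2, activation=tf.nn.relu)
#
# This creates variables 'conv1/W' and 'conv1/b' and returns a tensor named
# 'conv1/output'; with split=2 each output channel only sees half of the input
# channels, i.e. a group convolution.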
@layer_register(log_shape=True)
@convert_to_tflayer_args(
args_names=['filters', 'kernel_size', 'strides'],
name_mapping={
'out_channel': 'filters',
'kernel_shape': 'kernel_size',
'stride': 'strides',
})
def Conv2DTranspose(
inputs,
filters,
kernel_size,
strides=(1, 1),
padding='same',
data_format='channels_last',
activation=None,
use_bias=True,
kernel_initializer=None,
bias_initializer=tf.zeros_initializer(),
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None):
"""
A wrapper around `tf.layers.Conv2DTranspose`.
Some differences to maintain backward-compatibility:
1. Default kernel initializer is variance_scaling_initializer(2.0).
2. Default padding is 'same'
Variable Names:
* ``W``: weights
* ``b``: bias
"""
if kernel_initializer is None:
if get_tf_version_tuple() <= (1, 12):
kernel_initializer = tf.contrib.layers.variance_scaling_initializer(2.0) # deprecated
else:
kernel_initializer = tf.keras.initializers.VarianceScaling(2.0, distribution='untruncated_normal')
if get_tf_version_tuple() <= (1, 12):
with rename_get_variable({'kernel': 'W', 'bias': 'b'}):
layer = tf.layers.Conv2DTranspose(
filters,
kernel_size,
strides=strides,
padding=padding,
data_format=data_format,
activation=activation,
use_bias=use_bias,
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
activity_regularizer=activity_regularizer,
_reuse=tf.get_variable_scope().reuse)
ret = layer.apply(inputs, scope=tf.get_variable_scope())
ret = tf.identity(ret, name='output')
ret.variables = VariableHolder(W=layer.kernel)
if use_bias:
ret.variables.b = layer.bias
else:
# Our own implementation, to avoid Keras bugs. https://github.com/tensorflow/tensorflow/issues/25946
assert kernel_regularizer is None and bias_regularizer is None and activity_regularizer is None, \
"Unsupported arguments due to Keras bug in TensorFlow 1.13"
data_format = get_data_format(data_format, keras_mode=False)
shape_dyn = tf.shape(inputs)
shape_sta = inputs.shape.as_list()
strides2d = shape2d(strides)
kernel_shape = shape2d(kernel_size)
assert padding.lower() in ['valid', 'same'], "Padding {} is not supported!".format(padding)
if padding.lower() == 'valid':
shape_res2d = [max(kernel_shape[0] - strides2d[0], 0),
max(kernel_shape[1] - strides2d[1], 0)]
else:
shape_res2d = shape2d(0)
if data_format == 'NCHW':
channels_in = shape_sta[1]
out_shape_dyn = tf.stack(
[shape_dyn[0], filters,
shape_dyn[2] * strides2d[0] + shape_res2d[0],
shape_dyn[3] * strides2d[1] + shape_res2d[1]])
out_shape3_sta = [filters,
None if shape_sta[2] is None else shape_sta[2] * strides2d[0] + shape_res2d[0],
None if shape_sta[3] is None else shape_sta[3] * strides2d[1] + shape_res2d[1]]
else:
channels_in = shape_sta[-1]
out_shape_dyn = tf.stack(
[shape_dyn[0],
shape_dyn[1] * strides2d[0] + shape_res2d[0],
shape_dyn[2] * strides2d[1] + shape_res2d[1],
filters])
out_shape3_sta = [None if shape_sta[1] is None else shape_sta[1] * strides2d[0] + shape_res2d[0],
None if shape_sta[2] is None else shape_sta[2] * strides2d[1] + shape_res2d[1],
filters]
inputs_dtype = inputs.dtype
W = tf.get_variable('W', kernel_shape + [filters, channels_in],
dtype=inputs_dtype, initializer=kernel_initializer)
if use_bias:
b = tf.get_variable('b', [filters], dtype=inputs_dtype, initializer=bias_initializer)
conv = tf.nn.conv2d_transpose(
inputs, W, out_shape_dyn,
shape4d(strides, data_format=data_format),
padding=padding.upper(),
data_format=data_format)
conv.set_shape(tf.TensorShape([shape_sta[0]] + out_shape3_sta))
ret = tf.nn.bias_add(conv, b, data_format=data_format) if use_bias else conv
if activation is not None:
ret = activation(ret)
ret = tf.identity(ret, name='output')
ret.variables = VariableHolder(W=W)
if use_bias:
ret.variables.b = b
return ret
Deconv2D = Conv2DTranspose
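# Usage sketch (assumes an NHWC feature map `x` inside a tensorpack TowerContext):
#
#   y = Conv2DTranspose('up', x, 32, 4, strides=2)
#   y = Deconv2D('up', x, 32, 4, strides=2)   # backward-compatible alias, same layer
#
# Both create 'up/W' (and 'up/b' when use_bias=True) and roughly double the
# spatial size with strides=2 and 'same' padding.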
| 10,577 | 38.470149 | 112 | py |
SyNet | SyNet-master/tensorpack/tensorpack/models/shape_utils.py | # -*- coding: utf-8 -*-
# File: shape_utils.py
import tensorflow as tf
__all__ = []
class StaticDynamicAxis(object):
def __init__(self, static, dynamic):
self.static = static
self.dynamic = dynamic
def apply(self, f):
try:
st = f(self.static)
return StaticDynamicAxis(st, st)
except TypeError:
return StaticDynamicAxis(None, f(self.dynamic))
def __str__(self):
return "S={}, D={}".format(str(self.static), str(self.dynamic))
def DynamicLazyAxis(shape, idx):
return lambda: shape[idx]
def StaticLazyAxis(dim):
return lambda: dim
class StaticDynamicShape(object):
def __init__(self, tensor):
assert isinstance(tensor, tf.Tensor), tensor
ndims = tensor.shape.ndims
self.static = tensor.shape.as_list()
if tensor.shape.is_fully_defined():
self.dynamic = self.static[:]
else:
dynamic = tf.shape(tensor)
self.dynamic = [DynamicLazyAxis(dynamic, k) for k in range(ndims)]
for k in range(ndims):
if self.static[k] is not None:
self.dynamic[k] = StaticLazyAxis(self.static[k])
def apply(self, axis, f):
if self.static[axis] is not None:
try:
st = f(self.static[axis])
self.static[axis] = st
self.dynamic[axis] = StaticLazyAxis(st)
return
except TypeError:
pass
self.static[axis] = None
dyn = self.dynamic[axis]
self.dynamic[axis] = lambda: f(dyn())
def get_static(self):
return self.static
@property
def ndims(self):
return len(self.static)
def get_dynamic(self, axis=None):
if axis is None:
return [self.dynamic[k]() for k in range(self.ndims)]
return self.dynamic[axis]()
if __name__ == '__main__':
x = tf.placeholder(tf.float32, shape=[None, 3, None, 10])
shape = StaticDynamicShape(x)
shape.apply(1, lambda x: x * 3)
shape.apply(2, lambda x: x + 5)
print(shape.get_static())
print(shape.get_dynamic())
| 2,152 | 25.580247 | 78 | py |
SyNet | SyNet-master/tensorpack/tensorpack/models/registry.py | # -*- coding: utf-8 -*-
# File: registry.py
import copy
import re
import collections
from functools import wraps
import six
import tensorflow as tf
from ..compat import tfv1
from ..tfutils.argscope import get_arg_scope
from ..tfutils.model_utils import get_shape_str
from ..utils import logger
# make sure each layer is only logged once
_LAYER_LOGGED = set()
_LAYER_REGISTRY = {}
__all__ = ['layer_register', 'disable_layer_logging']
_NameConflict = "LAYER_NAME_CONFLICT!!"
def _register(name, func):
if name in _LAYER_REGISTRY:
_LAYER_REGISTRY[name] = _NameConflict
return
if name in ['tf']:
        logger.error("A layer cannot be named {}".format(name))
        raise ValueError("A layer cannot be named {}".format(name))
_LAYER_REGISTRY[name] = func
# handle alias
if name == 'Conv2DTranspose':
_register('Deconv2D', func)
def get_registered_layer(name):
"""
Args:
name (str): the name of the layer, e.g. 'Conv2D'
Returns:
the wrapped layer function, or None if not registered.
"""
ret = _LAYER_REGISTRY.get(name, None)
if ret == _NameConflict:
raise KeyError("Layer named '{}' is registered with `@layer_register` more than once!".format(name))
return ret
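# Sketch: look up a layer by its registered name and call it like the decorated
# function (assumes the defining module, e.g. tensorpack.models.conv2d, has been
# imported so that registration already happened):
#
#   Conv2D = get_registered_layer('Conv2D')
#   y = Conv2D('conv0', x, 32, 3)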
def disable_layer_logging():
"""
Disable the shape logging for all layers from this moment on. Can be
useful when creating multiple towers.
"""
class ContainEverything:
def __contains__(self, x):
return True
    # `nonlocal` is Python 3 only, so rebind the module-level name via globals() instead
globals()['_LAYER_LOGGED'] = ContainEverything()
class LayerShapeLogger():
"""
A class that logs shapes of inputs/outputs of layers,
during the possibly-nested calls to them.
"""
def __init__(self):
self.stack = collections.deque()
self.depth = 0
def _indent(self):
return " " * (self.depth * 2)
def push_inputs(self, name, message):
while len(self.stack):
item = self.stack.pop()
logger.info(self._indent() + "'{}' input: {}".format(item[0], item[1]))
self.depth += 1
self.stack.append((name, message))
def push_outputs(self, name, message):
if len(self.stack):
assert len(self.stack) == 1, self.stack
assert self.stack[-1][0] == name, self.stack
item = self.stack.pop()
logger.info(self._indent() + "'{}': {} --> {}".format(name, item[1], message))
else:
self.depth -= 1
logger.info(self._indent() + "'{}' output: {}".format(name, message))
_SHAPE_LOGGER = LayerShapeLogger()
def layer_register(
log_shape=False,
use_scope=True):
"""
Args:
log_shape (bool): log input/output shape of this layer
use_scope (bool or None):
Whether to call this layer with an extra first argument as variable scope.
When set to None, it can be called either with or without
            the scope name argument, depending on whether the first argument
            is a string or not.
Returns:
A decorator used to register a layer.
Example:
.. code-block:: python
@layer_register(use_scope=True)
def add10(x):
return x + tf.get_variable('W', shape=[10])
# use it:
output = add10('layer_name', input) # the function will be called under variable scope "layer_name".
"""
def wrapper(func):
@wraps(func)
def wrapped_func(*args, **kwargs):
assert args[0] is not None, args
if use_scope:
name, inputs = args[0], args[1]
args = args[1:] # actual positional args used to call func
assert isinstance(name, six.string_types), "First argument for \"{}\" should be a string. ".format(
func.__name__) + "Did you forget to specify the name of the layer?"
else:
assert not log_shape
if isinstance(args[0], six.string_types):
if use_scope is False:
logger.warn(
"Please call layer {} without the first scope name argument, "
"or register the layer with use_scope=None to allow calling it "
"with scope names.".format(func.__name__))
name, inputs = args[0], args[1]
args = args[1:] # actual positional args used to call func
else:
inputs = args[0]
name = None
if not (isinstance(inputs, (tf.Tensor, tf.Variable)) or
(isinstance(inputs, (list, tuple)) and
isinstance(inputs[0], (tf.Tensor, tf.Variable)))):
raise ValueError("Invalid inputs to layer: " + str(inputs))
# use kwargs from current argument scope
actual_args = copy.copy(get_arg_scope()[func.__name__])
# explicit kwargs overwrite argscope
actual_args.update(kwargs)
# if six.PY3:
# # explicit positional args also override argscope. only work in PY3
# posargmap = inspect.signature(func).bind_partial(*args).arguments
# for k in six.iterkeys(posargmap):
# if k in actual_args:
# del actual_args[k]
if name is not None: # use scope
with tfv1.variable_scope(name) as scope:
                # this name is only used to suppress logging; it doesn't hurt to do some heuristics
scope_name = re.sub('tower[0-9]+/', '', scope.name)
do_log_shape = log_shape and scope_name not in _LAYER_LOGGED
if do_log_shape:
_SHAPE_LOGGER.push_inputs(scope.name, get_shape_str(inputs))
# run the actual function
outputs = func(*args, **actual_args)
if do_log_shape:
_SHAPE_LOGGER.push_outputs(scope.name, get_shape_str(outputs))
_LAYER_LOGGED.add(scope_name)
else:
# run the actual function
outputs = func(*args, **actual_args)
return outputs
wrapped_func.use_scope = use_scope
wrapped_func.__argscope_enabled__ = True
_register(func.__name__, wrapped_func)
return wrapped_func
return wrapper
| 6,483 | 33.306878 | 115 | py |
SyNet | SyNet-master/tensorpack/tensorpack/models/tflayer.py | # -*- coding: utf-8 -*-
# File: tflayer.py
import functools
import six
import tensorflow as tf
from ..tfutils.common import get_tf_version_tuple
from ..tfutils.varreplace import custom_getter_scope
from ..utils.argtools import get_data_format
__all__ = []
def map_common_tfargs(kwargs):
df = kwargs.pop('data_format', None)
if df is not None:
df = get_data_format(df, keras_mode=True)
kwargs['data_format'] = df
old_nl = kwargs.pop('nl', None)
if old_nl is not None:
kwargs['activation'] = lambda x, name=None: old_nl(x, name=name)
if 'W_init' in kwargs:
kwargs['kernel_initializer'] = kwargs.pop('W_init')
if 'b_init' in kwargs:
kwargs['bias_initializer'] = kwargs.pop('b_init')
return kwargs
def convert_to_tflayer_args(args_names, name_mapping):
"""
After applying this decorator:
1. data_format becomes tf.layers style
2. nl becomes activation
3. initializers are renamed
4. positional args are transformed to corresponding kwargs, according to args_names
5. kwargs are mapped to tf.layers names if needed, by name_mapping
"""
def decorator(func):
@functools.wraps(func)
def decorated_func(inputs, *args, **kwargs):
kwargs = map_common_tfargs(kwargs)
posarg_dic = {}
assert len(args) <= len(args_names), \
"Please use kwargs instead of positional args to call this model, " \
"except for the following arguments: {}".format(', '.join(args_names))
for pos_arg, name in zip(args, args_names):
posarg_dic[name] = pos_arg
ret = {}
for name, arg in six.iteritems(kwargs):
newname = name_mapping.get(name, None)
if newname is not None:
assert newname not in kwargs, \
"Argument {} and {} conflicts!".format(name, newname)
else:
newname = name
ret[newname] = arg
ret.update(posarg_dic) # Let pos arg overwrite kw arg, for argscope to work
return func(inputs, **ret)
return decorated_func
return decorator
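# Sketch of the translation this decorator performs, using the Conv2DTranspose
# registration in conv2d.py (args_names=['filters', 'kernel_size', 'strides'],
# with legacy names remapped through name_mapping); `x` is an assumed input tensor:
#
#   Conv2DTranspose('up', x, 32, 4, 2)                                  # positional args
#   Conv2DTranspose('up', x, out_channel=32, kernel_shape=4, stride=2)  # legacy kwargs, remapped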
def rename_get_variable(mapping):
"""
Args:
mapping(dict): an old -> new mapping for variable basename. e.g. {'kernel': 'W'}
Returns:
A context where the variables are renamed.
"""
def custom_getter(getter, name, *args, **kwargs):
splits = name.split('/')
basename = splits[-1]
if basename in mapping:
basename = mapping[basename]
splits[-1] = basename
name = '/'.join(splits)
return getter(name, *args, **kwargs)
return custom_getter_scope(custom_getter)
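# Sketch: inside this context, tf.layers variables normally named 'kernel'/'bias'
# are created (and restored) under the tensorpack names 'W'/'b':
#
#   with rename_get_variable({'kernel': 'W', 'bias': 'b'}):
#       y = tf.layers.dense(x, 10, name='fc0')   # creates 'fc0/W' and 'fc0/b'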
def rename_tflayer_get_variable():
"""
    Rename all :func:`tf.get_variable` calls with rules that transform tf.layers style names to tensorpack style.
Returns:
A context where the variables are renamed.
Example:
.. code-block:: python
with rename_tflayer_get_variable():
            x = tf.layers.conv2d(input, 3, 3, name='conv0')
# variables will be named 'conv0/W', 'conv0/b'
"""
mapping = {
'kernel': 'W',
'bias': 'b',
'moving_mean': 'mean/EMA',
'moving_variance': 'variance/EMA',
}
return rename_get_variable(mapping)
def monkeypatch_tf_layers():
if get_tf_version_tuple() < (1, 4):
if not hasattr(tf.layers, 'Dense'):
from tensorflow.python.layers.core import Dense
tf.layers.Dense = Dense
from tensorflow.python.layers.normalization import BatchNormalization
tf.layers.BatchNormalization = BatchNormalization
from tensorflow.python.layers.convolutional import Conv2DTranspose, Conv2D
tf.layers.Conv2DTranspose = Conv2DTranspose
tf.layers.Conv2D = Conv2D
from tensorflow.python.layers.pooling import MaxPooling2D, AveragePooling2D
tf.layers.MaxPooling2D = MaxPooling2D
tf.layers.AveragePooling2D = AveragePooling2D
monkeypatch_tf_layers()
| 4,159 | 29.814815 | 100 | py |
SyNet | SyNet-master/tensorpack/tensorpack/models/utils.py | # -*- coding: utf-8 -*-
# File: utils.py
import six
class VariableHolder(object):
""" A proxy to access variables defined in a layer. """
def __init__(self, **kwargs):
"""
Args:
kwargs: {name:variable}
"""
self._vars = {}
for k, v in six.iteritems(kwargs):
self._add_variable(k, v)
def _add_variable(self, name, var):
assert name not in self._vars
self._vars[name] = var
def __setattr__(self, name, var):
if not name.startswith('_'):
self._add_variable(name, var)
else:
# private attributes
super(VariableHolder, self).__setattr__(name, var)
def __getattr__(self, name):
return self._vars[name]
def all(self):
"""
Returns:
list of all variables
"""
return list(six.itervalues(self._vars))
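# Typical use (sketch): a layer attaches a holder to its output tensor so that
# callers can reach the layer's variables by name:
#
#   ret.variables = VariableHolder(W=W)
#   ret.variables.b = b              # added later through __setattr__
#   all_vars = ret.variables.all()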
try:
# When BN is used as an activation, keras layers try to autograph.convert it
# This leads to massive warnings so we disable it.
from tensorflow.python.autograph.impl.api import do_not_convert as disable_autograph
except ImportError:
def disable_autograph():
return lambda x: x
| 1,212 | 24.808511 | 88 | py |
SyNet | SyNet-master/tensorpack/tensorpack/models/models_test.py | # -*- coding: utf-8 -*-
# File: _test.py
import logging
import unittest
import tensorflow as tf
import numpy as np
from .conv2d import Conv2DTranspose
from .pool import FixedUnPooling
class TestModel(unittest.TestCase):
def eval(self, x, feed_dict=None):
sess = tf.Session()
sess.run(tf.global_variables_initializer())
if isinstance(x, list):
return sess.run(x, feed_dict=feed_dict)
else:
return sess.run([x], feed_dict=feed_dict)[0]
def make_variable(self, *args):
if len(args) > 1:
return [tf.Variable(k) for k in args]
else:
return tf.Variable(args[0])
class TestPool(TestModel):
def test_FixedUnPooling(self):
h, w = 3, 4
scale = 2
mat = np.random.rand(h, w, 3).astype('float32')
input = self.make_variable(mat)
input = tf.reshape(input, [1, h, w, 3])
output = FixedUnPooling('unpool', input, scale)
res = self.eval(output)
self.assertEqual(res.shape, (1, scale * h, scale * w, 3))
# mat is on corner
ele = res[0, ::scale, ::scale, 0]
self.assertTrue((ele == mat[:, :, 0]).all())
# the rest are zeros
res[0, ::scale, ::scale, :] = 0
self.assertTrue((res == 0).all())
# Below was originally for the BilinearUpsample layer used in the HED example
# def test_BilinearUpSample(self):
# h, w = 12, 12
# scale = 2
#
# mat = np.random.rand(h, w).astype('float32')
# inp = self.make_variable(mat)
# inp = tf.reshape(inp, [1, h, w, 1])
#
# output = BilinearUpSample(inp, scale)
# res = self.eval(output)[0, :, :, 0]
#
# from skimage.transform import rescale
# res2 = rescale(mat, scale, mode='edge')
#
# diff = np.abs(res2 - res)
#
# # if not diff.max() < 1e-4:
# # import IPython
# # IPython.embed(config=IPython.terminal.ipapp.load_default_config())
# self.assertTrue(diff.max() < 1e-4, diff.max())
class TestConv2DTranspose(TestModel):
def setUp(self):
tf.reset_default_graph()
def test_shape_match(self):
h, w = 12, 18
input = self.make_variable(np.random.rand(1, h, w, 3).astype("float32"))
for padding in ["same", "valid"]:
for stride in [1, 2]:
output = Conv2DTranspose(
'deconv_s{}_pad{}'.format(stride, padding),
input, 20, 3, strides=stride, padding=padding)
static_shape = output.shape
dynamic_shape = self.eval(output).shape
self.assertTrue(static_shape == dynamic_shape)
def test_unspecified_shape_match(self):
h, w = 12, 18
input = tf.placeholder(shape=(1, h, None, 3), dtype=tf.float32)
for padding in ["same", "valid"]:
for stride in [1, 2]:
output = Conv2DTranspose(
'deconv_s{}_pad{}'.format(stride, padding),
input, 20, 3, strides=stride, padding=padding)
static_shape = tuple(output.shape.as_list())
dynamic_shape = self.eval(
output,
feed_dict={input: np.random.rand(1, h, w, 3)}).shape
self.assertTrue(static_shape[2] is None)
self.assertTrue(static_shape[:2] == dynamic_shape[:2])
self.assertTrue(static_shape[3] == dynamic_shape[3])
def run_test_case(case):
suite = unittest.TestLoader().loadTestsFromTestCase(case)
unittest.TextTestRunner(verbosity=2).run(suite)
if __name__ == '__main__':
from tensorpack.utils import logger
logger.setLevel(logging.CRITICAL)
unittest.main()
| 3,753 | 31.362069 | 82 | py |
SyNet | SyNet-master/tensorpack/tensorpack/models/_old_batch_norm.py | # -*- coding: utf-8 -*-
# File: _old_batch_norm.py
import tensorflow as tf
from tensorflow.contrib.framework import add_model_variable
from tensorflow.python.training import moving_averages
from ..tfutils.common import get_tf_version_tuple
from ..tfutils.tower import get_current_tower_context
from ..utils import logger
from ..utils.argtools import get_data_format
from .common import VariableHolder, layer_register
from .tflayer import convert_to_tflayer_args
"""
Old Custom BN Implementation, Kept Here For Future Reference
"""
def get_bn_variables(n_out, use_scale, use_bias, gamma_init):
if use_bias:
beta = tf.get_variable('beta', [n_out], initializer=tf.constant_initializer())
else:
beta = tf.zeros([n_out], name='beta')
if use_scale:
gamma = tf.get_variable('gamma', [n_out], initializer=gamma_init)
else:
gamma = tf.ones([n_out], name='gamma')
# x * gamma + beta
moving_mean = tf.get_variable('mean/EMA', [n_out],
initializer=tf.constant_initializer(), trainable=False)
moving_var = tf.get_variable('variance/EMA', [n_out],
initializer=tf.constant_initializer(1.0), trainable=False)
return beta, gamma, moving_mean, moving_var
def update_bn_ema(xn, batch_mean, batch_var,
moving_mean, moving_var, decay, internal_update):
update_op1 = moving_averages.assign_moving_average(
moving_mean, batch_mean, decay, zero_debias=False,
name='mean_ema_op')
update_op2 = moving_averages.assign_moving_average(
moving_var, batch_var, decay, zero_debias=False,
name='var_ema_op')
if internal_update:
with tf.control_dependencies([update_op1, update_op2]):
return tf.identity(xn, name='output')
else:
tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, update_op1)
tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, update_op2)
return tf.identity(xn, name='output')
@layer_register()
@convert_to_tflayer_args(
args_names=[],
name_mapping={
'use_bias': 'center',
'use_scale': 'scale',
'gamma_init': 'gamma_initializer',
'decay': 'momentum',
'use_local_stat': 'training'
})
def BatchNorm(inputs, training=None, momentum=0.9, epsilon=1e-5,
center=True, scale=True,
gamma_initializer=tf.ones_initializer(),
data_format='channels_last',
internal_update=False):
"""
Mostly equivalent to `tf.layers.batch_normalization`, but difference in
the following:
1. Accepts `data_format` rather than `axis`. For 2D input, this argument will be ignored.
2. Default value for `momentum` and `epsilon` is different.
3. Default value for `training` is automatically obtained from `TowerContext`.
4. Support the `internal_update` option.
Args:
internal_update (bool): if False, add EMA update ops to
`tf.GraphKeys.UPDATE_OPS`. If True, update EMA inside the layer
by control dependencies.
Variable Names:
* ``beta``: the bias term. Will be zero-inited by default.
* ``gamma``: the scale term. Will be one-inited by default. Input will be transformed by ``x * gamma + beta``.
* ``mean/EMA``: the moving average of mean.
* ``variance/EMA``: the moving average of variance.
Note:
1. About multi-GPU training: moving averages across GPUs are not aggregated.
Batch statistics are computed independently. This is consistent with most frameworks.
2. Combinations of ``training`` and ``ctx.is_training``:
* ``training == ctx.is_training``: standard BN, EMA are
maintained during training and used during inference. This is
the default.
* ``training and not ctx.is_training``: still use batch statistics in inference.
* ``not training and ctx.is_training``: use EMA to normalize in
training. This is useful when you load a pre-trained BN and
don't want to fine tune the EMA. EMA will not be updated in
this case.
"""
data_format = get_data_format(data_format, keras_mode=False)
shape = inputs.get_shape().as_list()
ndims = len(shape)
assert ndims in [2, 4]
if ndims == 2:
data_format = 'NHWC'
if data_format == 'NCHW':
n_out = shape[1]
else:
n_out = shape[-1] # channel
assert n_out is not None, "Input to BatchNorm cannot have unknown channels!"
beta, gamma, moving_mean, moving_var = get_bn_variables(n_out, scale, center, gamma_initializer)
ctx = get_current_tower_context()
use_local_stat = training
if use_local_stat is None:
use_local_stat = ctx.is_training
use_local_stat = bool(use_local_stat)
if use_local_stat:
if ndims == 2:
inputs = tf.reshape(inputs, [-1, 1, 1, n_out]) # fused_bn only takes 4D input
# fused_bn has error using NCHW? (see #190)
xn, batch_mean, batch_var = tf.nn.fused_batch_norm(
inputs, gamma, beta, epsilon=epsilon,
is_training=True, data_format=data_format)
if ndims == 2:
xn = tf.squeeze(xn, [1, 2])
else:
if ctx.is_training:
assert get_tf_version_tuple() >= (1, 4), \
"Fine tuning a BatchNorm model with fixed statistics is only " \
"supported after https://github.com/tensorflow/tensorflow/pull/12580 "
if ctx.is_main_training_tower: # only warn in first tower
logger.warn("[BatchNorm] Using moving_mean/moving_variance in training.")
# Using moving_mean/moving_variance in training, which means we
# loaded a pre-trained BN and only fine-tuning the affine part.
xn, _, _ = tf.nn.fused_batch_norm(
inputs, gamma, beta,
mean=moving_mean, variance=moving_var, epsilon=epsilon,
data_format=data_format, is_training=False)
else:
if ndims == 4:
xn, _, _ = tf.nn.fused_batch_norm(
inputs, gamma, beta,
mean=moving_mean, variance=moving_var, epsilon=epsilon,
data_format=data_format, is_training=False)
else:
xn = tf.nn.batch_normalization(
inputs, moving_mean, moving_var, beta, gamma, epsilon)
    # Maintaining the EMA on only one GPU is OK, even in replicated mode,
    # because the EMA is not used at training time.
if ctx.is_main_training_tower:
add_model_variable(moving_mean)
add_model_variable(moving_var)
if ctx.is_main_training_tower and use_local_stat:
ret = update_bn_ema(xn, batch_mean, batch_var, moving_mean, moving_var, momentum, internal_update)
else:
ret = tf.identity(xn, name='output')
vh = ret.variables = VariableHolder(mean=moving_mean, variance=moving_var)
if scale:
vh.gamma = gamma
if center:
vh.beta = beta
return ret
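# Usage sketch for this legacy implementation (assumes it is called under a
# tensorpack TowerContext so that `training` can default from the context):
#
#   y = BatchNorm('bn', x)                         # EMA update ops added to UPDATE_OPS
#   y = BatchNorm('bn', x, internal_update=True)   # EMA updated via control dependencies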
| 7,082 | 40.664706 | 114 | py |
SyNet | SyNet-master/tensorpack/tensorpack/models/linearwrap.py | # -*- coding: utf-8 -*-
# File: linearwrap.py
from types import ModuleType
import six
from .registry import get_registered_layer
__all__ = ['LinearWrap']
class LinearWrap(object):
""" A simple wrapper to easily create "linear" graph,
consisting of layers / symbolic functions with only one input & output.
"""
class _TFModuleFunc(object):
def __init__(self, mod, tensor):
self._mod = mod
self._t = tensor
def __getattr__(self, name):
ret = getattr(self._mod, name)
if isinstance(ret, ModuleType):
return LinearWrap._TFModuleFunc(ret, self._t)
else:
                # assumed to be a tf function
def f(*args, **kwargs):
o = ret(self._t, *args, **kwargs)
return LinearWrap(o)
return f
def __init__(self, tensor):
"""
Args:
tensor (tf.Tensor): the tensor to wrap
"""
self._t = tensor
def __getattr__(self, layer_name):
layer = get_registered_layer(layer_name)
if layer is not None:
# this is a registered tensorpack layer
# parse arguments by tensorpack model convention
if layer.use_scope:
def layer_func(name, *args, **kwargs):
ret = layer(name, self._t, *args, **kwargs)
return LinearWrap(ret)
else:
def layer_func(*args, **kwargs):
if len(args) and isinstance(args[0], six.string_types):
name, args = args[0], args[1:]
ret = layer(name, self._t, *args, **kwargs)
else:
ret = layer(self._t, *args, **kwargs)
return LinearWrap(ret)
return layer_func
else:
assert layer_name == 'tf', \
"Calling LinearWrap.{}:" \
" neither a layer nor 'tf'! " \
"Did you forget to extract tensor from LinearWrap?".format(layer_name)
import tensorflow as layer # noqa
assert isinstance(layer, ModuleType), layer
return LinearWrap._TFModuleFunc(layer, self._t)
def apply(self, func, *args, **kwargs):
"""
Apply a function on the wrapped tensor.
Returns:
LinearWrap: ``LinearWrap(func(self.tensor(), *args, **kwargs))``.
"""
ret = func(self._t, *args, **kwargs)
return LinearWrap(ret)
def apply2(self, func, *args, **kwargs):
"""
Apply a function on the wrapped tensor. The tensor
will be the second argument of func.
This is because many symbolic functions
        (such as tensorpack's layers) take 'scope' as the first argument.
Returns:
LinearWrap: ``LinearWrap(func(args[0], self.tensor(), *args[1:], **kwargs))``.
"""
ret = func(args[0], self._t, *(args[1:]), **kwargs)
return LinearWrap(ret)
def __call__(self):
"""
Returns:
tf.Tensor: the underlying wrapped tensor.
"""
return self._t
def tensor(self):
"""
Equivalent to ``self.__call__()``.
Returns:
tf.Tensor: the underlying wrapped tensor.
"""
return self._t
def print_tensor(self):
"""
Print the underlying tensor and return self. Can be useful to get the
name of tensors inside :class:`LinearWrap`.
:return: self
"""
print(self._t)
return self
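# Usage sketch (assumes tensorpack layers such as Conv2D / MaxPooling /
# FullyConnected are imported, so they are registered by name):
#
#   logits = (LinearWrap(image)
#             .Conv2D('conv0', 32, 3, activation=tf.nn.relu)
#             .MaxPooling('pool0', 2)
#             .FullyConnected('fc0', 10)())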
| 3,634 | 30.068376 | 90 | py |